| repo_name (string, 5–100 chars) | path (string, 4–294 chars) | copies (class, 990 values) | size (string, 4–7 chars) | content (string, 666–1M chars) | license (class, 15 values) |
|---|---|---|---|---|---|
saketkc/hatex | 2019_Spring/CSCI-572/HW04/CSCI572_HW4/create_edgerank.py | 1 | 2917 | import pandas as pd
from bs4 import BeautifulSoup
import glob
import ntpath
import networkx as nx
def path_leaf(path):
    """Return the last path component, even when *path* ends with a separator."""
    directory, leaf = ntpath.split(path)
    # A trailing separator leaves ``leaf`` empty; fall back to the last
    # component of the directory part in that case.
    return leaf if leaf else ntpath.basename(directory)
def get_outgoing_links(html_file):
    """Get list of outgoing links for the input html file.

    Parameters
    ----------
    html_file: str
        Path to html file

    Returns
    -------
    list_of_urls: list
        List of outgoing urls
    """
    # BUGFIX: the file handle was never closed, and the decoded text was
    # pointlessly re-encoded to bytes before being handed to BeautifulSoup.
    with open(html_file) as fh:
        soup = BeautifulSoup(fh.read())
    links = []
    # href=True restricts the search to anchors that actually carry a href.
    for link in soup.findAll("a", href=True):
        href = link.get("href")
        # Skip in-page anchors; they are not outgoing links.
        # (The previous try/except IndexError around this lookup was dead
        # code: Tag.get never raises IndexError. The unused ``text``
        # extraction was removed as well.)
        if href == "#":
            continue
        links.append(href)
    return links
def get_filenames_for_URLs(mapping_file_df, list_of_links):
    """Get list of html filenames for a list of links.

    Parameters
    ----------
    mapping_file_df: pd.DataFrame
        Dataframe with mapping.csv loaded
    list_of_links: list
        List of URLs

    Returns
    -------
    list_of_filenames: list
        List of filenames
    """
    # Boolean mask of rows whose URL appears in the requested link list.
    matches = mapping_file_df.URL.isin(list_of_links)
    return list(mapping_file_df.loc[matches, "filename"])
def main():
    """Build an edge list from crawled HTML pages and write PageRank scores.

    Steps: load the URL->filename mapping CSV, extract outgoing links from
    every crawled ``*.html`` file, write an adjacency list (``edgeList.txt``),
    then run networkx PageRank over the resulting directed graph and dump
    per-file scores to ``external_PageRankFile.txt``.
    """
    # NOTE(review): machine-specific hard-coded paths — parameterize if reused.
    crawl_data_dir = (
        "/media/rna/yahoo_crawl_data/Yahoo-20190406T235503Z-001/Yahoo/yahoo/"
    )
    csv_file = "/media/rna/yahoo_crawl_data/Yahoo-20190406T235503Z-001/Yahoo/URLtoHTML_yahoo_news.csv"
    mapping_file_df = (
        pd.read_csv(csv_file).sort_values(by=["filename", "URL"]).reset_index(drop=True)
    )
    list_of_html_files = glob.glob("{}/*.html".format(crawl_data_dir))
    with open("edgeList.txt", "w") as fh:
        for filepath in list_of_html_files:
            filename = path_leaf(filepath)
            links = get_outgoing_links(filepath)
            filenames_for_url = get_filenames_for_URLs(mapping_file_df, links)
            # connection_matrix.loc[filename, filenames_for_url]+=1
            # connection_matrix.loc[filename, filenames_for_url] =1
            # with open()
            # One adjacency-list line per page: "<source> <target> <target> ...".
            fh.write("{} {}\n".format(filename, " ".join(filenames_for_url)))
    # NOTE(review): read_adjlist builds an unweighted DiGraph, so the
    # weight="weight" argument below presumably has no effect — confirm.
    G = nx.read_adjlist("edgeList.txt", create_using=nx.DiGraph())
    pagerank = nx.pagerank(
        G,
        alpha=0.85,
        personalization=None,
        max_iter=100,
        tol=1e-06,
        nstart=None,
        weight="weight",
        dangling=None,
    )
    with open("external_PageRankFile.txt", "w") as fh:
        for key, value in pagerank.items():
            # Emit "<absolute path>=<score>" per page.
            fh.write("{}/{}={}\n".format(crawl_data_dir, key, value))
if __name__ == "__main__":
    main()
| mit |
M4rtinK/anaconda | pyanaconda/modules/storage/partitioning/automatic/automatic_module.py | 5 | 5931 | #
# Auto partitioning module.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import copy
from pyanaconda.anaconda_loggers import get_module_logger
from pyanaconda.core.configuration.anaconda import conf
from pyanaconda.core.signal import Signal
from pyanaconda.modules.common.structures.partitioning import PartitioningRequest
from pyanaconda.modules.storage.partitioning.automatic.resizable_module import \
ResizableDeviceTreeModule
from pyanaconda.modules.storage.partitioning.base import PartitioningModule
from pyanaconda.modules.storage.partitioning.automatic.automatic_interface import \
AutoPartitioningInterface
from pyanaconda.modules.storage.partitioning.constants import PartitioningMethod
from pyanaconda.modules.storage.partitioning.automatic.automatic_partitioning import \
AutomaticPartitioningTask
log = get_module_logger(__name__)
class AutoPartitioningModule(PartitioningModule):
    """The auto partitioning module.

    Holds a single :class:`PartitioningRequest` describing the desired
    automatic partitioning, translates it to and from kickstart data, and
    produces the task that schedules the partitioning actions.
    """
    def __init__(self):
        """Initialize the module."""
        super().__init__()
        # Emitted every time set_request() replaces the current request.
        self.request_changed = Signal()
        self._request = PartitioningRequest()
    @property
    def partitioning_method(self):
        """Type of the partitioning method."""
        return PartitioningMethod.AUTOMATIC
    def for_publication(self):
        """Return a DBus representation."""
        return AutoPartitioningInterface(self)
    def _create_device_tree(self):
        """Create the device tree module."""
        return ResizableDeviceTreeModule()
    def process_kickstart(self, data):
        """Process the kickstart data.

        Builds a fresh PartitioningRequest from the ``autopart`` kickstart
        command and installs it via set_request().
        """
        request = PartitioningRequest()
        if data.autopart.type is not None:
            request.partitioning_scheme = data.autopart.type
        if data.autopart.fstype:
            request.file_system_type = data.autopart.fstype
        # The no* flags translate to excluded mount points ("swap" is a
        # pseudo mount point understood by the storage code).
        if data.autopart.noboot:
            request.excluded_mount_points.append("/boot")
        if data.autopart.nohome:
            request.excluded_mount_points.append("/home")
        if data.autopart.noswap:
            request.excluded_mount_points.append("swap")
        if data.autopart.encrypted:
            # Encryption settings are only picked up when encryption is on.
            request.encrypted = True
            request.passphrase = data.autopart.passphrase
            request.cipher = data.autopart.cipher
            request.luks_version = data.autopart.luks_version
            request.pbkdf = data.autopart.pbkdf
            request.pbkdf_memory = data.autopart.pbkdf_memory
            request.pbkdf_time = data.autopart.pbkdf_time
            request.pbkdf_iterations = data.autopart.pbkdf_iterations
            request.escrow_certificate = data.autopart.escrowcert
            request.backup_passphrase_enabled = data.autopart.backuppassphrase
        self.set_request(request)
    def setup_kickstart(self, data):
        """Setup the kickstart data.

        Inverse of process_kickstart(): serializes the current request back
        into the ``autopart`` kickstart command.
        """
        data.autopart.autopart = True
        data.autopart.fstype = self.request.file_system_type
        # Only emit a non-default scheme to keep generated kickstarts minimal.
        if self.request.partitioning_scheme != conf.storage.default_scheme:
            data.autopart.type = self.request.partitioning_scheme
        data.autopart.nohome = "/home" in self.request.excluded_mount_points
        data.autopart.noboot = "/boot" in self.request.excluded_mount_points
        data.autopart.noswap = "swap" in self.request.excluded_mount_points
        data.autopart.encrypted = self.request.encrypted
        # Don't generate sensitive information.
        data.autopart.passphrase = ""
        data.autopart.cipher = self.request.cipher
        data.autopart.luks_version = self.request.luks_version
        data.autopart.pbkdf = self.request.pbkdf
        data.autopart.pbkdf_memory = self.request.pbkdf_memory
        data.autopart.pbkdf_time = self.request.pbkdf_time
        data.autopart.pbkdf_iterations = self.request.pbkdf_iterations
        data.autopart.escrowcert = self.request.escrow_certificate
        data.autopart.backuppassphrase = self.request.backup_passphrase_enabled
    @property
    def request(self):
        """The partitioning request."""
        return self._request
    def set_request(self, request):
        """Set the partitioning request.

        Emits request_changed after swapping in the new request.

        :param request: a request
        """
        self._request = request
        self.request_changed.emit()
        log.debug("Request is set to '%s'.", request)
    def requires_passphrase(self):
        """Is the default passphrase required?

        :return: True or False
        """
        return self.request.encrypted and not self.request.passphrase
    def set_passphrase(self, passphrase):
        """Set a default passphrase for all encrypted devices.

        :param passphrase: a string with a passphrase
        """
        # Update the request with a new copy.
        request = copy.deepcopy(self.request)
        request.passphrase = passphrase
        self.set_request(request)
    def configure_with_task(self):
        """Schedule the partitioning actions."""
        return AutomaticPartitioningTask(self.storage, self.request)
| gpl-2.0 |
celery/kombu | kombu/log.py | 1 | 4036 | """Logging Utilities."""
import logging
import numbers
import os
import sys
from logging.handlers import WatchedFileHandler
from .utils.encoding import safe_repr, safe_str
from .utils.functional import maybe_evaluate
from .utils.objects import cached_property
__all__ = ('LogMixin', 'LOG_LEVELS', 'get_loglevel', 'setup_logging')
# Bidirectional mapping: level name -> numeric value and numeric -> name.
LOG_LEVELS = dict(logging._nameToLevel)
LOG_LEVELS.update(logging._levelToName)
# 'FATAL' is an alias for CRITICAL that the mappings above may not include.
LOG_LEVELS.setdefault('FATAL', logging.FATAL)
LOG_LEVELS.setdefault(logging.FATAL, 'FATAL')
# Read once at import time: when set, exc_info is stripped from log calls.
DISABLE_TRACEBACKS = os.environ.get('DISABLE_TRACEBACKS')
def get_logger(logger):
    """Return a logger for *logger*, which may be a name or a logger object.

    A `logging.NullHandler` is attached when the logger has no handlers so
    that emitting records never triggers the "no handlers" warning.
    """
    resolved = logging.getLogger(logger) if isinstance(logger, str) else logger
    if not resolved.handlers:
        resolved.addHandler(logging.NullHandler())
    return resolved
def get_loglevel(level):
    """Return the numeric loglevel for *level* (a name or a number)."""
    return LOG_LEVELS[level] if isinstance(level, str) else level
def naive_format_parts(fmt):
    """Yield the conversion character for each ``%`` specifier in *fmt*.

    ``None`` is yielded for empty specifiers and — a quirk of the original
    index arithmetic, deliberately preserved here — whenever the piece at
    ``pieces[position - 1]`` is empty.
    """
    pieces = fmt.split('%')
    for position, piece in enumerate(pieces[1:]):
        if piece and pieces[position - 1]:
            yield piece[0]
        else:
            yield None
def safeify_format(fmt, args, filters=None):
    """Yield *args* with safe conversions applied per the specifiers in *fmt*.

    ``%s`` arguments go through ``safe_str`` and ``%r`` through ``safe_repr``
    (or the caller-supplied *filters*); other arguments pass unchanged.
    """
    if not filters:
        filters = {'s': safe_str, 'r': safe_repr}
    for position, conversion in enumerate(naive_format_parts(fmt)):
        sanitize = filters.get(conversion)
        arg = args[position]
        yield sanitize(arg) if sanitize else arg
class LogMixin:
    """Mixin that adds severity methods to any class.

    Messages go to a logger named after the concrete class (see
    ``logger_name``) and are prefixed with that name (see ``annotate``).
    """
    def debug(self, *args, **kwargs):
        return self.log(logging.DEBUG, *args, **kwargs)
    def info(self, *args, **kwargs):
        return self.log(logging.INFO, *args, **kwargs)
    def warn(self, *args, **kwargs):
        return self.log(logging.WARN, *args, **kwargs)
    def error(self, *args, **kwargs):
        # Errors include the traceback by default (unless DISABLE_TRACEBACKS).
        kwargs.setdefault('exc_info', True)
        return self.log(logging.ERROR, *args, **kwargs)
    def critical(self, *args, **kwargs):
        kwargs.setdefault('exc_info', True)
        return self.log(logging.CRITICAL, *args, **kwargs)
    def annotate(self, text):
        """Prefix *text* with this class's logger name."""
        return f'{self.logger_name} - {text}'
    def log(self, severity, *args, **kwargs):
        """Log *args* at *severity*.

        When the first argument is a format string followed by more
        arguments, the arguments are passed through the safe str/repr
        filters; otherwise all arguments are joined into one message.
        """
        if DISABLE_TRACEBACKS:
            kwargs.pop('exc_info', None)
        if self.logger.isEnabledFor(severity):
            log = self.logger.log
            if len(args) > 1 and isinstance(args[0], str):
                expand = [maybe_evaluate(arg) for arg in args[1:]]
                # %r is rewritten to %s because the arguments have already
                # been repr-converted by safeify_format at this point.
                return log(severity,
                           self.annotate(args[0].replace('%r', '%s')),
                           *list(safeify_format(args[0], expand)), **kwargs)
            else:
                return self.logger.log(
                    severity, self.annotate(' '.join(map(safe_str, args))),
                    **kwargs)
    def get_logger(self):
        # NOTE(review): intentionally shadows the module-level get_logger();
        # this method delegates to it with the class's logger name.
        return get_logger(self.logger_name)
    def is_enabled_for(self, level):
        """Return True when *level* (name or number) would be emitted."""
        return self.logger.isEnabledFor(self.get_loglevel(level))
    def get_loglevel(self, level):
        """Resolve a level name to its numeric value (numbers pass through)."""
        if not isinstance(level, numbers.Integral):
            return LOG_LEVELS[level]
        return level
    @cached_property
    def logger(self):
        # Resolved lazily and cached on first access.
        return self.get_logger()
    @property
    def logger_name(self):
        return self.__class__.__name__
class Log(LogMixin):
    """A `LogMixin` bound to an explicit logger name (and optional logger)."""

    def __init__(self, name, logger=None):
        self._logger_name = name
        self._logger = logger

    def get_logger(self):
        # Prefer the explicitly supplied logger; otherwise resolve by name.
        return self._logger or LogMixin.get_logger(self)

    @property
    def logger_name(self):
        return self._logger_name
def setup_logging(loglevel=None, logfile=None):
    """Setup logging on the root logger.

    :param loglevel: level name or number (defaults to ``'ERROR'``).
    :param logfile: file path or writable object (defaults to stderr).
    """
    root = logging.getLogger()
    level = get_loglevel(loglevel or 'ERROR')
    target = logfile or sys.__stderr__
    if not root.handlers:
        # A writable object gets a stream handler; a path gets a file
        # handler that survives external log rotation.
        if hasattr(target, 'write'):
            root.addHandler(logging.StreamHandler(target))
        else:
            root.addHandler(WatchedFileHandler(target))
    root.setLevel(level)
    return root
| bsd-3-clause |
DPaaS-Raksha/devstack | tools/jenkins/jenkins_home/print_summary.py | 26 | 1210 | #!/usr/bin/python
import urllib
import json
import sys
def print_usage():
    """Print invocation help to stdout and terminate the process."""
    print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
           % sys.argv[0])
    sys.exit()
def fetch_blob(url):
    """Return the parsed JSON blob from the Jenkins API at *url* (Python 2 urllib)."""
    return json.loads(urllib.urlopen(url + '/api/json').read())
# --- script entry (Python 2) ---
# Require the Jenkins base URL as the sole command-line argument.
if len(sys.argv) < 2:
    print_usage()
BASE_URL = sys.argv[1]
root = fetch_blob(BASE_URL)
results = {}
# Walk every job; matrix jobs expose 'activeConfigurations'.
for job_url in root['jobs']:
    job = fetch_blob(job_url['url'])
    if job.get('activeConfigurations'):
        # Job names are assumed to look like "<tag>-<name>" — this split
        # raises if the name has more or fewer than one dash.
        (tag, name) = job['name'].split('-')
        if not results.get(tag):
            results[tag] = {}
        if not results[tag].get(name):
            results[tag][name] = []
        # Collect per-configuration status and a console-log link (when a
        # build has run at all).
        for config_url in job['activeConfigurations']:
            config = fetch_blob(config_url['url'])
            log_url = ''
            if config.get('lastBuild'):
                log_url = config['lastBuild']['url'] + 'console'
            results[tag][name].append({'test': config['displayName'],
                                       'status': config['color'],
                                       'logUrl': log_url,
                                       'healthReport': config['healthReport']})
# Python 2 print statement: emit the whole summary as one JSON document.
print json.dumps(results)
| apache-2.0 |
xen0l/ansible | lib/ansible/modules/network/f5/bigip_gtm_wide_ip.py | 6 | 23101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_gtm_wide_ip
short_description: Manages F5 BIG-IP GTM wide ip
description:
- Manages F5 BIG-IP GTM wide ip.
version_added: 2.0
options:
pool_lb_method:
description:
- Specifies the load balancing method used to select a pool in this wide
IP. This setting is relevant only when multiple pools are configured
for a wide IP.
- The C(round_robin) value is deprecated and will be removed in Ansible 2.9.
- The C(global_availability) value is deprecated and will be removed in Ansible 2.9.
required: True
aliases: ['lb_method']
choices:
- round-robin
- ratio
- topology
- global-availability
- global_availability
- round_robin
version_added: 2.5
name:
description:
- Wide IP name. This name must be formatted as a fully qualified
domain name (FQDN). You can also use the alias C(wide_ip) but this
is deprecated and will be removed in a future Ansible version.
required: True
aliases:
- wide_ip
type:
description:
- Specifies the type of wide IP. GTM wide IPs need to be keyed by query
type in addition to name, since pool members need different attributes
depending on the response RDATA they are meant to supply. This value
is required if you are using BIG-IP versions >= 12.0.0.
choices:
- a
- aaaa
- cname
- mx
- naptr
- srv
version_added: 2.4
state:
description:
- When C(present) or C(enabled), ensures that the Wide IP exists and
is enabled.
- When C(absent), ensures that the Wide IP has been removed.
- When C(disabled), ensures that the Wide IP exists and is disabled.
default: present
choices:
- present
- absent
- disabled
- enabled
version_added: 2.4
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
pools:
description:
- The pools that you want associated with the Wide IP.
- If C(ratio) is not provided when creating a new Wide IP, it will default
to 1.
suboptions:
name:
description:
- The name of the pool to include.
required: True
ratio:
description:
- Ratio for the pool.
- The system uses this number with the Ratio load balancing method.
version_added: 2.5
irules:
version_added: 2.6
description:
- List of rules to be applied.
- If you want to remove all existing iRules, specify a single empty value; C("").
See the documentation for an example.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Set lb method
bigip_gtm_wide_ip:
server: lb.mydomain.com
user: admin
password: secret
pool_lb_method: round-robin
name: my-wide-ip.example.com
delegate_to: localhost
- name: Add iRules to the Wide IP
bigip_gtm_wide_ip:
server: lb.mydomain.com
user: admin
password: secret
pool_lb_method: round-robin
name: my-wide-ip.example.com
irules:
- irule1
- irule2
delegate_to: localhost
- name: Remove one iRule from the Virtual Server
bigip_gtm_wide_ip:
server: lb.mydomain.com
user: admin
password: secret
pool_lb_method: round-robin
name: my-wide-ip.example.com
irules:
- irule1
delegate_to: localhost
- name: Remove all iRules from the Virtual Server
bigip_gtm_wide_ip:
server: lb.mydomain.com
user: admin
password: secret
pool_lb_method: round-robin
name: my-wide-ip.example.com
irules: ""
delegate_to: localhost
- name: Assign a pool with ratio to the Wide IP
bigip_gtm_wide_ip:
server: lb.mydomain.com
user: admin
password: secret
pool_lb_method: round-robin
name: my-wide-ip.example.com
pools:
- name: pool1
ratio: 100
delegate_to: localhost
'''
RETURN = r'''
lb_method:
description: The new load balancing method used by the wide IP.
returned: changed
type: string
sample: topology
state:
description: The new state of the wide IP.
returned: changed
type: string
sample: disabled
irules:
description: iRules set on the Wide IP.
returned: changed
type: list
sample: ['/Common/irule1', '/Common/irule2']
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import is_valid_fqdn
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import is_valid_fqdn
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for the GTM wide IP module."""
    # REST API attribute name -> module parameter name.
    api_map = {
        'poolLbMode': 'pool_lb_method',
        'rules': 'irules',
    }
    # Parameters diffed between desired ("want") and current ("have") state.
    updatables = [
        'pool_lb_method', 'state', 'pools', 'irules', 'enabled', 'disabled'
    ]
    # Parameters reported back to the user when something changed.
    returnables = [
        'name', 'pool_lb_method', 'state', 'pools', 'irules'
    ]
    # Attributes sent to the REST API on create/modify.
    api_attributes = [
        'poolLbMode', 'enabled', 'disabled', 'pools', 'rules'
    ]
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def disabled(self):
        # The API only reports a truthy flag; normalise anything else to False.
        return self._values['disabled'] is True

    @property
    def enabled(self):
        return self._values['enabled'] is True

    @property
    def pools(self):
        """Member pools, sorted by order, with API bookkeeping keys stripped."""
        raw = self._values['pools']
        if raw is None:
            return None
        result = []
        for entry in sorted(raw, key=lambda member: member['order']):
            pool = dict(entry)
            full_name = '/{0}/{1}'.format(entry['partition'], entry['name'])
            # Drop bookkeeping keys, then re-add the fully-qualified name.
            for dropped in ('nameReference', 'order', 'name', 'partition'):
                del pool[dropped]
            pool['name'] = full_name
            result.append(pool)
        return result
class ModuleParameters(Parameters):
    """Parameters as supplied by the user to the Ansible module."""
    @property
    def pool_lb_method(self):
        """Normalized LB method (dash spelling).

        The underscore spellings are accepted but deprecated; using one
        queues a deprecation warning in ``__warnings``.
        """
        if self._values['pool_lb_method'] is None:
            return None
        lb_method = str(self._values['pool_lb_method'])
        if lb_method == 'global_availability':
            if self._values['__warnings'] is None:
                self._values['__warnings'] = []
            self._values['__warnings'].append(
                dict(
                    msg='The provided pool_lb_method is deprecated',
                    version='2.4'
                )
            )
            lb_method = 'global-availability'
        elif lb_method == 'round_robin':
            if self._values['__warnings'] is None:
                self._values['__warnings'] = []
            self._values['__warnings'].append(
                dict(
                    msg='The provided pool_lb_method is deprecated',
                    version='2.4'
                )
            )
            lb_method = 'round-robin'
        return lb_method
    @property
    def type(self):
        """DNS record type as a plain string, or None when unset."""
        if self._values['type'] is None:
            return None
        return str(self._values['type'])
    @property
    def name(self):
        """Wide IP name; must be a valid FQDN."""
        if self._values['name'] is None:
            return None
        if not is_valid_fqdn(self._values['name']):
            raise F5ModuleError(
                "The provided name must be a valid FQDN"
            )
        return self._values['name']
    @property
    def state(self):
        # 'enabled' is an alias for 'present'; other states pass through.
        if self._values['state'] == 'enabled':
            return 'present'
        return self._values['state']
    @property
    def enabled(self):
        """Tri-state flag: True/False for known states, None otherwise."""
        if self._values['state'] == 'disabled':
            return False
        elif self._values['state'] in ['present', 'enabled']:
            return True
        else:
            return None
    @property
    def disabled(self):
        """Mirror image of ``enabled`` (tri-state)."""
        if self._values['state'] == 'disabled':
            return True
        elif self._values['state'] in ['present', 'enabled']:
            return False
        else:
            return None
    @property
    def pools(self):
        """Pools with names fully qualified by the module's partition."""
        result = []
        if self._values['pools'] is None:
            return None
        for item in self._values['pools']:
            pool = dict()
            if 'name' not in item:
                raise F5ModuleError(
                    "'name' is a required key for items in the list of pools."
                )
            if 'ratio' in item:
                pool['ratio'] = item['ratio']
            pool['name'] = fq_name(self.partition, item['name'])
            result.append(pool)
        return result
    @property
    def irules(self):
        """Fully-qualified iRule names.

        A single empty string is the sentinel for "remove all iRules" and
        is returned unchanged as ``''``.
        """
        results = []
        if self._values['irules'] is None:
            return None
        if len(self._values['irules']) == 1 and self._values['irules'][0] == '':
            return ''
        for irule in self._values['irules']:
            result = fq_name(self.partition, irule)
            results.append(result)
        return results
class Changes(Parameters):
    """Base class turning computed parameter changes into plain dicts."""
    def to_return(self):
        """Return a dict of the returnable properties, filtered of Nones.

        NOTE(review): the blanket ``except Exception: pass`` means any
        property error silently yields a partial (possibly empty) result —
        confirm this best-effort behaviour is intended before tightening it.
        """
        result = {}
        try:
            for returnable in self.returnables:
                change = getattr(self, returnable)
                if isinstance(change, dict):
                    # Dict-valued properties are flattened into the result.
                    result.update(change)
                else:
                    result[returnable] = change
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    """Changes shaped for submission to the BIG-IP API."""

    @property
    def irules(self):
        value = self._values['irules']
        if value is None:
            return None
        # The empty-string sentinel ("remove all iRules") becomes an empty
        # list, which is what the API expects.
        return [] if value == '' else value
class ReportableChanges(Changes):
    """Changes shaped for reporting back to the Ansible user."""

    @property
    def pool_lb_method(self):
        method = self._values['pool_lb_method']
        # Report under both the new name and the deprecated ``lb_method`` alias.
        return dict(lb_method=method, pool_lb_method=method)
class Difference(object):
    """Computes which *want* values differ from the device's *have* values."""
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        """Return the changed value for *param*, or None when unchanged."""
        # Use a specialized comparator property when one is defined on this
        # class; otherwise fall back to simple attribute inequality.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # Missing on 'have' means the wanted value is a change.
            return attr1
    def to_tuple(self, items):
        """Flatten a list of dicts into (str(key), str(value)) tuples."""
        result = []
        for x in items:
            tmp = [(str(k), str(v)) for k, v in iteritems(x)]
            result += tmp
        return result
    def _diff_complex_items(self, want, have):
        # Compare list-of-dict parameters; wanted items that are already a
        # subset of what the device has are not reported as changes.
        if want == [] and have is None:
            return None
        if want is None:
            return None
        w = self.to_tuple(want)
        h = self.to_tuple(have)
        if set(w).issubset(set(h)):
            return None
        else:
            return want
    @property
    def state(self):
        # Only report a state change when it flips enabled/disabled.
        if self.want.state == 'disabled' and self.have.enabled:
            return self.want.state
        elif self.want.state in ['present', 'enabled'] and self.have.disabled:
            return self.want.state
    @property
    def pools(self):
        result = self._diff_complex_items(self.want.pools, self.have.pools)
        return result
    @property
    def irules(self):
        # '' is the sentinel for "remove all iRules"; order is ignored when
        # comparing the two rule lists.
        if self.want.irules is None:
            return None
        if self.want.irules == '' and self.have.irules is None:
            return None
        if self.want.irules == '' and len(self.have.irules) > 0:
            return []
        if sorted(set(self.want.irules)) != sorted(set(self.have.irules)):
            return self.want.irules
class ModuleManager(object):
    """Dispatches to the typed or untyped manager based on the TMOS version."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.kwargs = kwargs

    def exec_module(self):
        kind = 'untyped' if self.version_is_less_than_12() else 'typed'
        return self.get_manager(kind).exec_module()

    def get_manager(self, type):
        if type == 'typed':
            return TypedManager(**self.kwargs)
        elif type == 'untyped':
            return UntypedManager(**self.kwargs)

    def version_is_less_than_12(self):
        # Wide IPs are keyed by record type starting with BIG-IP 12.0.0.
        version = self.client.api.tmos_version
        return LooseVersion(version) < LooseVersion('12.0.0')
class BaseManager(object):
    """Shared create/update/delete workflow for both manager flavours."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        # want: user-supplied state; have: device state; changes: the diff.
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        """Seed ``changes`` with every user-supplied returnable (creation path)."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        """Populate ``changes`` with the want/have diff; return True if any."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def exec_module(self):
        """Run the requested state change and return the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state
        try:
            # 'disabled' still means the resource must exist on the device.
            if state in ["present", "disabled"]:
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        """Surface queued deprecation warnings through the Ansible module."""
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        """Ensure the wide IP exists, updating it if it already does."""
        if self.exists():
            return self.update()
        else:
            return self.create()
    def create(self):
        """Create the wide IP (no-op in check mode)."""
        if self.want.pool_lb_method is None:
            raise F5ModuleError(
                "The 'pool_lb_method' option is required when state is 'present'"
            )
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def should_update(self):
        """Return True when the want/have diff is non-empty."""
        result = self._update_changed_options()
        if result:
            return True
        return False
    def update(self):
        """Update the existing wide IP if anything changed (check-mode aware)."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def absent(self):
        """Ensure the wide IP does not exist."""
        if self.exists():
            return self.remove()
        return False
    def remove(self):
        """Delete the wide IP and verify the deletion took effect."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the Wide IP")
        return True
class UntypedManager(BaseManager):
    """Manager for BIG-IP versions < 12.0.0 (wide IPs not keyed by type)."""
    def exists(self):
        """Return True if the wide IP already exists on the device."""
        return self.client.api.tm.gtm.wideips.wideip.exists(
            name=self.want.name,
            partition=self.want.partition
        )
    def update_on_device(self):
        """Apply the changed attributes to the wide IP on the device."""
        params = self.changes.api_params()
        # BUGFIX: this previously read ``wideips.wipeip`` (typo), which made
        # every untyped update raise AttributeError; all sibling methods use
        # ``wideips.wideip``.
        result = self.client.api.tm.gtm.wideips.wideip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result.modify(**params)
    def read_current_from_device(self):
        """Load the wide IP's current attributes into ApiParameters."""
        resource = self.client.api.tm.gtm.wideips.wideip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        return ApiParameters(params=result)
    def create_on_device(self):
        """Create the wide IP with the accumulated change parameters."""
        params = self.changes.api_params()
        self.client.api.tm.gtm.wideips.wideip.create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )
    def remove_from_device(self):
        """Delete the wide IP if it can be loaded."""
        result = self.client.api.tm.gtm.wideips.wideip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        if result:
            result.delete()
class TypedManager(BaseManager):
    """Manager for BIG-IP >= 12.0.0, where wide IPs are keyed by record type."""
    def __init__(self, *args, **kwargs):
        super(TypedManager, self).__init__(**kwargs)
        if self.want.type is None:
            raise F5ModuleError(
                "The 'type' option is required for BIG-IP instances "
                "greater than or equal to 12.x"
            )
        # Record type -> SDK collection attribute ('a' needs the awkward
        # 'a_s' spelling to avoid clashing with Python identifiers).
        type_map = dict(
            a='a_s',
            aaaa='aaaas',
            cname='cnames',
            mx='mxs',
            naptr='naptrs',
            srv='srvs'
        )
        self.collection = type_map[self.want.type]
    def exists(self):
        """Return True if the typed wide IP exists on the device."""
        wideips = self.client.api.tm.gtm.wideips
        collection = getattr(wideips, self.collection)
        resource = getattr(collection, self.want.type)
        result = resource.exists(
            name=self.want.name,
            partition=self.want.partition
        )
        return result
    def update_on_device(self):
        """Apply the changed attributes to the typed wide IP."""
        params = self.changes.api_params()
        wideips = self.client.api.tm.gtm.wideips
        collection = getattr(wideips, self.collection)
        resource = getattr(collection, self.want.type)
        result = resource.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result.modify(**params)
    def read_current_from_device(self):
        """Load the typed wide IP's current attributes into ApiParameters."""
        wideips = self.client.api.tm.gtm.wideips
        collection = getattr(wideips, self.collection)
        resource = getattr(collection, self.want.type)
        result = resource.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = result.attrs
        return ApiParameters(params=result)
    def create_on_device(self):
        """Create the typed wide IP with the accumulated change parameters."""
        params = self.changes.api_params()
        wideips = self.client.api.tm.gtm.wideips
        collection = getattr(wideips, self.collection)
        resource = getattr(collection, self.want.type)
        resource.create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )
    def remove_from_device(self):
        """Delete the typed wide IP if it can be loaded."""
        wideips = self.client.api.tm.gtm.wideips
        collection = getattr(wideips, self.collection)
        resource = getattr(collection, self.want.type)
        result = resource.load(
            name=self.want.name,
            partition=self.want.partition
        )
        if result:
            result.delete()
class ArgumentSpec(object):
    """Builds the Ansible argument spec for this module."""
    def __init__(self):
        lb_method_choices = [
            'round-robin', 'topology', 'ratio', 'global-availability',
            # TODO(Remove in Ansible 2.9)
            'round_robin', 'global_availability'
        ]
        self.supports_check_mode = True
        argument_spec = dict(
            pool_lb_method=dict(
                choices=lb_method_choices,
                aliases=['lb_method']
            ),
            name=dict(
                required=True,
                aliases=['wide_ip']
            ),
            type=dict(
                choices=[
                    'a', 'aaaa', 'cname', 'mx', 'naptr', 'srv'
                ]
            ),
            state=dict(
                default='present',
                choices=['absent', 'present', 'enabled', 'disabled']
            ),
            pools=dict(
                type='list',
                options=dict(
                    name=dict(required=True),
                    ratio=dict(type='int')
                )
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            irules=dict(
                type='list',
            ),
        )
        # Merge the shared F5 connection arguments with the module-specific ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: parse arguments, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")

    # BUGFIX: ``client`` must be initialized before the try block —
    # previously, if F5Client(**module.params) raised F5ModuleError, the
    # except branch's cleanup_tokens(client) hit a NameError that masked
    # the real error message.
    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as e:
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(e))
| gpl-3.0 |
macosforge/ccs-calendarserver | txdav/common/datastore/upgrade/sql/test/test_upgrade.py | 1 | 16058 | ##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for L{txdav.common.datastore.upgrade.sql.upgrade}.
"""
from twext.enterprise.dal.parseschema import schemaFromPath
from twext.enterprise.ienterprise import ORACLE_DIALECT, POSTGRES_DIALECT
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python.modules import getModule
from twisted.trial.unittest import TestCase
from txdav.base.datastore.suboracle import cleanDatabase
from txdav.common.datastore.sql_dump import dumpSchema
from txdav.common.datastore.test.util import StubNotifierFactory, SQLStoreBuilder, \
DB_TYPE, theStoreBuilder
from txdav.common.datastore.upgrade.sql.upgrade import (
UpgradeDatabaseSchemaStep, UpgradeDatabaseAddressBookDataStep, UpgradeDatabaseCalendarDataStep, NotAllowedToUpgrade)
import re
class SchemaUpgradeTests(TestCase):
"""
Tests for L{UpgradeDatabaseSchemaStep}.
"""
    def __init__(self, methodName='runTest'):
        super(SchemaUpgradeTests, self).__init__(methodName)
        # Postgres tests can share the module-wide store builder; other
        # dialects get a dedicated builder whose databases are not cleaned
        # up between runs.
        if DB_TYPE[0] == POSTGRES_DIALECT:
            self.testStoreBuilder = theStoreBuilder
        else:
            self.testStoreBuilder = SQLStoreBuilder(dsnUser="test_dbUpgrades", noCleanup=True)
@staticmethod
def _getRawSchemaVersion(fp, versionKey):
schema = fp.getContent()
found = re.search("insert into CALENDARSERVER (\(NAME, VALUE\) )?values \('%s', '(\d+)'\);" % (versionKey,), schema)
return int(found.group(2)) if found else None
def _getSchemaVersion(self, fp, versionKey):
found = SchemaUpgradeTests._getRawSchemaVersion(fp, versionKey)
if found is None:
if versionKey == "VERSION":
self.fail("Could not determine schema version for: %s" % (fp,))
else:
return 1
return found
def test_scanUpgradeFiles(self):
upgrader = UpgradeDatabaseSchemaStep(None)
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema1")
files = upgrader.scanForUpgradeFiles("fake_dialect")
self.assertEqual(
files,
[(3, 4, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql"))],
)
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema2")
files = upgrader.scanForUpgradeFiles("fake_dialect")
self.assertEqual(
files,
[
(3, 4, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql")),
(3, 5, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_5.sql")),
(4, 5, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_4_to_5.sql")),
]
)
def test_determineUpgradeSequence(self):
upgrader = UpgradeDatabaseSchemaStep(None)
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema1")
files = upgrader.scanForUpgradeFiles("fake_dialect")
upgrades = upgrader.determineUpgradeSequence(3, 4, files, "fake_dialect")
self.assertEqual(
upgrades,
[upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql")],
)
self.assertRaises(RuntimeError, upgrader.determineUpgradeSequence, 3, 5, files, "fake_dialect")
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema2")
files = upgrader.scanForUpgradeFiles("fake_dialect")
upgrades = upgrader.determineUpgradeSequence(3, 5, files, "fake_dialect")
self.assertEqual(
upgrades,
[upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_5.sql")]
)
upgrades = upgrader.determineUpgradeSequence(4, 5, files, "fake_dialect")
self.assertEqual(
upgrades,
[upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_4_to_5.sql")]
)
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema3")
files = upgrader.scanForUpgradeFiles("fake_dialect")
upgrades = upgrader.determineUpgradeSequence(3, 5, files, "fake_dialect")
self.assertEqual(
upgrades,
[
upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql"),
upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_4_to_5.sql"),
]
)
def test_upgradeAvailability(self):
"""
Make sure that each old schema has a valid upgrade path to the current one.
"""
for dialect in (POSTGRES_DIALECT, ORACLE_DIALECT,):
upgrader = UpgradeDatabaseSchemaStep(None)
files = upgrader.scanForUpgradeFiles(dialect)
current_version = self._getSchemaVersion(upgrader.schemaLocation.child(DB_TYPE[2]), "VERSION")
for child in upgrader.schemaLocation.child("old").child(dialect).globChildren("*.sql"):
old_version = self._getSchemaVersion(child, "VERSION")
upgrades = upgrader.determineUpgradeSequence(old_version, current_version, files, dialect)
self.assertNotEqual(len(upgrades), 0)
# def test_upgradeDataAvailability(self):
# """
# Make sure that each upgrade file has a valid data upgrade file or None.
# """
#
# for dialect in (POSTGRES_DIALECT, ORACLE_DIALECT,):
# upgrader = UpgradeDatabaseSchemaStep(None)
# files = upgrader.scanForUpgradeFiles(dialect)
# for _ignore_from, _ignore_to, fp in files:
# result = upgrader.getDataUpgrade(fp)
# if result is not None:
# self.assertIsInstance(result, types.FunctionType)
@inlineCallbacks
def _dbSchemaUpgrades(self, child):
"""
This does a full DB test of all possible upgrade paths. For each old schema, it loads it into the DB
then runs the upgrade service. This ensures all the upgrade.sql files work correctly - at least for
postgres.
"""
store = yield self.testStoreBuilder.buildStore(
self, {"push": StubNotifierFactory()}, enableJobProcessing=False
)
@inlineCallbacks
def _loadOldSchema(path):
"""
Use the postgres schema mechanism to do tests under a separate "namespace"
in postgres that we can quickly wipe clean afterwards.
"""
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("create schema test_dbUpgrades")
yield startTxn.execSQL("set search_path to test_dbUpgrades")
yield startTxn.execSQLBlock(path.getContent())
yield startTxn.commit()
@inlineCallbacks
def _loadVersion():
startTxn = store.newTransaction("test_dbUpgrades")
new_version = yield startTxn.execSQL("select value from calendarserver where name = 'VERSION'")
yield startTxn.commit()
returnValue(int(new_version[0][0]))
@inlineCallbacks
def _loadSchemaFromDatabase():
startTxn = store.newTransaction("test_dbUpgrades")
schema = yield dumpSchema(startTxn, "Upgraded from %s" % (child.basename(),), "test_dbUpgrades")
yield startTxn.commit()
returnValue(schema)
@inlineCallbacks
def _unloadOldSchema():
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("set search_path to public")
yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
elif startTxn.dbtype.dialect == ORACLE_DIALECT:
yield cleanDatabase(startTxn)
yield startTxn.commit()
@inlineCallbacks
def _cleanupOldSchema():
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("set search_path to public")
yield startTxn.execSQL("drop schema if exists test_dbUpgrades cascade")
elif startTxn.dbtype.dialect == ORACLE_DIALECT:
yield cleanDatabase(startTxn)
yield startTxn.commit()
self.addCleanup(_cleanupOldSchema)
test_upgrader = UpgradeDatabaseSchemaStep(None)
expected_version = self._getSchemaVersion(test_upgrader.schemaLocation.child(DB_TYPE[2]), "VERSION")
# Upgrade allowed
upgrader = UpgradeDatabaseSchemaStep(store)
yield _loadOldSchema(child)
yield upgrader.databaseUpgrade()
new_version = yield _loadVersion()
# Compare the upgraded schema with the expected current schema
new_schema = yield _loadSchemaFromDatabase()
currentSchema = schemaFromPath(test_upgrader.schemaLocation.child(DB_TYPE[2]))
mismatched = currentSchema.compare(new_schema)
# These are special case exceptions
for i in (
"Table: CALENDAR_HOME, column name DATAVERSION default mismatch",
"Table: CALENDAR_HOME, mismatched constraints: set([<Constraint: (NOT NULL ('DATAVERSION',) None)>])",
"Table: ADDRESSBOOK_HOME, column name DATAVERSION default mismatch",
"Table: ADDRESSBOOK_HOME, mismatched constraints: set([<Constraint: (NOT NULL ('DATAVERSION',) None)>])",
"Table: PUSH_NOTIFICATION_WORK, column name PUSH_PRIORITY default mismatch",
):
try:
mismatched.remove(i)
except ValueError:
pass
if mismatched and mismatched[0].startswith("Comparing schema: current.sql to Upgraded from"):
del mismatched[0]
self.assertEqual(len(mismatched), 0, "Schema mismatch:\n" + "\n".join(mismatched))
yield _unloadOldSchema()
self.assertEqual(new_version, expected_version)
# Upgrade disallowed
upgrader = UpgradeDatabaseSchemaStep(store, failIfUpgradeNeeded=True)
yield _loadOldSchema(child)
old_version = yield _loadVersion()
try:
yield upgrader.databaseUpgrade()
except NotAllowedToUpgrade:
pass
except Exception:
self.fail("NotAllowedToUpgrade not raised")
else:
self.fail("NotAllowedToUpgrade not raised")
new_version = yield _loadVersion()
yield _unloadOldSchema()
self.assertEqual(old_version, new_version)
@inlineCallbacks
def _dbDataUpgrades(self, version, versionKey, upgraderClass):
"""
This does a full DB test of all possible data upgrade paths. For each old schema, it loads it into the DB
then runs the data upgrade service. This ensures all the upgrade_XX.py files work correctly - at least for
postgres.
TODO: this currently does not create any data to test with. It simply runs the upgrade on an empty
store.
"""
store = yield self.testStoreBuilder.buildStore(
self, {"push": StubNotifierFactory()}, enableJobProcessing=False
)
@inlineCallbacks
def _loadOldData(path, oldVersion):
"""
Use the postgres schema mechanism to do tests under a separate "namespace"
in postgres that we can quickly wipe clean afterwards.
"""
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("create schema test_dbUpgrades")
yield startTxn.execSQL("set search_path to test_dbUpgrades")
yield startTxn.execSQLBlock(path.getContent())
yield startTxn.execSQL("update CALENDARSERVER set VALUE = '%s' where NAME = '%s'" % (oldVersion, versionKey,))
yield startTxn.commit()
@inlineCallbacks
def _loadVersion():
startTxn = store.newTransaction("test_dbUpgrades")
new_version = yield startTxn.execSQL("select value from calendarserver where name = '%s'" % (versionKey,))
yield startTxn.commit()
returnValue(int(new_version[0][0]))
@inlineCallbacks
def _unloadOldData():
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("set search_path to public")
yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
elif startTxn.dbtype.dialect == ORACLE_DIALECT:
yield cleanDatabase(startTxn)
yield startTxn.commit()
@inlineCallbacks
def _cleanupOldData():
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("set search_path to public")
yield startTxn.execSQL("drop schema if exists test_dbUpgrades cascade")
elif startTxn.dbtype.dialect == ORACLE_DIALECT:
yield cleanDatabase(startTxn)
yield startTxn.commit()
self.addCleanup(_cleanupOldData)
test_upgrader = UpgradeDatabaseSchemaStep(None)
expected_version = self._getSchemaVersion(test_upgrader.schemaLocation.child(DB_TYPE[2]), versionKey)
oldVersion = version
upgrader = upgraderClass(store)
yield _loadOldData(test_upgrader.schemaLocation.child(DB_TYPE[2]), oldVersion)
yield upgrader.databaseUpgrade()
new_version = yield _loadVersion()
yield _unloadOldData()
self.assertEqual(new_version, expected_version)
# Dynamically generate one test method per old schema file / data version at
# module import time, so each upgrade path shows up as its own test case.
test_upgrader = UpgradeDatabaseSchemaStep(None)
DIALECT = DB_TYPE[0]
# Bind test methods for each schema version
for child in test_upgrader.schemaLocation.child("old").child(DIALECT).globChildren("*.sql"):
    # lchild=child binds the loop variable as a default argument, avoiding the
    # late-binding closure pitfall (all methods would otherwise see the last child).
    def f(self, lchild=child):
        return self._dbSchemaUpgrades(lchild)
    setattr(SchemaUpgradeTests, "test_dbSchemaUpgrades_%s" % (child.basename().split(".", 1)[0],), f)
# Bind test methods for each addressbook data version
versions = set()
for child in test_upgrader.schemaLocation.child("old").child(DIALECT).globChildren("*.sql"):
    version = SchemaUpgradeTests._getRawSchemaVersion(child, "ADDRESSBOOK-DATAVERSION")
    # Schemas that predate the data-version key are treated as version 1.
    versions.add(version if version else 1)
for version in sorted(versions):
    def f(self, lversion=version):
        return self._dbDataUpgrades(lversion, "ADDRESSBOOK-DATAVERSION", UpgradeDatabaseAddressBookDataStep)
    setattr(SchemaUpgradeTests, "test_dbAddressBookDataUpgrades_%s" % (version,), f)
# Bind test methods for each calendar data version
versions = set()
for child in test_upgrader.schemaLocation.child("old").child(DIALECT).globChildren("*.sql"):
    version = SchemaUpgradeTests._getRawSchemaVersion(child, "CALENDAR-DATAVERSION")
    versions.add(version if version else 1)
for version in sorted(versions):
    def f(self, lversion=version):
        return self._dbDataUpgrades(lversion, "CALENDAR-DATAVERSION", UpgradeDatabaseCalendarDataStep)
    setattr(SchemaUpgradeTests, "test_dbCalendarDataUpgrades_%s" % (version,), f)
| apache-2.0 |
pbrod/scipy | scipy/linalg/decomp_schur.py | 117 | 8375 | """Schur decomposition functions."""
from __future__ import division, print_function, absolute_import
import numpy
from numpy import asarray_chkfinite, single, asarray
from scipy._lib.six import callable
# Local imports.
from . import misc
from .misc import LinAlgError, _datacopied
from .lapack import get_lapack_funcs
from .decomp import eigvals
__all__ = ['schur', 'rsf2csf']

# Dtype characters whose complex output should be double precision ('D').
_double_precision = ['i','l','d']


def schur(a, output='real', lwork=None, overwrite_a=False, sort=None,
          check_finite=True):
    """
    Compute Schur decomposition of a matrix.

    The Schur decomposition is::

        A = Z T Z^H

    where Z is unitary and T is either upper-triangular, or for real
    Schur decomposition (output='real'), quasi-upper triangular. In
    the quasi-triangular form, 2x2 blocks describing complex-valued
    eigenvalue pairs may extrude from the diagonal.

    Parameters
    ----------
    a : (M, M) array_like
        Matrix to decompose
    output : {'real', 'complex'}, optional
        Construct the real or complex Schur decomposition (for real matrices).
    lwork : int, optional
        Work array size. If None or -1, it is automatically computed.
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance).
    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given a eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True).
        Alternatively, string parameters may be used::

            'lhp'   Left-hand plane (x.real < 0.0)
            'rhp'   Right-hand plane (x.real >= 0.0)
            'iuc'   Inside the unit circle (x*x.conjugate() <= 1.0)
            'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)

        Defaults to None (no sorting).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    T : (M, M) ndarray
        Schur form of A. It is real-valued for the real Schur decomposition.
    Z : (M, M) ndarray
        An unitary Schur transformation matrix for A.
        It is real-valued for the real Schur decomposition.
    sdim : int
        If and only if sorting was requested, a third return value will
        contain the number of eigenvalues satisfying the sort condition.

    Raises
    ------
    LinAlgError
        Error raised under three conditions:

        1. The algorithm failed due to a failure of the QR algorithm to
           compute all eigenvalues
        2. If eigenvalue sorting was requested, the eigenvalues could not be
           reordered due to a failure to separate eigenvalues, usually because
           of poor conditioning
        3. If eigenvalue sorting was requested, roundoff errors caused the
           leading eigenvalues to no longer satisfy the sorting condition

    See also
    --------
    rsf2csf : Convert real Schur form to complex Schur form

    """
    if output not in ['real','complex','r','c']:
        raise ValueError("argument must be 'real', or 'complex'")
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
        raise ValueError('expected square matrix')
    typ = a1.dtype.char
    # A complex decomposition of a real matrix requires promoting the input
    # to the matching complex dtype first.
    if output in ['complex','c'] and typ not in ['F','D']:
        if typ in _double_precision:
            a1 = a1.astype('D')
            typ = 'D'
        else:
            a1 = a1.astype('F')
            typ = 'F'
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    gees, = get_lapack_funcs(('gees',), (a1,))
    if lwork is None or lwork == -1:
        # get optimal work array via a LAPACK workspace query (lwork=-1).
        result = gees(lambda x: None, a1, lwork=-1)
        # Use builtin int: numpy.int was merely an alias of it and has been
        # removed in NumPy 1.24.
        lwork = result[-2][0].real.astype(int)
    if sort is None:
        sort_t = 0
        sfunction = lambda x: None
    else:
        sort_t = 1
        if callable(sort):
            sfunction = sort
        elif sort == 'lhp':
            sfunction = lambda x: (numpy.real(x) < 0.0)
        elif sort == 'rhp':
            sfunction = lambda x: (numpy.real(x) >= 0.0)
        elif sort == 'iuc':
            sfunction = lambda x: (abs(x) <= 1.0)
        elif sort == 'ouc':
            sfunction = lambda x: (abs(x) > 1.0)
        else:
            raise ValueError("sort parameter must be None, a callable, or " +
                             "one of ('lhp','rhp','iuc','ouc')")
    result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a,
                  sort_t=sort_t)
    # Translate the LAPACK info code into the documented exceptions.
    info = result[-1]
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gees'
                         % -info)
    elif info == a1.shape[0] + 1:
        raise LinAlgError('Eigenvalues could not be separated for reordering.')
    elif info == a1.shape[0] + 2:
        raise LinAlgError('Leading eigenvalues do not satisfy sort condition.')
    elif info > 0:
        raise LinAlgError("Schur form not found. Possibly ill-conditioned.")
    if sort_t == 0:
        return result[0], result[-3]
    else:
        # Sorting requested: also return sdim, the count of eigenvalues
        # satisfying the sort condition.
        return result[0], result[-3], result[1]
eps = numpy.finfo(float).eps
feps = numpy.finfo(single).eps
_array_kind = {'b':0, 'h':0, 'B': 0, 'i':0, 'l': 0, 'f': 0, 'd': 0, 'F': 1, 'D': 1}
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
_array_type = [['f', 'd'], ['F', 'D']]
def _commonType(*arrays):
kind = 0
precision = 0
for a in arrays:
t = a.dtype.char
kind = max(kind, _array_kind[t])
precision = max(precision, _array_precision[t])
return _array_type[kind][precision]
def _castCopy(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.char == type:
cast_arrays = cast_arrays + (a.copy(),)
else:
cast_arrays = cast_arrays + (a.astype(type),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def rsf2csf(T, Z, check_finite=True):
    """
    Convert real Schur form to complex Schur form.

    Convert a quasi-diagonal real-valued Schur form to the upper triangular
    complex-valued Schur form.

    Parameters
    ----------
    T : (M, M) array_like
        Real Schur form of the original matrix
    Z : (M, M) array_like
        Schur transformation matrix
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    T : (M, M) ndarray
        Complex Schur form of the original matrix
    Z : (M, M) ndarray
        Schur transformation matrix corresponding to the complex form

    See also
    --------
    schur : Schur decompose a matrix
    """
    if check_finite:
        Z, T = map(asarray_chkfinite, (Z, T))
    else:
        Z,T = map(asarray, (Z,T))
    if len(Z.shape) != 2 or Z.shape[0] != Z.shape[1]:
        raise ValueError("matrix must be square.")
    if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
        raise ValueError("matrix must be square.")
    if T.shape[0] != Z.shape[0]:
        raise ValueError("matrices must be same dimension.")
    N = T.shape[0]
    arr = numpy.array
    # Passing a 'F' (complex single) probe forces a complex common type.
    t = _commonType(Z, T, arr([3.0],'F'))
    Z, T = _castCopy(t, Z, T)
    # Local aliases for the numpy helpers used inside the loop.
    conj = numpy.conj
    dot = numpy.dot
    r_ = numpy.r_
    transp = numpy.transpose
    # Walk the subdiagonal bottom-up; each non-negligible T[m, m-1] marks a
    # 2x2 block to be rotated into upper-triangular (complex) form.
    for m in range(N-1, 0, -1):
        if abs(T[m,m-1]) > eps*(abs(T[m-1,m-1]) + abs(T[m,m])):
            k = slice(m-1, m+1)
            mu = eigvals(T[k,k]) - T[m,m]
            # Build a unitary 2x2 rotation G that annihilates T[m, m-1].
            r = misc.norm([mu[0], T[m,m-1]])
            c = mu[0] / r
            s = T[m,m-1] / r
            G = r_[arr([[conj(c), s]], dtype=t), arr([[-s, c]], dtype=t)]
            Gc = conj(transp(G))
            # Apply G on the left to the affected rows and Gc on the right to
            # the affected columns, keeping the similarity transform in Z.
            j = slice(m-1, N)
            T[k,j] = dot(G, T[k,j])
            i = slice(0, m+1)
            T[i,k] = dot(T[i,k], Gc)
            i = slice(0, N)
            Z[i,k] = dot(Z[i,k], Gc)
        # Zero out the subdiagonal entry regardless, removing rounding residue.
        T[m,m-1] = 0.0
    return T, Z
| bsd-3-clause |
mcglonelevi/pysazz | pysazz.py | 1 | 6416 | ###################################################
# Pysazz - By Stephen Levi McGlone #
###################################################
#!/usr/bin/python
import xml.etree.ElementTree as ET
import urllib.request as urllib
import os
import sys
import sass
# Name of the XML configuration file that pysazz reads and writes.
config = "pysazz.config.xml"


def init():
    """Create a starter pysazz.config.xml in the working directory.

    The generated file contains a single example <link> entry that the
    user is expected to edit.
    """
    # A context manager guarantees the handle is closed even if the write
    # fails; the previous open()/close() pair leaked the handle on error
    # and shadowed the builtin `file` name.
    with open(config, "w") as fh:
        fh.write("<?xml version=\"1.0\"?>\n<config>\n\t<links>\n\t\t<link folder=\"\">http://mtechnologies.site.nfoservers.com/_file3.scss</link>\n\t</links>\n</config>")
def compiled():
    """Build style.scss from the configured links and compile it to style.css.

    Remote files that already exist locally are NOT re-downloaded (contrast
    with update(), which always re-downloads).
    NOTE(review): this function duplicates almost all of update(); consider
    extracting a shared helper with a `force` flag.
    """
    #begin XML parse
    tree = ET.parse(config)
    root = tree.getroot()
    #create primary scss file
    file = open("style.scss", "w")
    #download and write imports for all scss files
    for x in range(0, len(root)):
        if root[x].tag == "links":
            for y in range(0, len(root[x])):
                #gets array of attributes and puts files in appropriate directories
                if len(root[x][y].attrib) > 0:
                    if 'saveas' in root[x][y].attrib:
                        if 'folder' in root[x][y].attrib:
                            # Ensure the target folder exists.
                            # NOTE(review): bare except around os.stat — consider
                            # catching OSError explicitly.
                            try:
                                os.stat(root[x][y].attrib['folder'])
                            except:
                                os.makedirs(root[x][y].attrib['folder'])
                            if root[x][y].text[:4] == "http":
                                # Download only when not already present locally.
                                if not os.path.isfile(root[x][y].attrib["folder"] + "/" + root[x][y].attrib["saveas"]):
                                    urllib.urlretrieve (root[x][y].text, root[x][y].attrib["folder"] + "/" + root[x][y].attrib["saveas"])
                                # Write an @import for sass/scss files.
                                if root[x][y].attrib["saveas"][-4:] == "scss" or root[x][y].attrib["saveas"][-4:] == "sass":
                                    file.write("@import \"" + root[x][y].attrib["folder"] + "/" + root[x][y].attrib["saveas"] + "\";\n")
                    else:
                        # No "saveas": derive the filename from the URL's last path segment.
                        if 'folder' in root[x][y].attrib:
                            try:
                                os.stat(root[x][y].attrib['folder'])
                            except:
                                os.makedirs(root[x][y].attrib['folder'])
                            if root[x][y].text[:4] == "http":
                                if not os.path.isfile(root[x][y].attrib["folder"] + "/" + root[x][y].text.split('/')[-1]):
                                    urllib.urlretrieve (root[x][y].text, root[x][y].attrib["folder"] + "/" + root[x][y].text.split('/')[-1])
                                if root[x][y].text.split('/')[-1][-4:] == "scss" or root[x][y].text.split('/')[-1][-4:] == "sass":
                                    file.write("@import \"" + root[x][y].attrib["folder"] + "/" + root[x][y].text.split('/')[-1] + "\";\n")
                #grabs all other files and drops them in the main directory
                else:
                    if not os.path.isfile(root[x][y].text.split('/')[-1]) and root[x][y].text[:4] == "http":
                        urllib.urlretrieve (root[x][y].text, root[x][y].text.split('/')[-1])
    #close main scss file
    file.close()
    #compile style.scss
    # With no extra CLI arguments, use libsass defaults; otherwise the first
    # "-flag" argument selects the sass output style.
    if len(sys.argv) <= 2:
        compiledString = sass.compile(filename="style.scss")
    else:
        for x in range(0, len(sys.argv)):
            if sys.argv[x][:1] == "-":
                compiledString = sass.compile(filename="style.scss", output_style=sys.argv[x].strip('-'))
    #write compiled sass to file
    outputFile = open("style.css", "w")
    outputFile.write(compiledString)
    outputFile.close()
def update():
    """Re-download every configured link, rebuild style.scss, and compile it.

    Identical to compiled() except that remote files are ALWAYS re-downloaded,
    even when a local copy exists.
    NOTE(review): near-total duplication with compiled(); consider extracting
    a shared helper with a `force` flag.
    """
    #begin XML parse
    tree = ET.parse(config)
    root = tree.getroot()
    #create primary scss file
    file = open("style.scss", "w")
    #download and write imports for all scss files
    for x in range(0, len(root)):
        if root[x].tag == "links":
            for y in range(0, len(root[x])):
                #gets array of attributes and puts files in appropriate directories
                if len(root[x][y].attrib) > 0:
                    if 'saveas' in root[x][y].attrib:
                        if 'folder' in root[x][y].attrib:
                            # Ensure the target folder exists.
                            # NOTE(review): bare except around os.stat — consider
                            # catching OSError explicitly.
                            try:
                                os.stat(root[x][y].attrib['folder'])
                            except:
                                os.makedirs(root[x][y].attrib['folder'])
                            # Unconditional download (this is the "update" semantics).
                            if root[x][y].text[:4] == "http":
                                urllib.urlretrieve (root[x][y].text, root[x][y].attrib["folder"] + "/" + root[x][y].attrib["saveas"])
                            if root[x][y].attrib["saveas"][-4:] == "scss" or root[x][y].attrib["saveas"][-4:] == "sass":
                                file.write("@import \"" + root[x][y].attrib["folder"] + "/" + root[x][y].attrib["saveas"] + "\";\n")
                    else:
                        # No "saveas": derive the filename from the URL's last path segment.
                        if 'folder' in root[x][y].attrib:
                            try:
                                os.stat(root[x][y].attrib['folder'])
                            except:
                                os.makedirs(root[x][y].attrib['folder'])
                            if root[x][y].text[:4] == "http":
                                urllib.urlretrieve (root[x][y].text, root[x][y].attrib["folder"] + "/" + root[x][y].text.split('/')[-1])
                            if root[x][y].text.split('/')[-1][-4:] == "scss" or root[x][y].text.split('/')[-1][-4:] == "sass":
                                file.write("@import \"" + root[x][y].attrib["folder"] + "/" + root[x][y].text.split('/')[-1] + "\";\n")
                #grabs all other files and drops them in the main directory
                else:
                    if root[x][y].text[:4] == "http":
                        urllib.urlretrieve (root[x][y].text, root[x][y].text.split('/')[-1])
    #close main scss file
    file.close()
    #compile style.scss
    # With no extra CLI arguments, use libsass defaults; otherwise the first
    # "-flag" argument selects the sass output style.
    if len(sys.argv) <= 2:
        compiledString = sass.compile(filename="style.scss")
    else:
        for x in range(0, len(sys.argv)):
            if sys.argv[x][:1] == "-":
                compiledString = sass.compile(filename="style.scss", output_style=sys.argv[x].strip('-'))
    #write compiled sass to file
    outputFile = open("style.css", "w")
    outputFile.write(compiledString)
    outputFile.close()
# Command-line dispatch. Guard against a missing argument: the previous code
# indexed sys.argv[1] unconditionally and crashed with IndexError when the
# script was invoked without a command.
if len(sys.argv) < 2:
    print("Invalid, please use valid command.")
elif sys.argv[1] == "init":
    init()
elif sys.argv[1] == "compile":
    compiled()
elif sys.argv[1] == "update":
    update()
else:
    print("Invalid, please use valid command.")
litecoin-project/litecoin | test/functional/wallet_groups.py | 4 | 3973 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_equal,
)
def assert_approx(v, vexp, vspan=0.00001):
    """Raise AssertionError unless v lies within vspan of vexp (inclusive)."""
    lower = vexp - vspan
    upper = vexp + vspan
    if v < lower:
        raise AssertionError("%s < [%s..%s]" % (str(v), str(lower), str(upper)))
    if v > upper:
        raise AssertionError("%s > [%s..%s]" % (str(v), str(lower), str(upper)))
class WalletGroupTest(BitcoinTestFramework):
    """Functional test for output-group (avoidpartialspends) coin selection."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3
        # Node 2 runs with -avoidpartialspends so it must spend whole
        # address-groups of UTXOs; nodes 0 and 1 use default selection.
        self.extra_args = [["-maxtxfee=1.0"], ["-maxtxfee=1.0"], ['-avoidpartialspends', '-maxtxfee=1.0']]
        self.rpc_timeout = 120
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        # Mine some coins
        self.nodes[0].generate(110)
        # Get some addresses from the two nodes
        addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
        addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
        addrs = addr1 + addr2
        # Send 1 + 0.5 coin to each address
        [self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
        [self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
        self.nodes[0].generate(1)
        self.sync_all()
        # For each node, send 0.2 coins back to 0;
        # - node[1] should pick one 0.5 UTXO and leave the rest
        # - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
        #   given address, and leave the rest
        txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        tx1 = self.nodes[1].getrawtransaction(txid1, True)
        # txid1 should have 1 input and 2 outputs
        assert_equal(1, len(tx1["vin"]))
        assert_equal(2, len(tx1["vout"]))
        # one output should be 0.2, the other should be ~0.3
        v = [vout["value"] for vout in tx1["vout"]]
        v.sort()
        assert_approx(v[0], 0.2)
        assert_approx(v[1], 0.3, 0.001)
        txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        tx2 = self.nodes[2].getrawtransaction(txid2, True)
        # txid2 should have 2 inputs and 2 outputs
        assert_equal(2, len(tx2["vin"]))
        assert_equal(2, len(tx2["vout"]))
        # one output should be 0.2, the other should be ~1.3
        v = [vout["value"] for vout in tx2["vout"]]
        v.sort()
        assert_approx(v[0], 0.2)
        assert_approx(v[1], 1.3, 0.001)
        # Empty out node2's wallet
        self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
        self.sync_all()
        self.nodes[0].generate(1)
        # Fill node2's wallet with 10000 outputs corresponding to the same
        # scriptPubKey
        for i in range(5):
            # Start from a dummy input, duplicate one 0.05 output 2000 times,
            # then let node 0 fund and sign the resulting transaction.
            raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
            tx = FromHex(CTransaction(), raw_tx)
            tx.vin = []
            tx.vout = [tx.vout[0]] * 2000
            funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
            signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
            self.nodes[0].sendrawtransaction(signed_tx['hex'])
            self.nodes[0].generate(1)
        self.sync_all()
        # Check that we can create a transaction that only requires ~100 of our
        # utxos, without pulling in all outputs and creating a transaction that
        # is way too big.
        assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    WalletGroupTest().main()
| mit |
chemelnucfin/tensorflow | tensorflow/python/keras/saving/saved_model/serialized_attributes.py | 6 | 11873 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes that list&validate all attributes to serialize to SavedModel.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.keras.saving.saved_model import constants
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking.tracking import AutoTrackable
from tensorflow.python.util.lazy_loader import LazyLoader
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
# pylint:disable=g-inconsistent-quotes
# Lazy proxies for the Keras engine modules; the import is deferred until
# first attribute access — presumably to avoid a circular import between the
# engine modules and this serialization module (TODO confirm).
base_layer = LazyLoader(
    "base_layer", globals(),
    "tensorflow.python.keras.engine.base_layer")
training_lib = LazyLoader(
    "training_lib", globals(),
    "tensorflow.python.keras.engine.training")
# pylint:enable=g-inconsistent-quotes
class SerializedAttributes(object):
"""Class that tracks and validates all serialization attributes.
Keras models contain many Python-defined components. For example, the
trainable_variable property lists the model's trainable variables by
recursively retrieving the trainable variables from each of the child layers.
Another example is model.call, a python function that calls child layers and
adds ops to the backend graph.
Only Tensorflow checkpointable objects and functions can be serialized to
SavedModel. Serializing a Keras model as-is results in a checkpointable object
that does not resemble a Keras model at all. Thus, extra checkpointable
objects and functions must be created during serialization.
**Defining new serialized attributes**
Child classes should be defined using:
SerializedAttributes.with_attributes(
'name', checkpointable_objects=[...], functions=[...], copy_from=[...])
This class is used to cache generated checkpointable objects and functions,
ensuring that new objects and functions are generated a single time.
**Usage during serialization**
Each Layer/Model object should have a corresponding instance of
SerializedAttributes. Create a new instance by calling
`SerializedAttributes.new(obj)`. Objects and functions may be saved using
`.set_and_validate_checkpointable_objects`/`.set_and_and_validate_functions`.
The properties `.checkpointable_objects` and `.functions` returns the cached
values.
**Adding/changing attributes to save to SavedModel**
1. Change the call to `SerializedAttributes.with_attributes` in the correct
class:
- CommonEndpoints: Base attributes to be added during serialization. If
these attributes are present in a Trackable object, it can be
deserialized to a Keras Model.
- LayerAttributes: Attributes to serialize for Layer objects.
- ModelAttributes: Attributes to serialize for Model objects.
2. Update class docstring
3. Update arguments to any calls to `set_and_validate_*`. For example, if
`call_raw_tensors` is added to the ModelAttributes function list, then
a `call_raw_tensors` function should be passed to
`set_and_validate_functions`.
**Common endpoints vs other attributes**
Only common endpoints are attached directly to the root object. Keras-specific
attributes are saved to a separate trackable object with the name "keras_api".
The number of objects attached to the root is limited because any naming
conflicts will cause user code to break.
Another reason is that this will only affect users who call
`tf.saved_model.load` instead of `tf.keras.models.load_model`. These are
advanced users who are likely to have defined their own tf.functions and
trackable objects. The added Keras-specific attributes are kept out of the way
in the "keras_api" namespace.
Properties defined in this class may be used to filter out keras-specific
attributes:
- `functions_to_serialize`: Returns dict of functions to attach to the root
object.
- `checkpointable_objects_to_serialize`: Returns dict of objects to attach to
the root object (including separate trackable object containing
keras-specific attributes)
All changes to the serialized attributes must be backwards-compatible, so
attributes should not be removed or modified without sufficient justification.
"""
@staticmethod
def with_attributes(
name, checkpointable_objects=None, functions=None, copy_from=None):
"""Creates a subclass with all attributes as specified in the arguments.
Args:
name: Name of subclass
checkpointable_objects: List of checkpointable objects to be serialized
in the SavedModel.
functions: List of functions to be serialized in the SavedModel.
copy_from: List of other SerializedAttributes subclasses. The returend
class will copy checkpoint objects/functions from each subclass.
Returns:
Child class with attributes as defined in the `checkpointable_objects`
and `functions` lists.
"""
checkpointable_objects = checkpointable_objects or []
functions = functions or []
if copy_from is not None:
for cls in copy_from:
checkpointable_objects.extend(cls.all_checkpointable_objects)
functions.extend(cls.all_functions)
classdict = {
'all_checkpointable_objects': set(checkpointable_objects),
'all_functions': set(functions)}
return type(name, (SerializedAttributes,), classdict)
@staticmethod
def new(obj):
if isinstance(obj, training_lib.Model):
return ModelAttributes()
elif isinstance(obj, base_layer.Layer):
return LayerAttributes()
else:
raise TypeError('Internal error during serialization: Expected Keras '
'Layer object, got {} of type {}'.format(obj, type(obj)))
def __init__(self):
self._object_dict = {}
self._function_dict = {}
self._keras_trackable = AutoTrackable()
@property
def functions(self):
"""Returns dictionary of all functions."""
return {key: value for key, value in self._function_dict.items()
if value is not None}
@property
def checkpointable_objects(self):
"""Returns dictionary of all checkpointable objects."""
return {key: value for key, value in self._object_dict.items()
if value is not None}
@property
def functions_to_serialize(self):
"""Returns functions to attach to the root object during serialization."""
return {key: value for key, value in self.functions.items()
if key in CommonEndpoints.all_functions}
@property
def objects_to_serialize(self):
"""Returns objects to attach to the root object during serialization."""
objects = {key: value for key, value in self.checkpointable_objects.items()
if key in CommonEndpoints.all_checkpointable_objects}
objects[constants.KERAS_ATTR] = self._keras_trackable
return objects
def set_and_validate_functions(self, function_dict):
"""Saves function dictionary, and validates dictionary values."""
for key in self.all_functions:
if key in function_dict:
if (function_dict[key] is not None and # Not all functions are required
not isinstance(function_dict[key],
(defun.Function, def_function.Function))):
raise ValueError(
'Function dictionary contained a non-function object: {} (for key'
' {})'.format(function_dict[key], key))
self._function_dict[key] = function_dict[key]
setattr(self._keras_trackable, key, function_dict[key])
else:
raise ValueError('Function {} missing from serialized function dict.'
.format(key))
return self.functions
def set_and_validate_objects(self, object_dict):
"""Saves objects to a dictionary, and validates the values."""
for key in self.all_checkpointable_objects:
if key in object_dict:
if not isinstance(object_dict[key], trackable.Trackable):
raise ValueError(
'Object dictionary contained a non-trackable object: {} (for key'
' {})'.format(object_dict[key], key))
self._object_dict[key] = object_dict[key]
setattr(self._keras_trackable, key, object_dict[key])
else:
raise ValueError('Object {} missing from serialized object dict.')
return self.checkpointable_objects
class CommonEndpoints(SerializedAttributes.with_attributes(
    'CommonEndpoints',
    checkpointable_objects=['variables', 'trainable_variables',
                            'regularization_losses'],
    functions=['__call__', 'call_and_return_all_conditional_losses',
               '_default_save_signature'])):
  """Common endpoints shared by all models loadable by Keras.

  List of all attributes:
    variables: List of all variables in the model and its sublayers.
    trainable_variables: List of all trainable variables in the model and its
      sublayers.
    regularization_losses: List of all unconditional losses (losses not
      dependent on the inputs) in the model and its sublayers.
    __call__: Function that takes inputs and returns the outputs of the model
      call function.
    call_and_return_all_conditional_losses: Function that returns a tuple of
      (call function outputs, list of all losses that depend on the inputs).
    _default_save_signature: Traced model call function. This is only included
      if the top level exported object is a Keras model.
  """
class LayerAttributes(SerializedAttributes.with_attributes(
    'LayerAttributes',
    checkpointable_objects=['non_trainable_variables', 'layers', 'metrics',
                            'layer_regularization_losses'],
    functions=['call_and_return_conditional_losses', 'activity_regularizer_fn'],
    copy_from=[CommonEndpoints]
    )):
  """Layer checkpointable objects + functions that are saved to the SavedModel.

  List of all attributes:
    All attributes from CommonEndpoints
    non_trainable_variables: List of non-trainable variables in the layer and
      its sublayers.
    layers: List of all sublayers.
    metrics: List of all metrics in the layer and its sublayers.
    call_and_return_conditional_losses: Function that takes inputs and returns a
      tuple of (outputs of the call function, list of input-dependent losses).
      The list of losses excludes the activity regularizer function, which is
      separate to allow the deserialized Layer object to define a different
      activity regularizer.
    activity_regularizer_fn: Callable that returns the activity regularizer
      loss.
    layer_regularization_losses: List of losses owned only by this layer.
  """
class ModelAttributes(SerializedAttributes.with_attributes(
    'ModelAttributes',
    copy_from=[LayerAttributes])):
  """Model checkpointable objects + functions that are saved to the SavedModel.

  List of all attributes:
    All attributes from LayerAttributes (including CommonEndpoints)

  Currently no additional attributes beyond those copied from LayerAttributes
  are defined here.
  """
# TODO(kathywu): Add attributes `compile_losses` and `compile_metrics`, which
# list all losses and metrics defined by `model.compile`.
| apache-2.0 |
kerr-huang/SL4A | python/src/Lib/test/test_memoryio.py | 55 | 14313 | """Unit tests for memory-based file-like objects.
StringIO -- for unicode strings
BytesIO -- for bytes
"""
from __future__ import unicode_literals
import unittest
from test import test_support
import io
import sys
import array
try:
import _bytesio
has_c_implementation = True
except ImportError:
has_c_implementation = False
class MemoryTestMixin:
    """Tests shared by the BytesIO and StringIO test cases.

    Concrete subclasses must provide:
      buftype: callable converting a native str literal to the buffer type
      ioclass: the in-memory file class under test
      EOF:     the empty value returned at end-of-file
    """
    def write_ops(self, f, t):
        # Helper: exercises write/seek/tell/truncate on f; callers verify the
        # resulting buffer via getvalue() afterwards.
        self.assertEqual(f.write(t("blah.")), 5)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(t("Hello.")), 6)
        self.assertEqual(f.tell(), 6)
        self.assertEqual(f.seek(5), 5)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.write(t(" world\n\n\n")), 9)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(t("h")), 1)
        self.assertEqual(f.truncate(12), 12)
        self.assertEqual(f.tell(), 12)
    def test_write(self):
        buf = self.buftype("hello world\n")
        memio = self.ioclass(buf)
        self.write_ops(memio, self.buftype)
        self.assertEqual(memio.getvalue(), buf)
        memio = self.ioclass()
        self.write_ops(memio, self.buftype)
        self.assertEqual(memio.getvalue(), buf)
        self.assertRaises(TypeError, memio.write, None)
        memio.close()
        # Operations on a closed object must raise ValueError.
        self.assertRaises(ValueError, memio.write, self.buftype(""))
    def test_writelines(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass()
        self.assertEqual(memio.writelines([buf] * 100), None)
        self.assertEqual(memio.getvalue(), buf * 100)
        memio.writelines([])
        self.assertEqual(memio.getvalue(), buf * 100)
        memio = self.ioclass()
        self.assertRaises(TypeError, memio.writelines, [buf] + [1])
        self.assertEqual(memio.getvalue(), buf)
        self.assertRaises(TypeError, memio.writelines, None)
        memio.close()
        self.assertRaises(ValueError, memio.writelines, [])
    def test_writelines_error(self):
        # An exception raised mid-iteration must propagate out of writelines.
        memio = self.ioclass()
        def error_gen():
            yield self.buftype('spam')
            raise KeyboardInterrupt
        self.assertRaises(KeyboardInterrupt, memio.writelines, error_gen())
    def test_truncate(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertRaises(ValueError, memio.truncate, -1)
        memio.seek(6)
        self.assertEqual(memio.truncate(), 6)
        self.assertEqual(memio.getvalue(), buf[:6])
        self.assertEqual(memio.truncate(4), 4)
        self.assertEqual(memio.getvalue(), buf[:4])
        self.assertEqual(memio.tell(), 4)
        memio.write(buf)
        self.assertEqual(memio.getvalue(), buf[:4] + buf)
        pos = memio.tell()
        self.assertEqual(memio.truncate(None), pos)
        self.assertEqual(memio.tell(), pos)
        self.assertRaises(TypeError, memio.truncate, '0')
        memio.close()
        self.assertRaises(ValueError, memio.truncate, 0)
    def test_init(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.getvalue(), buf)
        memio = self.ioclass(None)
        self.assertEqual(memio.getvalue(), self.EOF)
        # Re-calling __init__ resets the buffer contents.
        memio.__init__(buf * 2)
        self.assertEqual(memio.getvalue(), buf * 2)
        memio.__init__(buf)
        self.assertEqual(memio.getvalue(), buf)
    def test_read(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.read(0), self.EOF)
        self.assertEqual(memio.read(1), buf[:1])
        self.assertEqual(memio.read(4), buf[1:5])
        self.assertEqual(memio.read(900), buf[5:])
        self.assertEqual(memio.read(), self.EOF)
        memio.seek(0)
        self.assertEqual(memio.read(), buf)
        self.assertEqual(memio.read(), self.EOF)
        self.assertEqual(memio.tell(), 10)
        memio.seek(0)
        self.assertEqual(memio.read(-1), buf)
        memio.seek(0)
        self.assertEqual(type(memio.read()), type(buf))
        memio.seek(100)
        self.assertEqual(type(memio.read()), type(buf))
        memio.seek(0)
        self.assertEqual(memio.read(None), buf)
        self.assertRaises(TypeError, memio.read, '')
        memio.close()
        self.assertRaises(ValueError, memio.read)
    def test_readline(self):
        buf = self.buftype("1234567890\n")
        memio = self.ioclass(buf * 2)
        self.assertEqual(memio.readline(0), self.EOF)
        self.assertEqual(memio.readline(), buf)
        self.assertEqual(memio.readline(), buf)
        self.assertEqual(memio.readline(), self.EOF)
        memio.seek(0)
        self.assertEqual(memio.readline(5), buf[:5])
        self.assertEqual(memio.readline(5), buf[5:10])
        self.assertEqual(memio.readline(5), buf[10:15])
        memio.seek(0)
        self.assertEqual(memio.readline(-1), buf)
        memio.seek(0)
        self.assertEqual(memio.readline(0), self.EOF)
        # Buffer whose final line has no trailing newline.
        buf = self.buftype("1234567890\n")
        memio = self.ioclass((buf * 3)[:-1])
        self.assertEqual(memio.readline(), buf)
        self.assertEqual(memio.readline(), buf)
        self.assertEqual(memio.readline(), buf[:-1])
        self.assertEqual(memio.readline(), self.EOF)
        memio.seek(0)
        self.assertEqual(type(memio.readline()), type(buf))
        self.assertEqual(memio.readline(None), buf)
        self.assertRaises(TypeError, memio.readline, '')
        memio.close()
        self.assertRaises(ValueError, memio.readline)
    def test_readlines(self):
        buf = self.buftype("1234567890\n")
        memio = self.ioclass(buf * 10)
        self.assertEqual(memio.readlines(), [buf] * 10)
        memio.seek(5)
        self.assertEqual(memio.readlines(), [buf[5:]] + [buf] * 9)
        memio.seek(0)
        self.assertEqual(memio.readlines(15), [buf] * 2)
        memio.seek(0)
        self.assertEqual(memio.readlines(-1), [buf] * 10)
        memio.seek(0)
        self.assertEqual(memio.readlines(0), [buf] * 10)
        memio.seek(0)
        self.assertEqual(type(memio.readlines()[0]), type(buf))
        memio.seek(0)
        self.assertEqual(memio.readlines(None), [buf] * 10)
        self.assertRaises(TypeError, memio.readlines, '')
        memio.close()
        self.assertRaises(ValueError, memio.readlines)
    def test_iterator(self):
        buf = self.buftype("1234567890\n")
        memio = self.ioclass(buf * 10)
        self.assertEqual(iter(memio), memio)
        # failUnless is the Python 2 spelling of assertTrue.
        self.failUnless(hasattr(memio, '__iter__'))
        self.failUnless(hasattr(memio, 'next'))
        i = 0
        for line in memio:
            self.assertEqual(line, buf)
            i += 1
        self.assertEqual(i, 10)
        memio.seek(0)
        i = 0
        for line in memio:
            self.assertEqual(line, buf)
            i += 1
        self.assertEqual(i, 10)
        memio = self.ioclass(buf * 2)
        memio.close()
        self.assertRaises(ValueError, memio.next)
    def test_getvalue(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.getvalue(), buf)
        memio.read()
        # getvalue() is independent of the current read position.
        self.assertEqual(memio.getvalue(), buf)
        self.assertEqual(type(memio.getvalue()), type(buf))
        memio = self.ioclass(buf * 1000)
        self.assertEqual(memio.getvalue()[-3:], self.buftype("890"))
        memio = self.ioclass(buf)
        memio.close()
        self.assertRaises(ValueError, memio.getvalue)
    def test_seek(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        memio.read(5)
        self.assertRaises(ValueError, memio.seek, -1)
        self.assertRaises(ValueError, memio.seek, 1, -1)
        self.assertRaises(ValueError, memio.seek, 1, 3)
        self.assertEqual(memio.seek(0), 0)
        self.assertEqual(memio.seek(0, 0), 0)
        self.assertEqual(memio.read(), buf)
        self.assertEqual(memio.seek(3), 3)
        self.assertEqual(memio.seek(0, 1), 3)
        self.assertEqual(memio.read(), buf[3:])
        self.assertEqual(memio.seek(len(buf)), len(buf))
        self.assertEqual(memio.read(), self.EOF)
        memio.seek(len(buf) + 1)
        self.assertEqual(memio.read(), self.EOF)
        self.assertEqual(memio.seek(0, 2), len(buf))
        self.assertEqual(memio.read(), self.EOF)
        memio.close()
        self.assertRaises(ValueError, memio.seek, 0)
    def test_overseek(self):
        # Seeking past the end is allowed; writing there zero-fills the gap.
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.seek(len(buf) + 1), 11)
        self.assertEqual(memio.read(), self.EOF)
        self.assertEqual(memio.tell(), 11)
        self.assertEqual(memio.getvalue(), buf)
        memio.write(self.EOF)
        self.assertEqual(memio.getvalue(), buf)
        memio.write(buf)
        self.assertEqual(memio.getvalue(), buf + self.buftype('\0') + buf)
    def test_tell(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.tell(), 0)
        memio.seek(5)
        self.assertEqual(memio.tell(), 5)
        memio.seek(10000)
        self.assertEqual(memio.tell(), 10000)
        memio.close()
        self.assertRaises(ValueError, memio.tell)
    def test_flush(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.flush(), None)
    def test_flags(self):
        memio = self.ioclass()
        self.assertEqual(memio.writable(), True)
        self.assertEqual(memio.readable(), True)
        self.assertEqual(memio.seekable(), True)
        self.assertEqual(memio.isatty(), False)
        self.assertEqual(memio.closed, False)
        memio.close()
        # The flag queries still work after close(); only isatty() raises.
        self.assertEqual(memio.writable(), True)
        self.assertEqual(memio.readable(), True)
        self.assertEqual(memio.seekable(), True)
        self.assertRaises(ValueError, memio.isatty)
        self.assertEqual(memio.closed, True)
    def test_subclassing(self):
        buf = self.buftype("1234567890")
        def test1():
            class MemIO(self.ioclass):
                pass
            m = MemIO(buf)
            return m.getvalue()
        def test2():
            class MemIO(self.ioclass):
                def __init__(me, a, b):
                    self.ioclass.__init__(me, a)
            m = MemIO(buf, None)
            return m.getvalue()
        self.assertEqual(test1(), buf)
        self.assertEqual(test2(), buf)
class PyBytesIOTest(MemoryTestMixin, unittest.TestCase):
    """Runs the shared memory-file tests against the pure-Python BytesIO."""
    @staticmethod
    def buftype(s):
        # Buffers are ASCII-encoded bytes for the BytesIO tests.
        return s.encode("ascii")
    ioclass = io._BytesIO
    EOF = b""
    def test_read1(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertRaises(TypeError, memio.read1)
        self.assertEqual(memio.read(), buf)
    def test_readinto(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        b = bytearray(b"hello")
        self.assertEqual(memio.readinto(b), 5)
        self.assertEqual(b, b"12345")
        self.assertEqual(memio.readinto(b), 5)
        self.assertEqual(b, b"67890")
        self.assertEqual(memio.readinto(b), 0)
        self.assertEqual(b, b"67890")
        b = bytearray(b"hello world")
        memio.seek(0)
        self.assertEqual(memio.readinto(b), 10)
        self.assertEqual(b, b"1234567890d")
        b = bytearray(b"")
        memio.seek(0)
        self.assertEqual(memio.readinto(b), 0)
        self.assertEqual(b, b"")
        self.assertRaises(TypeError, memio.readinto, '')
        # readinto also accepts other writable buffer objects such as arrays.
        a = array.array(b'b', map(ord, b"hello world"))
        memio = self.ioclass(buf)
        memio.readinto(a)
        self.assertEqual(a.tostring(), b"1234567890d")
        memio.close()
        self.assertRaises(ValueError, memio.readinto, b)
    def test_relative_seek(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.seek(-1, 1), 0)
        self.assertEqual(memio.seek(3, 1), 3)
        self.assertEqual(memio.seek(-4, 1), 0)
        self.assertEqual(memio.seek(-1, 2), 9)
        self.assertEqual(memio.seek(1, 1), 10)
        self.assertEqual(memio.seek(1, 2), 11)
        memio.seek(-3, 2)
        self.assertEqual(memio.read(), buf[-3:])
        memio.seek(0)
        memio.seek(1, 1)
        self.assertEqual(memio.read(), buf[1:])
    def test_unicode(self):
        # BytesIO rejects unicode input everywhere.
        memio = self.ioclass()
        self.assertRaises(TypeError, self.ioclass, "1234567890")
        self.assertRaises(TypeError, memio.write, "1234567890")
        self.assertRaises(TypeError, memio.writelines, ["1234567890"])
    def test_bytes_array(self):
        buf = b"1234567890"
        a = array.array(b'b', map(ord, buf))
        memio = self.ioclass(a)
        self.assertEqual(memio.getvalue(), buf)
        self.assertEqual(memio.write(a), 10)
        self.assertEqual(memio.getvalue(), buf)
class PyStringIOTest(MemoryTestMixin, unittest.TestCase):
    """Runs the shared memory-file tests against the pure-Python StringIO."""
    buftype = unicode
    ioclass = io.StringIO
    EOF = ""
    def test_relative_seek(self):
        # StringIO forbids relative seeks to non-zero offsets (whence 1 or 2).
        memio = self.ioclass()
        self.assertRaises(IOError, memio.seek, -1, 1)
        self.assertRaises(IOError, memio.seek, 3, 1)
        self.assertRaises(IOError, memio.seek, -3, 1)
        self.assertRaises(IOError, memio.seek, -1, 2)
        self.assertRaises(IOError, memio.seek, 1, 1)
        self.assertRaises(IOError, memio.seek, 1, 2)
    # XXX: For the Python version of io.StringIO, this is highly
    # dependent on the encoding used for the underlying buffer.
    # def test_widechar(self):
    #     buf = self.buftype("\U0002030a\U00020347")
    #     memio = self.ioclass(buf)
    #
    #     self.assertEqual(memio.getvalue(), buf)
    #     self.assertEqual(memio.write(buf), len(buf))
    #     self.assertEqual(memio.tell(), len(buf))
    #     self.assertEqual(memio.getvalue(), buf)
    #     self.assertEqual(memio.write(buf), len(buf))
    #     self.assertEqual(memio.tell(), len(buf) * 2)
    #     self.assertEqual(memio.getvalue(), buf + buf)
if has_c_implementation:
    class CBytesIOTest(PyBytesIOTest):
        # Re-run the whole BytesIO suite against the C-accelerated class.
        ioclass = io.BytesIO
def test_main():
    """Run the memory-IO test cases, including the C variant if available."""
    tests = [PyBytesIOTest, PyStringIOTest]
    if has_c_implementation:
        tests.append(CBytesIOTest)
    test_support.run_unittest(*tests)
if __name__ == '__main__':
    # Allow running this test module directly.
    test_main()
| apache-2.0 |
easmetz/inasafe | safe/report/test/test_impact_report.py | 2 | 9513 | # coding=utf-8
"""**Tests for report creation using composition.**
"""
__author__ = 'akbargumbira@gmail.com'
__date__ = '06/01/2015'
__copyright__ = ('Copyright 2013, Australia Indonesia Facility for '
'Disaster Reduction')
import unittest
import os
import logging
from qgis.core import QgsMapLayerRegistry, QgsRectangle
from safe.test.utilities import get_qgis_app
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
from safe.common.utilities import temp_dir, unique_filename
from safe.utilities.resources import resources_path
from safe.test.utilities import load_layer, standard_data_path
from safe.report.impact_report import ImpactReport
from safe.utilities.gis import qgis_version
LOGGER = logging.getLogger('InaSAFE')
class ImpactReportTest(unittest.TestCase):
    """Tests for the InaSAFE impact report (map/PDF) generator."""
    def setUp(self):
        """Fixture run before each test: clear the QGIS layer registry."""
        # noinspection PyArgumentList,PyUnresolvedReferences
        layer_registry = QgsMapLayerRegistry.instance()
        layer_registry.removeAllMapLayers()
    def test_get_map_title(self):
        """Getting the map title from the keywords"""
        impact_layer_path = standard_data_path(
            'impact', 'population_affected_entire_area.shp')
        layer, _ = load_layer(impact_layer_path)
        template = resources_path(
            'qgis-composer-templates', 'a4-portrait-blue.qpt')
        report = ImpactReport(IFACE, template, layer)
        title = report.map_title
        expected_title = 'People affected by flood prone areas'
        message = 'Expected: %s\nGot:\n %s' % (expected_title, title)
        self.assertEqual(title, expected_title, message)
    def test_handle_missing_map_title(self):
        """Missing map title from the keywords fails gracefully"""
        # Use hazard layer as it won't have 'map_title' keyword
        layer_path = standard_data_path('hazard', 'tsunami_wgs84.tif')
        layer, _ = load_layer(layer_path)
        template = resources_path(
            'qgis-composer-templates', 'a4-portrait-blue.qpt')
        report = ImpactReport(IFACE, template, layer)
        title = report.map_title
        expected_title = None
        message = 'Expected: %s\nGot:\n %s' % (expected_title, title)
        self.assertEqual(title, expected_title, message)
    def test_missing_elements(self):
        """Test missing elements set correctly."""
        impact_layer_path = standard_data_path(
            'impact', 'population_affected_entire_area.shp')
        layer, _ = load_layer(impact_layer_path)
        template = resources_path(
            'qgis-composer-templates', 'a4-portrait-blue.qpt')
        report = ImpactReport(IFACE, template, layer)
        # There are missing elements in the template
        component_ids = ['safe-logo', 'north-arrow', 'organisation-logo',
                         'impact-map', 'impact-legend',
                         'i-added-element-id-here-nooo']
        report.component_ids = component_ids
        expected_missing_elements = ['i-added-element-id-here-nooo']
        message = 'The missing_elements should be %s, but it returns %s' % (
            report.missing_elements, expected_missing_elements)
        self.assertEqual(
            expected_missing_elements, report.missing_elements, message)
    def test_print_default_template(self):
        """Test printing report to pdf using default template works."""
        impact_layer_path = standard_data_path(
            'impact', 'population_affected_entire_area.shp')
        layer, _ = load_layer(impact_layer_path)
        # noinspection PyUnresolvedReferences,PyArgumentList
        QgsMapLayerRegistry.instance().addMapLayer(layer)
        # noinspection PyCallingNonCallable
        rect = QgsRectangle(106.8194, -6.2108, 106.8201, -6.1964)
        CANVAS.setExtent(rect)
        CANVAS.refresh()
        template = resources_path(
            'qgis-composer-templates', 'a4-portrait-blue.qpt')
        report = ImpactReport(IFACE, template, layer)
        out_path = unique_filename(
            prefix='map_default_template_test',
            suffix='.pdf',
            dir=temp_dir('test'))
        report.print_map_to_pdf(out_path)
        # Check the file exists
        message = 'Rendered output does not exist: %s' % out_path
        self.assertTrue(os.path.exists(out_path), message)
        # Check the file is not corrupt
        message = 'The output file %s is corrupt' % out_path
        out_size = os.stat(out_path).st_size
        self.assertTrue(out_size > 0, message)
        # Check the components in composition are default components
        # Older QGIS (< 2.5) exposes pictureFile(); newer exposes picturePath().
        if qgis_version() < 20500:
            safe_logo = report.composition.getComposerItemById(
                'inasafe-logo').pictureFile()
            north_arrow = report.composition.getComposerItemById(
                'north-arrow').pictureFile()
            org_logo = report.composition.getComposerItemById(
                'organisation-logo').pictureFile()
        else:
            safe_logo = report.composition.getComposerItemById(
                'white-inasafe-logo').picturePath()
            north_arrow = report.composition.getComposerItemById(
                'north-arrow').picturePath()
            org_logo = report.composition.getComposerItemById(
                'organisation-logo').picturePath()
        expected_safe_logo = resources_path(
            'img', 'logos', 'inasafe-logo-url-white.svg')
        expected_north_arrow = resources_path(
            'img', 'north_arrows', 'simple_north_arrow.png')
        expected_org_logo = resources_path('img', 'logos', 'supporters.png')
        message = (
            'The safe logo path is not the default one: %s isn\'t %s' %
            (expected_safe_logo, safe_logo))
        self.assertEqual(expected_safe_logo, safe_logo, message)
        message = 'The north arrow path is not the default one'
        self.assertEqual(expected_north_arrow, north_arrow, message)
        message = 'The organisation logo path is not the default one'
        self.assertEqual(expected_org_logo, org_logo, message)
    def test_custom_logo(self):
        """Test that setting user-defined logo works."""
        # LOGGER.info('Testing custom_logo')
        impact_layer_path = standard_data_path(
            'impact', 'population_affected_entire_area.shp')
        layer, _ = load_layer(impact_layer_path)
        # noinspection PyUnresolvedReferences,PyArgumentList
        QgsMapLayerRegistry.instance().addMapLayer(layer)
        # noinspection PyCallingNonCallable
        rect = QgsRectangle(106.8194, -6.2108, 106.8201, -6.1964)
        CANVAS.setExtent(rect)
        CANVAS.refresh()
        template = resources_path(
            'qgis-composer-templates', 'a4-portrait-blue.qpt')
        report = ImpactReport(IFACE, template, layer)
        # Set custom logo
        custom_logo_path = resources_path('img', 'logos', 'supporters.png')
        report.organisation_logo = custom_logo_path
        out_path = unique_filename(
            prefix='map_custom_logo_test', suffix='.pdf', dir=temp_dir('test'))
        report.print_map_to_pdf(out_path)
        # Check the file exists
        message = 'Rendered output does not exist: %s' % out_path
        self.assertTrue(os.path.exists(out_path), message)
        # Check the file is not corrupt
        message = 'The output file %s is corrupt' % out_path
        out_size = os.stat(out_path).st_size
        self.assertTrue(out_size > 0, message)
        # Check the organisation logo in composition sets correctly to
        # logo-flower
        if qgis_version() < 20500:
            custom_img_path = report.composition.getComposerItemById(
                'organisation-logo').pictureFile()
        else:
            custom_img_path = report.composition.getComposerItemById(
                'organisation-logo').picturePath()
        message = 'The custom logo path is not set correctly'
        self.assertEqual(custom_logo_path, custom_img_path, message)
    @unittest.skip('Not included in Travis')
    def test_print_impact_table(self):
        """Test print impact table to pdf."""
        impact_layer_path = standard_data_path(
            'impact', 'population_affected_entire_area.shp')
        layer, _ = load_layer(impact_layer_path)
        # noinspection PyUnresolvedReferences,PyArgumentList
        QgsMapLayerRegistry.instance().addMapLayer(layer)
        # noinspection PyCallingNonCallable
        rect = QgsRectangle(106.8194, -6.2108, 106.8201, -6.1964)
        CANVAS.setExtent(rect)
        CANVAS.refresh()
        template = resources_path(
            'qgis-composer-templates', 'a4-portrait-blue.qpt')
        report = ImpactReport(IFACE, template, layer)
        report.template = template  # just to cover set template
        out_path = unique_filename(
            prefix='test_print_impact_table',
            suffix='.pdf',
            dir=temp_dir('test'))
        report.print_impact_table(out_path)
        # Check the file exists
        message = 'Rendered output does not exist: %s' % out_path
        self.assertTrue(os.path.exists(out_path), message)
        # Check the file is not corrupt
        message = 'The output file %s is corrupt' % out_path
        out_size = os.stat(out_path).st_size
        self.assertTrue(out_size > 0, message)
if __name__ == '__main__':
    # Bug fix: makeSuite was previously passed ImpactReport (the class under
    # test) instead of the TestCase subclass, so no tests were collected when
    # running this module directly.
    suite = unittest.makeSuite(ImpactReportTest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
| gpl-3.0 |
akshayharidas/fosswebsite | events/views.py | 2 | 2441 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
# Create your views here.
from django.urls import reverse_lazy
from django.views.generic import CreateView, UpdateView, DeleteView
from events.forms import EventCreateForm
from events.models import Event
class EventCreateView(CreateView):
    """Create a new Event, attaching the logged-in user before saving."""
    form_class = EventCreateForm
    template_name = 'base/form.html'
    success_url = '/events'

    def get_context_data(self, **kwargs):
        context = super(EventCreateView, self).get_context_data(**kwargs)
        context.update({'heading': 'New Workshop', 'title': 'Workshops'})
        return context

    def form_valid(self, form):
        # Record who created the event.
        form.instance.user = self.request.user
        return super(EventCreateView, self).form_valid(form)
class EventUpdateView(UpdateView):
    """Update an Event; only the owner or a superuser may edit."""
    form_class = EventCreateForm
    template_name = 'base/form.html'
    model = Event

    def get(self, request, *args, **kwargs):
        # Bug fix: the redirect was previously computed but never returned,
        # so unauthorized users could still reach the form.
        if not (request.user.is_superuser or request.user == self.get_object().user):
            return redirect('permission_denied')
        return super(EventUpdateView, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(EventUpdateView, self).get_context_data(**kwargs)
        context['heading'] = 'Update Event'
        context['title'] = 'Events'
        return context

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super(EventUpdateView, self).form_valid(form)

    def post(self, request, *args, **kwargs):
        # Bug fix: return the redirect (previously discarded).
        if not (request.user.is_superuser or request.user == self.get_object().user):
            return redirect('permission_denied')
        return super(EventUpdateView, self).post(request, *args, **kwargs)
class EventDeleteView(DeleteView):
    """Delete an Event; only the owner or a superuser may delete."""
    model = Event
    template_name = 'event/confirm_delete.html'
    success_url = reverse_lazy('events')

    def get(self, request, *args, **kwargs):
        # Bug fixes: (1) return the redirect instead of discarding it;
        # (2) call super() with EventDeleteView — the original used
        # EventUpdateView, which would raise TypeError at runtime.
        if not (request.user.is_superuser or request.user == self.get_object().user):
            return redirect('permission_denied')
        return super(EventDeleteView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        if not (request.user.is_superuser or request.user == self.get_object().user):
            return redirect('permission_denied')
        return super(EventDeleteView, self).post(request, *args, **kwargs)
| mit |
tarikgwa/nfd | newfies/mod_sms/views.py | 5 | 34205 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2012 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.template.context import RequestContext
from django.utils.translation import ugettext as _
from dialer_contact.models import Contact
from dialer_contact.constants import CONTACT_STATUS
from dialer_campaign.function_def import date_range, user_dialer_setting, \
dialer_setting_limit
from frontend.function_def import calculate_date
from frontend.constants import SEARCH_TYPE
from frontend_notification.views import frontend_send_notification
from django_lets_go.common_functions import get_pagination_vars, ceil_strdate,\
percentage, getvar, unset_session_var
from mod_utils.helper import Export_choice
from mod_sms.models import SMSCampaign, SMSCampaignSubscriber, SMSMessage
from mod_sms.constants import SMS_CAMPAIGN_STATUS, SMS_CAMPAIGN_COLUMN_NAME,\
SMS_REPORT_COLUMN_NAME, COLOR_SMS_DISPOSITION, SMS_NOTIFICATION_NAME,\
SMS_SUBSCRIBER_STATUS, SMS_MESSAGE_STATUS
from mod_sms.forms import SMSCampaignForm, SMSDashboardForm, SMSSearchForm,\
SMSCampaignSearchForm, DuplicateSMSCampaignForm
from mod_sms.function_def import check_sms_dialer_setting, get_sms_notification_status
from datetime import datetime
from django.utils.timezone import utc
from dateutil.relativedelta import relativedelta
import tablib
import time
redirect_url_to_smscampaign_list = '/sms_campaign/'
@login_required
def update_sms_campaign_status_admin(request, pk, status):
    """SMS Campaign Status (e.g. start|stop|pause|abort) can be changed from
    admin interface (via sms campaign list)"""
    campaign = SMSCampaign.objects.get(pk=pk)
    notified_user = campaign.common_sms_campaign_status(status)
    notification = get_sms_notification_status(int(status))
    frontend_send_notification(request, notification, notified_user)
    return HttpResponseRedirect(reverse("admin:mod_sms_smscampaign_changelist"))
@login_required
def update_sms_campaign_status_cust(request, pk, status):
    """SMS Campaign Status (e.g. start|stop|pause|abort) can be changed from
    customer interface (via sms campaign list)"""
    campaign = SMSCampaign.objects.get(pk=pk)
    notified_user = campaign.common_sms_campaign_status(status)
    notification = get_sms_notification_status(int(status))
    frontend_send_notification(request, notification, notified_user)
    return HttpResponseRedirect(redirect_url_to_smscampaign_list)
# SMSCampaign
@permission_required('mod_sms.view_smscampaign', login_url='/')
@login_required
def sms_campaign_list(request):
    """List all sms campaigns for the logged in user

    **Attributes**:

        * ``template`` - mod_sms/list.html

    **Logic Description**:

        * List all sms campaigns belonging to the logged in user
    """
    form = SMSCampaignSearchForm(request.user, request.POST or None)
    sort_col_field_list = ['id', 'name', 'startingdate', 'status', 'totalcontact']
    pag_vars = get_pagination_vars(request, sort_col_field_list, default_sort_field='id')
    phonebook_id = ''
    status = 'all'
    post_var_with_page = 0
    if form.is_valid():
        # A new search was submitted: clear old filters, then store the
        # submitted ones in the session so pagination/sorting keeps them.
        field_list = ['phonebook_id', 'status']
        unset_session_var(request, field_list)
        post_var_with_page = 1
        phonebook_id = getvar(request, 'phonebook_id', setsession=True)
        status = getvar(request, 'status', setsession=True)
    if request.GET.get('page') or request.GET.get('sort_by'):
        # Paginating/sorting an existing search: restore filters from session.
        post_var_with_page = 1
        phonebook_id = request.session.get('session_phonebook_id')
        status = request.session.get('session_status')
        form = SMSCampaignSearchForm(request.user, initial={'status': status,
                                                            'phonebook_id': phonebook_id})
    if post_var_with_page == 0:
        # default
        # unset session var
        field_list = ['status', 'phonebook_id']
        unset_session_var(request, field_list)
    kwargs = {}
    if phonebook_id and phonebook_id != '0':
        kwargs['phonebook__id__in'] = [int(phonebook_id)]
    if status and status != 'all':
        kwargs['status'] = status
    smscampaign_list = SMSCampaign.objects.filter(user=request.user).order_by(pag_vars['sort_order'])
    smscampaign_count = smscampaign_list.count()
    if kwargs:
        # NOTE(review): pagination slicing is only applied when a filter is
        # active; the unfiltered listing returns the full queryset — confirm
        # this is intended.
        all_smscampaign_list = smscampaign_list.filter(**kwargs).order_by(pag_vars['sort_order'])
        smscampaign_list = all_smscampaign_list[pag_vars['start_page']:pag_vars['end_page']]
        smscampaign_count = all_smscampaign_list.count()
    data = {
        'form': form,
        'smscampaign_list': smscampaign_list,
        'total_campaign': smscampaign_count,
        'SMS_CAMPAIGN_COLUMN_NAME': SMS_CAMPAIGN_COLUMN_NAME,
        'col_name_with_order': pag_vars['col_name_with_order'],
        'msg': request.session.get('msg'),
        'error_msg': request.session.get('error_msg'),
        'info_msg': request.session.get('info_msg'),
    }
    # Flash messages are shown once, then cleared from the session.
    request.session['msg'] = ''
    request.session['error_msg'] = ''
    request.session['info_msg'] = ''
    return render_to_response('mod_sms/list.html', data, context_instance=RequestContext(request))
@permission_required('mod_sms.add_smscampaign', login_url='/')
@login_required
def sms_campaign_add(request):
    """Create a new sms campaign owned by the logged in user.

    **Attributes**:

        * ``form`` - SMSCampaignForm
        * ``template`` - mod_sms/change.html

    **Logic Description**:

        * The user must have a dialer setting attached and be below the
          allowed number of sms campaigns before the form is processed.
        * On a valid POST the campaign is saved for the logged in user and
          the browser is redirected to the sms campaign list.
    """
    # Without a dialer setting the user cannot create campaigns at all.
    if not user_dialer_setting(request.user):
        request.session['error_msg'] = \
            _("in order to add a sms campaign, you need to have your \
               settings configured properly, please contact the admin.")
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    # Enforce the max-campaigns limit on the initial GET only.
    if request.user and request.method != 'POST' \
            and check_sms_dialer_setting(request, check_for="smscampaign"):
        limit = dialer_setting_limit(request, limit_for="smscampaign")
        request.session['msg'] = \
            _("you have too many sms campaigns. Max allowed %(limit)s") % {'limit': limit}
        # Notify the frontend that the sms campaign limit was reached.
        frontend_send_notification(request, SMS_NOTIFICATION_NAME.sms_campaign_limit_reached)
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    form = SMSCampaignForm(request.user, request.POST or None)
    if form.is_valid():
        new_campaign = form.save(commit=False)
        new_campaign.user = User.objects.get(username=request.user)
        # A freshly created campaign stops when it expires.
        new_campaign.stoppeddate = new_campaign.expirationdate
        new_campaign.save()
        # Persist the m2m relations skipped by commit=False.
        form.save_m2m()
        request.session["msg"] = _('"%(name)s" is added.') % {'name': request.POST['name']}
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    return render_to_response('mod_sms/change.html',
                              {'form': form, 'action': 'add'},
                              context_instance=RequestContext(request))
# NOTE(review): permission is 'delete_smsmessage', not 'delete_smscampaign' —
# looks inconsistent with the model being deleted; confirm before changing.
@permission_required('mod_sms.delete_smsmessage', login_url='/')
@login_required
def sms_campaign_del(request, object_id):
    """Delete/Stop sms campaign for the logged in user.

    **Attributes**:

        * ``object_id`` - Selected sms campaign object
        * ``object_list`` - Selected sms campaign objects

    **Logic Description**:

        * Delete/Stop the selected sms campaign from the sms campaign list.
          ``?stop_sms_campaign=...`` in the query string stops instead of
          deleting.  ``object_id == 0`` means "act on the POSTed selection".
    """
    stop_sms_campaign = request.GET.get('stop_sms_campaign', False)
    try:
        # When object_id is not 0: single campaign, scoped to the owner.
        sms_campaign = get_object_or_404(SMSCampaign, pk=object_id, user=request.user)
        if stop_sms_campaign:
            sms_campaign.status = SMS_CAMPAIGN_STATUS.END
            sms_campaign.save()
            request.session["msg"] = _('"%(name)s" is stopped.') % {'name': sms_campaign.name}
        else:
            request.session["msg"] = _('"%(name)s" is deleted.') % {'name': sms_campaign.name}
            sms_campaign.delete()
    except Exception:
        # When object_id is 0 (multiple records delete/stop).
        # Coerce the selected ids to integers and use a parameterized
        # ``id__in`` lookup instead of string-built SQL via .extra(), which
        # interpolated raw POST data into the query (SQL injection).
        try:
            values = [int(pk) for pk in request.POST.getlist('select')]
        except (TypeError, ValueError):
            values = []
        sms_campaign_list = SMSCampaign.objects.filter(id__in=values)
        if sms_campaign_list:
            if stop_sms_campaign:
                sms_campaign_list.update(status=SMS_CAMPAIGN_STATUS.END)
                request.session["msg"] = _('%(count)s sms campaign(s) are stopped.') % {'count': sms_campaign_list.count()}
            else:
                request.session["msg"] = _('%(count)s sms campaign(s) are deleted.') % {'count': sms_campaign_list.count()}
                sms_campaign_list.delete()
    return HttpResponseRedirect(redirect_url_to_smscampaign_list)
# NOTE(review): permission is 'change_smsmessage', not 'change_smscampaign' —
# looks inconsistent with the model being edited; confirm before changing.
@permission_required('mod_sms.change_smsmessage', login_url='/')
@login_required
def sms_campaign_change(request, object_id):
    """Update/Delete sms campaign for the logged in user.

    **Attributes**:

        * ``object_id`` - Selected campaign object
        * ``form`` - SMSCampaignForm
        * ``template`` - mod_sms/change.html

    **Logic Description**:

        * Update/delete selected sms campaign from the sms campaign list
          via SMSCampaignForm & get redirected to the sms campaign list.
    """
    # If dialer setting is not attached with user, redirect to sms campaign list
    if not user_dialer_setting(request.user):
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    sms_campaign = get_object_or_404(SMSCampaign, pk=object_id, user=request.user)
    form = SMSCampaignForm(request.user, request.POST or None, instance=sms_campaign)
    if form.is_valid():
        # Delete sms campaign (delegates to the del view for messaging).
        if request.POST.get('delete'):
            sms_campaign_del(request, object_id)
            return HttpResponseRedirect(redirect_url_to_smscampaign_list)
        else:
            # Update sms campaign.  form.save() already persists the object;
            # the previous extra obj.save() caused a redundant second DB write.
            form.save()
            request.session["msg"] = _('"%(name)s" is updated.') % {'name': request.POST['name']}
            return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    data = {
        'form': form,
        'action': 'update',
    }
    return render_to_response('mod_sms/change.html', data, context_instance=RequestContext(request))
@login_required
def sms_campaign_duplicate(request, id):
    """
    Duplicate sms campaign via DuplicateSMSCampaignForm.

    **Attributes**:

        * ``id`` - Selected sms campaign object
        * ``form`` - DuplicateSMSCampaignForm
        * ``template`` - mod_sms/sms_campaign_duplicate.html
    """
    # If dialer setting is not attached with user, redirect to sms campaign list
    if not user_dialer_setting(request.user):
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    form = DuplicateSMSCampaignForm(request.user, request.POST or None)
    request.session['error_msg'] = ''
    if request.method == 'POST':
        if form.is_valid():
            # Take one timestamp so startingdate/expirationdate/stoppeddate are
            # exactly consistent.  Three separate utcnow() calls (as before)
            # could differ by microseconds, making expirationdate != stoppeddate.
            now = datetime.utcnow().replace(tzinfo=utc)
            sms_campaign_obj = SMSCampaign.objects.get(pk=id)
            # Clearing pk makes save() INSERT a fresh row (clone the campaign).
            sms_campaign_obj.pk = None
            sms_campaign_obj.campaign_code = request.POST.get('campaign_code')
            sms_campaign_obj.name = request.POST.get('name')
            sms_campaign_obj.status = SMS_CAMPAIGN_STATUS.PAUSE
            sms_campaign_obj.startingdate = now
            sms_campaign_obj.expirationdate = now + relativedelta(days=+1)
            sms_campaign_obj.stoppeddate = now + relativedelta(days=+1)
            sms_campaign_obj.imported_phonebook = ''
            sms_campaign_obj.totalcontact = 0
            sms_campaign_obj.save()
            # Many to many field: can only be copied after the new pk exists.
            for pb in request.POST.getlist('phonebook'):
                sms_campaign_obj.phonebook.add(pb)
            return HttpResponseRedirect(redirect_url_to_smscampaign_list)
        else:
            request.session['error_msg'] = True
    data = {
        'sms_campaign_id': id,
        'form': form,
        'err_msg': request.session.get('error_msg'),
    }
    request.session['error_msg'] = ''
    return render_to_response('mod_sms/sms_campaign_duplicate.html', data, context_instance=RequestContext(request))
@login_required
def sms_campaign_text_message(request, object_id):
    """
    Display the text message of one sms campaign.

    **Attributes**:

        * ``object_id`` - Selected sms campaign object
        * ``template`` - mod_sms/sms_campaign_text_message.html
    """
    # Users without a dialer setting are sent back to the campaign list.
    if not user_dialer_setting(request.user):
        return HttpResponseRedirect(redirect_url_to_smscampaign_list)
    # Clear any stale error message before rendering.
    request.session['error_msg'] = ''
    campaign = get_object_or_404(SMSCampaign, pk=object_id, user=request.user)
    return render_to_response('mod_sms/sms_campaign_text_message.html',
                              {'sms_campaign': campaign},
                              context_instance=RequestContext(request))
@permission_required('mod_sms.view_sms_dashboard', login_url='/')
@login_required
def sms_dashboard(request, on_index=None):
    """SMS dashboard gives the following information

        * No of SMSCampaigns for logged in user
        * Total phonebook contacts
        * Total SMSCampaigns contacts
        * Amount of contact reached today
        * Disposition of sms via pie chart
        * SMS count shown on graph by days/hours

    **Attributes**:

        * ``template`` - mod_sms/sms_dashboard.html
        * ``form`` - SMSDashboardForm

    When ``on_index == 'yes'`` the context dict is returned instead of a
    rendered response (used to embed the dashboard in the index page).
    """
    # All sms_campaign ids for the logged in user
    sms_campaign_id_list = SMSCampaign.objects.values_list('id', flat=True).filter(user=request.user).order_by('id')
    # Contacts count which are active and belong to those phonebook(s) which is
    # associated with all sms campaign
    pb_active_contact_count = Contact.objects.filter(
        phonebook__smscampaign__in=sms_campaign_id_list,
        status=CONTACT_STATUS.ACTIVE).count()
    form = SMSDashboardForm(request.user, request.POST or None)
    # Per-disposition counters accumulated from the grouped queryset below.
    total_record = dict()
    total_sms_count = 0
    total_unsent = 0
    total_sent = 0
    total_delivered = 0
    total_failed = 0
    total_no_route = 0
    total_unauthorized = 0
    select_graph_for = 'sms count'  # default
    search_type = SEARCH_TYPE.D_Last_24_hours  # default Last 24 hours
    selected_sms_campaign = ''
    if sms_campaign_id_list:
        selected_sms_campaign = sms_campaign_id_list[0]  # default sms campaign id
    # selected_sms_campaign should not be empty
    if selected_sms_campaign:
        if form.is_valid():
            selected_sms_campaign = request.POST['smscampaign']
            search_type = request.POST['search_type']
        end_date = datetime.utcnow().replace(tzinfo=utc)
        start_date = calculate_date(search_type)
        # date_length is the prefix of the send_date string used to group
        # buckets: 10 = day, 13 = hour, 16 = minute granularity.
        if int(search_type) >= SEARCH_TYPE.B_Last_7_days:  # all options except 30 days
            date_length = 13
            if int(search_type) == SEARCH_TYPE.C_Yesterday:  # yesterday
                now = datetime.utcnow().replace(tzinfo=utc)
                start_date = datetime(now.year, now.month, now.day, 0, 0, 0, 0).replace(tzinfo=utc) \
                    - relativedelta(days=1)
                end_date = datetime(now.year, now.month, now.day, 23, 59, 59, 999999).replace(tzinfo=utc) \
                    - relativedelta(days=1)
            if int(search_type) >= SEARCH_TYPE.E_Last_12_hours:
                date_length = 16
        else:
            date_length = 10  # Last 30 days option
        select_data = {
            "send_date": "SUBSTR(CAST(send_date as CHAR(30)),1," + str(date_length) + ")"}
        # This calls list is used by pie chart (grouped by date AND status)
        list_sms = SMSMessage.objects.filter(
            sender=request.user,
            sms_campaign_id=selected_sms_campaign,
            send_date__range=(start_date, end_date))\
            .extra(select=select_data)\
            .values('send_date', 'status')\
            .annotate(Count('send_date'))\
            .order_by('send_date')
        for i in list_sms:
            # accumulate one counter per disposition status
            if i['status'] == 'Unsent':
                total_unsent += i['send_date__count']
            elif i['status'] == 'Sent':
                total_sent += i['send_date__count']
            elif i['status'] == 'Delivered':
                total_delivered += i['send_date__count']
            elif i['status'] == 'Failed':
                total_failed += i['send_date__count']
            elif i['status'] == 'No_Route':
                total_no_route += i['send_date__count']
            else:
                total_unauthorized += i['send_date__count']  # Unauthorized
            total_sms_count += i['send_date__count']
        # Second query: grouped by date only, feeds the time-series graph.
        list_sms = SMSMessage.objects.filter(
            sender=request.user,
            sms_campaign_id=selected_sms_campaign,
            send_date__range=(start_date, end_date))\
            .extra(select=select_data).values('send_date')\
            .annotate(Count('send_date')).order_by('send_date')
        mintime = start_date
        maxtime = end_date
        sms_dict = {}
        sms_dict_with_min = {}
        for data in list_sms:
            # send_date is a truncated string (see select_data); parse the
            # fixed-position fields back into an aware datetime.
            if int(search_type) >= SEARCH_TYPE.B_Last_7_days:
                ctime = datetime(int(data['send_date'][0:4]),
                                 int(data['send_date'][5:7]),
                                 int(data['send_date'][8:10]),
                                 int(data['send_date'][11:13]),
                                 0, 0, 0).replace(tzinfo=utc)
                if int(search_type) >= SEARCH_TYPE.E_Last_12_hours:
                    ctime = datetime(int(data['send_date'][0:4]),
                                     int(data['send_date'][5:7]),
                                     int(data['send_date'][8:10]),
                                     int(data['send_date'][11:13]),
                                     int(data['send_date'][14:16]),
                                     0, 0).replace(tzinfo=utc)
            else:
                ctime = datetime(int(data['send_date'][0:4]),
                                 int(data['send_date'][5:7]),
                                 int(data['send_date'][8:10]),
                                 0, 0, 0, 0).replace(tzinfo=utc)
            # widen [mintime, maxtime] to cover every observed bucket
            if ctime > maxtime:
                maxtime = ctime
            elif ctime < mintime:
                mintime = ctime
            # all options except 30 days: key buckets by hour (and minute)
            if int(search_type) >= SEARCH_TYPE.B_Last_7_days:
                sms_dict[int(ctime.strftime("%Y%m%d%H"))] = {
                    'sms_count': data['send_date__count']
                }
                sms_dict_with_min[int(ctime.strftime("%Y%m%d%H%M"))] = {
                    'sms_count': data['send_date__count']
                }
            else:
                # Last 30 days option: key buckets by day
                sms_dict[int(ctime.strftime("%Y%m%d"))] = {
                    'sms_count': data['send_date__count']
                }
        dateList = date_range(mintime, maxtime, q=search_type)
        i = 0
        total_record = {}
        # Walk every bucket in the range so the graph has zero-filled gaps;
        # keys are epoch milliseconds for the JS chart.
        for date in dateList:
            inttime = int(date.strftime("%Y%m%d"))
            # last 7 days | yesterday | last 24 hrs: one bucket per hour
            if (int(search_type) == SEARCH_TYPE.B_Last_7_days
               or int(search_type) == SEARCH_TYPE.C_Yesterday
               or int(search_type) == SEARCH_TYPE.D_Last_24_hours):
                for option in range(0, 24):
                    day_time = int(str(inttime) + str(option).zfill(2))
                    graph_day = datetime(int(date.strftime("%Y")),
                                         int(date.strftime("%m")),
                                         int(date.strftime("%d")),
                                         int(str(option).zfill(2))).replace(tzinfo=utc)
                    dt = int(1000 * time.mktime(graph_day.timetuple()))
                    total_record[dt] = {'sms_count': 0}
                    if day_time in sms_dict.keys():
                        total_record[dt]['sms_count'] += sms_dict[day_time]['sms_count']
            # last 12 hrs | last 6 hrs | last 1 hrs: one bucket per minute
            elif (int(search_type) == SEARCH_TYPE.E_Last_12_hours
                 or int(search_type) == SEARCH_TYPE.F_Last_6_hours
                 or int(search_type) == SEARCH_TYPE.G_Last_hour):
                for hour in range(0, 24):
                    for minute in range(0, 60):
                        hr_time = int(str(inttime) + str(hour).zfill(2) + str(minute).zfill(2))
                        graph_day = datetime(int(date.strftime("%Y")),
                                             int(date.strftime("%m")),
                                             int(date.strftime("%d")),
                                             int(str(hour).zfill(2)),
                                             int(str(minute).zfill(2))).replace(tzinfo=utc)
                        dt = int(1000 * time.mktime(graph_day.timetuple()))
                        total_record[dt] = {'sms_count': 0}
                        if hr_time in sms_dict_with_min.keys():
                            total_record[dt]['sms_count'] += sms_dict_with_min[hr_time]['sms_count']
            else:
                # Last 30 days option: one bucket per day
                graph_day = datetime(int(date.strftime("%Y")),
                                     int(date.strftime("%m")),
                                     int(date.strftime("%d"))).replace(tzinfo=utc)
                dt = int(1000 * time.mktime(graph_day.timetuple()))
                total_record[dt] = {'sms_count': 0}
                if inttime in sms_dict.keys():
                    total_record[dt]['sms_count'] += sms_dict[inttime]['sms_count']
        # sorting on date col (dict -> sorted list of (timestamp, counts))
        total_record = total_record.items()
        total_record = sorted(total_record, key=lambda k: k[0])
    # lineWithFocusChart: split the sorted buckets into x/y series
    final_charttype = "lineWithFocusChart"
    xdata = []
    ydata = []
    for i in total_record:
        xdata.append(i[0])
        ydata.append(i[1]['sms_count'])
    tooltip_date = "%d %b %y %H:%M %p"
    extra_serie1 = {
        "tooltip": {"y_start": "", "y_end": " SMS"},
        "date_format": tooltip_date
    }
    final_chartdata = {
        'x': xdata,
        'name1': 'SMS', 'y1': ydata, 'extra1': extra_serie1,
    }
    # Contacts which are successfully messaged today for running sms campaigns
    reached_contact = 0
    if sms_campaign_id_list:
        now = datetime.utcnow().replace(tzinfo=utc)
        start_date = datetime(now.year, now.month, now.day, 0, 0, 0, 0).replace(tzinfo=utc)
        end_date = datetime(now.year, now.month, now.day, 23, 59, 59, 999999).replace(tzinfo=utc)
        sms_campaign_subscriber = SMSCampaignSubscriber.objects.filter(
            sms_campaign_id__in=sms_campaign_id_list,
            status=SMS_SUBSCRIBER_STATUS.COMPLETE,
            updated_date__range=(start_date, end_date)).count()
        reached_contact += sms_campaign_subscriber
    # PieChart: disposition percentages (only filled when sms were found)
    sms_analytic_charttype = "pieChart"
    xdata = []
    ydata = []
    sms_analytic_chartdata = {'x': xdata, 'y1': ydata}
    if total_sms_count != 0:
        for i in SMS_MESSAGE_STATUS:
            xdata.append(i[0].upper())
        # Y-axis order depend upon SMS_MESSAGE_STATUS
        # 'UNSENT', 'SENT', 'DELIVERED', 'FAILED', 'NO_ROUTE', 'UNAUTHORIZED'
        ydata = [
            percentage(total_unsent, total_sms_count),
            percentage(total_sent, total_sms_count),
            percentage(total_delivered, total_sms_count),
            percentage(total_failed, total_sms_count),
            percentage(total_no_route, total_sms_count),
            percentage(total_unauthorized, total_sms_count),
        ]
        # colors follow the same status order as ydata
        color_list = [
            COLOR_SMS_DISPOSITION['UNSENT'],
            COLOR_SMS_DISPOSITION['SENT'],
            COLOR_SMS_DISPOSITION['DELIVERED'],
            COLOR_SMS_DISPOSITION['FAILED'],
            COLOR_SMS_DISPOSITION['NO_ROUTE'],
            COLOR_SMS_DISPOSITION['UNAUTHORIZED'],
        ]
        extra_serie = {
            "tooltip": {"y_start": "", "y_end": " %"},
            "color_list": color_list
        }
        kwargs1 = {}
        kwargs1['resize'] = True
        sms_analytic_chartdata = {
            'x': xdata, 'y1': ydata, 'extra1': extra_serie,
            'kwargs1': kwargs1,
        }
    data = {
        'form': form,
        'SEARCH_TYPE': SEARCH_TYPE,
        'pb_active_contact_count': pb_active_contact_count,
        'reached_contact': reached_contact,
        'total_record': total_record,
        'select_graph_for': select_graph_for,
        'total_sms_count': total_sms_count,
        'total_unsent': total_unsent,
        'total_sent': total_sent,
        'total_delivered': total_delivered,
        'total_failed': total_failed,
        'total_no_route': total_no_route,
        'total_unauthorized': total_unauthorized,
        'unsent_color': COLOR_SMS_DISPOSITION['UNSENT'],
        'sent_color': COLOR_SMS_DISPOSITION['SENT'],
        'delivered_color': COLOR_SMS_DISPOSITION['DELIVERED'],
        'failed_color': COLOR_SMS_DISPOSITION['FAILED'],
        'no_route_color': COLOR_SMS_DISPOSITION['NO_ROUTE'],
        'unauthorized_color': COLOR_SMS_DISPOSITION['UNAUTHORIZED'],
        'final_chartcontainer': 'lineplusbarwithfocuschart_container',
        'final_chartdata': final_chartdata,
        'final_charttype': final_charttype,
        'final_extra': {
            'x_is_date': True,
            'x_axis_format': '%d %b %Y',
            'tag_script_js': True,
            'jquery_on_ready': False,
        },
        'sms_analytic_chartcontainer': 'piechart_container',
        'sms_analytic_charttype': sms_analytic_charttype,
        'sms_analytic_chartdata': sms_analytic_chartdata,
        'sms_analytic_extra': {
            'x_is_date': False,
            'x_axis_format': '',
            'tag_script_js': True,
            'jquery_on_ready': False,
        },
    }
    # embedded on the index page: hand back the raw context
    if on_index == 'yes':
        return data
    return render_to_response('mod_sms/sms_dashboard.html', data, context_instance=RequestContext(request))
@login_required
@permission_required('mod_sms.view_sms_report', login_url='/')
def sms_report(request):
    """SMS Report

    **Attributes**:

        * ``form`` - SMSSearchForm
        * ``template`` - mod_sms/sms_report.html

    **Logic Description**:

        * Get SMS list according to search parameters for logged-in user

    **Important variable**:

        * ``request.session['sms_record_kwargs']`` - stores sms kwargs
    """
    sort_col_field_list = ['send_date', 'recipient_number', 'uuid', 'status', 'status_message', 'gateway']
    pag_vars = get_pagination_vars(request, sort_col_field_list, default_sort_field='send_date')
    from_date = ''
    to_date = ''
    status = 'all'
    smscampaign = ''
    form = SMSSearchForm(request.user, request.POST or None)
    action = 'tabs-1'
    kwargs = {}
    # post_var_with_page: 1 when filters come from the POSTed form or from the
    # session (pagination/sort links); 0 means "apply the defaults" below.
    if form.is_valid():
        post_var_with_page = 1
        field_list = ['start_date', 'end_date', 'status', 'smscampaign']
        unset_session_var(request, field_list)
        from_date = getvar(request, 'from_date')
        to_date = getvar(request, 'to_date')
        start_date = ceil_strdate(str(from_date), 'start')
        end_date = ceil_strdate(str(to_date), 'end')
        converted_start_date = start_date.strftime('%Y-%m-%d')
        converted_end_date = end_date.strftime('%Y-%m-%d')
        # remember the search so pagination/sort links can reuse it
        request.session['session_start_date'] = converted_start_date
        request.session['session_end_date'] = converted_end_date
        status = getvar(request, 'status', setsession=True)
        smscampaign = getvar(request, 'smscampaign', setsession=True)
    if request.GET.get('page') or request.GET.get('sort_by'):
        # pagination or re-sort: restore the last search from the session
        post_var_with_page = 1
        start_date = request.session.get('session_start_date')
        end_date = request.session.get('session_end_date')
        start_date = ceil_strdate(start_date, 'start')
        end_date = ceil_strdate(end_date, 'end')
        status = request.session.get('session_status')
        smscampaign = request.session.get('session_smscampaign')
        form = SMSSearchForm(request.user,
                             initial={'from_date': start_date.strftime('%Y-%m-%d'),
                                      'to_date': end_date.strftime('%Y-%m-%d'),
                                      'status': status,
                                      'smscampaign': smscampaign})
    if post_var_with_page == 0:
        # default: today's records, all statuses, all campaigns
        tday = datetime.utcnow().replace(tzinfo=utc)
        from_date = tday.strftime('%Y-%m-%d')
        to_date = tday.strftime('%Y-%m-%d')
        start_date = datetime(tday.year, tday.month, tday.day, 0, 0, 0, 0).replace(tzinfo=utc)
        end_date = datetime(tday.year, tday.month, tday.day, 23, 59, 59, 999999).replace(tzinfo=utc)
        status = 'all'
        smscampaign = ''
        form = SMSSearchForm(request.user, initial={'from_date': from_date, 'to_date': to_date,
                                                    'status': status, 'smscampaign': smscampaign})
        # unset session var
        request.session['session_start_date'] = start_date
        request.session['session_end_date'] = end_date
        request.session['session_status'] = status
        request.session['session_smscampaign'] = smscampaign
    # build the ORM filters from the (possibly partial) search values
    kwargs['sender'] = request.user
    if start_date and end_date:
        kwargs['send_date__range'] = (start_date, end_date)
    if start_date and end_date == '':
        kwargs['send_date__gte'] = start_date
    if start_date == '' and end_date:
        kwargs['send_date__lte'] = end_date
    if status and status != 'all':
        kwargs['status__exact'] = status
    if smscampaign and smscampaign != '0':
        kwargs['sms_campaign_id'] = smscampaign
    smslist = SMSMessage.objects.filter(**kwargs)
    all_sms_list = smslist.values_list('id', flat=True)
    # only the current page is ordered and sliced
    sms_list = smslist.order_by(pag_vars['sort_order'])[pag_vars['start_page']:pag_vars['end_page']]
    # Session variable is used to get record set with searched option
    # into export file
    request.session['sms_record_kwargs'] = kwargs
    # daily totals: group on the YYYY-MM-DD prefix of send_date
    select_data = {"send_date": "SUBSTR(CAST(send_date as CHAR(30)),1,10)"}
    # Get Total Rrecords from SMSMessage Report table for Daily SMS Report
    total_data = all_sms_list.extra(select=select_data).values('send_date')\
        .annotate(Count('send_date')).order_by('-send_date')
    # Following code will count total sms
    if total_data.count() != 0:
        total_sms = sum([x['send_date__count'] for x in total_data])
    else:
        total_sms = 0
    data = {
        'form': form,
        'from_date': from_date,
        'all_sms_list': all_sms_list,
        'sms_list': sms_list,
        'sms_count': all_sms_list.count() if all_sms_list else 0,
        'SMS_REPORT_COLUMN_NAME': SMS_REPORT_COLUMN_NAME,
        'col_name_with_order': pag_vars['col_name_with_order'],
        'start_date': start_date,
        'end_date': end_date,
        'to_date': to_date,
        'action': action,
        'status': status,
        'total_data': total_data.reverse(),
        'total_sms': total_sms,
    }
    return render_to_response('mod_sms/sms_report.html', data, context_instance=RequestContext(request))
@login_required
def export_sms_report(request):
    """Export CSV/XLS/JSON file of SMS record.

    **Important variable**:

        * ``request.session['sms_record_kwargs']`` - stores sms query set

    **Exported fields**: ['sender', 'recipient_number', 'send_date', 'uuid',
                          'status', 'status_message', 'gateway']
    """
    format_type = request.GET['format']
    # get the response object, this can be used as a stream.
    response = HttpResponse(content_type='text/%s' % format_type)
    # force download.
    response['Content-Disposition'] = 'attachment;filename=sms_export.%s' % format_type
    # Filters saved by the sms_report view; re-run the same query for export.
    # (removed a dead `kwargs = {}` that was immediately overwritten)
    kwargs = request.session['sms_record_kwargs']
    qs = SMSMessage.objects.filter(**kwargs)
    headers = ('sender', 'recipient_number', 'send_date', 'uuid',
               'status', 'status_message', 'gateway')
    list_val = []
    for i in qs:
        send_date = i.send_date
        if format_type == Export_choice.JSON or format_type == Export_choice.XLS:
            # JSON/XLS writers cannot serialize datetime objects directly.
            send_date = str(i.send_date)
        gateway = i.gateway.name if i.gateway else ''
        list_val.append([
            i.sender.username,
            i.recipient_number,
            send_date,
            str(i.uuid),
            i.status,
            i.status_message,
            gateway,
        ])
    data = tablib.Dataset(*list_val, headers=headers)
    if format_type == Export_choice.XLS:
        response.write(data.xls)
    elif format_type == Export_choice.CSV:
        response.write(data.csv)
    elif format_type == Export_choice.JSON:
        response.write(data.json)
    return response
| mpl-2.0 |
loopCM/chromium | third_party/libxml/src/check-xsddata-test-suite.py | 343 | 10682 | #!/usr/bin/python
import sys
import time
import os
import string
import StringIO
sys.path.insert(0, "python")
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
# runtime switches: debug traces, per-section reporting, overall chatter
debug = 0
verbose = 0
quiet = 1
#
# the testsuite description
#
CONF=os.path.join(os.path.dirname(__file__), "test/xsdtest/xsdtestsuite.xml")
LOG="check-xsddata-test-suite.log"
# every failure detail is appended here instead of stdout
log = open(LOG, "w")
# global pass/fail counters, updated by the handle_* functions below
nb_schemas_tests = 0
nb_schemas_success = 0
nb_schemas_failed = 0
nb_instances_tests = 0
nb_instances_success = 0
nb_instances_failed = 0
libxml2.lineNumbersDefault(1)
#
# Error and warnng callbacks
#
def callback(ctx, str):
    """Error/warning handler: append libxml2 messages to the global log."""
    global log
    log.write("{0}{1}".format(ctx, str))
libxml2.registerErrorHandler(callback, "")
#
# Resolver callback
#
resources = {}
def resolver(URL, ID, ctxt):
    """Entity loader callback: serve in-memory <resource> content by URL.

    Returns a file-like object for *URL* when it was registered in the
    global ``resources`` dict, otherwise logs the failure and returns None
    so libxml2 falls back to its default loader.
    """
    global resources
    # membership test replaces the deprecated, Python-2-only dict.has_key()
    if URL in resources:
        return(StringIO.StringIO(resources[URL]))
    log.write("Resolver failure: asked %s\n" % (URL))
    log.write("resources: %s\n" % (resources))
    return None
#
# handle a valid instance
#
def handle_valid(node, schema):
    """Validate one <valid> instance against *schema*; it must pass.

    Failures (parse error or validation error) are logged with the full
    instance text and counted in nb_instances_failed.
    """
    global log
    global nb_instances_success
    global nb_instances_failed
    # the instance may carry a DTD prelude in the "dtd" attribute
    instance = node.prop("dtd")
    if instance == None:
        instance = ""
    # concatenate all non-text children to rebuild the instance document
    child = node.children
    while child != None:
        if child.type != 'text':
            instance = instance + child.serialize()
        child = child.next
    # snapshot allocation count to detect leaks introduced by this instance
    mem = libxml2.debugMemory(1);
    try:
        doc = libxml2.parseDoc(instance)
    except:
        doc = None
    if doc == None:
        log.write("\nFailed to parse correct instance:\n-----\n")
        log.write(instance)
        log.write("\n-----\n")
        nb_instances_failed = nb_instances_failed + 1
        return
    if debug:
        print "instance line %d" % (node.lineNo())
    try:
        ctxt = schema.relaxNGNewValidCtxt()
        ret = doc.relaxNGValidateDoc(ctxt)
        del ctxt
    except:
        # any binding-level exception counts as a validation failure
        ret = -1
    doc.freeDoc()
    if mem != libxml2.debugMemory(1):
        print "validating instance %d line %d leaks" % (
            nb_instances_tests, node.lineNo())
    if ret != 0:
        log.write("\nFailed to validate correct instance:\n-----\n")
        log.write(instance)
        log.write("\n-----\n")
        nb_instances_failed = nb_instances_failed + 1
    else:
        nb_instances_success = nb_instances_success + 1
#
# handle an invalid instance
#
def handle_invalid(node, schema):
    """Validate one <invalid> instance against *schema*; it must FAIL.

    A validation that unexpectedly succeeds is logged and counted in
    nb_instances_failed; an instance that does not even parse is only
    logged (the original leak check is left commented out).
    """
    global log
    global nb_instances_success
    global nb_instances_failed
    # the instance may carry a DTD prelude in the "dtd" attribute
    instance = node.prop("dtd")
    if instance == None:
        instance = ""
    # concatenate all non-text children to rebuild the instance document
    child = node.children
    while child != None:
        if child.type != 'text':
            instance = instance + child.serialize()
        child = child.next
#    mem = libxml2.debugMemory(1);
    try:
        doc = libxml2.parseDoc(instance)
    except:
        doc = None
    if doc == None:
        log.write("\nStrange: failed to parse incorrect instance:\n-----\n")
        log.write(instance)
        log.write("\n-----\n")
        return
    if debug:
        print "instance line %d" % (node.lineNo())
    try:
        ctxt = schema.relaxNGNewValidCtxt()
        ret = doc.relaxNGValidateDoc(ctxt)
        del ctxt
    except:
        ret = -1
    doc.freeDoc()
#    if mem != libxml2.debugMemory(1):
#	print "validating instance %d line %d leaks" % (
#		  nb_instances_tests, node.lineNo())
    if ret == 0:
        # the schema accepted an instance it was supposed to reject
        log.write("\nFailed to detect validation problem in instance:\n-----\n")
        log.write(instance)
        log.write("\n-----\n")
        nb_instances_failed = nb_instances_failed + 1
    else:
        nb_instances_success = nb_instances_success + 1
#
# handle an incorrect test
#
def handle_correct(node):
    """Compile a schema that is expected to be valid.

    Returns the parsed RelaxNG schema object, or None (after logging the
    schema text) when compilation unexpectedly failed.
    """
    global log
    global nb_schemas_success
    global nb_schemas_failed
    # rebuild the schema text from all non-text children
    pieces = []
    child = node.children
    while child != None:
        if child.type != 'text':
            pieces.append(child.serialize())
        child = child.next
    schema = "".join(pieces)
    try:
        rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
        rngs = rngp.relaxNGParse()
    except:
        rngs = None
    if rngs == None:
        log.write("\nFailed to compile correct schema:\n-----\n")
        log.write(schema)
        log.write("\n-----\n")
        nb_schemas_failed = nb_schemas_failed + 1
    else:
        nb_schemas_success = nb_schemas_success + 1
    return rngs
def handle_incorrect(node):
    """Compile a schema that is expected to be invalid.

    Success here means the compilation FAILED.  Always returns None, so
    the caller never validates instances against a bogus schema.
    """
    global log
    global nb_schemas_success
    global nb_schemas_failed
    # rebuild the schema text from all non-text children
    pieces = []
    child = node.children
    while child != None:
        if child.type != 'text':
            pieces.append(child.serialize())
        child = child.next
    schema = "".join(pieces)
    try:
        rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
        rngs = rngp.relaxNGParse()
    except:
        rngs = None
    if rngs != None:
        # the parser accepted a schema it should have rejected
        log.write("\nFailed to detect schema error in:\n-----\n")
        log.write(schema)
        log.write("\n-----\n")
        nb_schemas_failed = nb_schemas_failed + 1
    else:
        nb_schemas_success = nb_schemas_success + 1
    return None
#
# resource handling: keep a dictionary of URL->string mappings
#
def handle_resource(node, dir):
    """Register the text of a <resource> element in the global dict.

    The content is stored under its (optionally ``dir``-prefixed) name so
    the entity resolver can later serve it by URL.
    """
    global resources
    try:
        name = node.prop('name')
    except:
        name = None
    if name == None or name == '':
        log.write("resource has no name")
        return
    if dir != None:
        name = dir + '/' + name
    # rebuild the resource body from all non-text children
    pieces = []
    child = node.children
    while child != None:
        if child.type != 'text':
            pieces.append(child.serialize())
        child = child.next
    resources[name] = "".join(pieces)
#
# dir handling: pseudo directory resources
#
def handle_dir(node, dir):
    """Register a <dir> element: recurse into nested dirs and resources.

    ``dir`` is the parent pseudo-directory prefix (None at the top level);
    the element's own name is appended to it before recursing.
    """
    try:
        name = node.prop('name')
    except:
        name = None
    if name == None or name == '':
        log.write("resource has no name")
        return;
    if dir != None:
        # name = libxml2.buildURI(name, dir)
        name = dir + '/' + name
    dirs = node.xpathEval('dir')
    # renamed loop variable: 'for dir in dirs' shadowed the parameter
    # (and the builtin), leaving 'dir' rebound after the loop
    for subdir in dirs:
        handle_dir(subdir, name)
    res = node.xpathEval('resource')
    for r in res:
        handle_resource(r, name)
#
# handle a testCase element
#
def handle_testCase(node):
global nb_schemas_tests
global nb_instances_tests
global resources
sections = node.xpathEval('string(section)')
log.write("\n ======== test %d line %d section %s ==========\n" % (
nb_schemas_tests, node.lineNo(), sections))
resources = {}
if debug:
print "test %d line %d" % (nb_schemas_tests, node.lineNo())
dirs = node.xpathEval('dir')
for dir in dirs:
handle_dir(dir, None)
res = node.xpathEval('resource')
for r in res:
handle_resource(r, None)
tsts = node.xpathEval('incorrect')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <incorrect> example" %(node.lineNo())
schema = handle_incorrect(tsts[0])
else:
tsts = node.xpathEval('correct')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <correct> example"% (node.lineNo())
schema = handle_correct(tsts[0])
else:
print "warning <testCase> line %d has no <correct> nor <incorrect> child" % (node.lineNo())
nb_schemas_tests = nb_schemas_tests + 1;
valids = node.xpathEval('valid')
invalids = node.xpathEval('invalid')
nb_instances_tests = nb_instances_tests + len(valids) + len(invalids)
if schema != None:
for valid in valids:
handle_valid(valid, schema)
for invalid in invalids:
handle_invalid(invalid, schema)
#
# handle a testSuite element
#
def handle_testSuite(node, level = 0):
    """Run a <testSuite>: print its description, run nested cases/suites.

    When ``verbose`` is set, counter snapshots taken on entry are used to
    print per-suite pass/fail deltas on exit.  ``level`` tracks nesting
    depth (section headers are only printed at the top level).
    """
    global nb_schemas_tests, nb_schemas_success, nb_schemas_failed
    global nb_instances_tests, nb_instances_success, nb_instances_failed
    # snapshot the counters so the deltas for this suite can be reported
    if verbose and level >= 0:
        old_schemas_tests = nb_schemas_tests
        old_schemas_success = nb_schemas_success
        old_schemas_failed = nb_schemas_failed
        old_instances_tests = nb_instances_tests
        old_instances_success = nb_instances_success
        old_instances_failed = nb_instances_failed
    docs = node.xpathEval('documentation')
    authors = node.xpathEval('author')
    if docs != []:
        msg = ""
        for doc in docs:
            msg = msg + doc.content + " "
        if authors != []:
            msg = msg + "written by "
            for author in authors:
                msg = msg + author.content + " "
        if quiet == 0:
            print msg
    sections = node.xpathEval('section')
    if verbose and sections != [] and level <= 0:
        msg = ""
        for section in sections:
            msg = msg + section.content + " "
        if quiet == 0:
            print "Tests for section %s" % (msg)
    # run direct test cases, then recurse into nested suites
    for test in node.xpathEval('testCase'):
        handle_testCase(test)
    for test in node.xpathEval('testSuite'):
        handle_testSuite(test, level + 1)
    # report this suite's deltas against the entry snapshot
    if verbose and level >= 0 :
        if sections != []:
            msg = ""
            for section in sections:
                msg = msg + section.content + " "
            print "Result of tests for section %s" % (msg)
        elif docs != []:
            msg = ""
            for doc in docs:
                msg = msg + doc.content + " "
            print "Result of tests for %s" % (msg)
        if nb_schemas_tests != old_schemas_tests:
            print "found %d test schemas: %d success %d failures" % (
                nb_schemas_tests - old_schemas_tests,
                nb_schemas_success - old_schemas_success,
                nb_schemas_failed - old_schemas_failed)
        if nb_instances_tests != old_instances_tests:
            print "found %d test instances: %d success %d failures" % (
                nb_instances_tests - old_instances_tests,
                nb_instances_success - old_instances_success,
                nb_instances_failed - old_instances_failed)
#
# Parse the conf file
#
libxml2.substituteEntitiesDefault(1);
testsuite = libxml2.parseFile(CONF)
#
# Error and warnng callbacks
#
# NOTE(review): this re-defines and re-registers the callback already set up
# earlier in the file — the duplication looks redundant; confirm intent.
def callback(ctx, str):
    global log
    log.write("%s%s" % (ctx, str))
libxml2.registerErrorHandler(callback, "")
# route external entity loads through the in-memory resource dict
libxml2.setEntityLoader(resolver)
root = testsuite.getRootElement()
if root.name != 'testSuite':
    print "%s doesn't start with a testSuite element, aborting" % (CONF)
    sys.exit(1)
if quiet == 0:
    print "Running Relax NG testsuite"
# run the whole suite recursively, accumulating the global counters
handle_testSuite(root)
# final totals (always shown when anything failed)
if quiet == 0 or nb_schemas_failed != 0:
    print "\nTOTAL:\nfound %d test schemas: %d success %d failures" % (
        nb_schemas_tests, nb_schemas_success, nb_schemas_failed)
if quiet == 0 or nb_instances_failed != 0:
    print "found %d test instances: %d success %d failures" % (
        nb_instances_tests, nb_instances_success, nb_instances_failed)
testsuite.freeDoc()
# Memory debug specific: everything must be freed before the leak check
libxml2.relaxNGCleanupTypes()
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    if quiet == 0:
        print "OK"
else:
    print "Memory leak %d bytes" % (libxml2.debugMemory(1))
    libxml2.dumpMemory()
| bsd-3-clause |
bat-serjo/vivisect | envi/tests/msp430/imov.py | 7 | 9646 | from envi.archs.msp430.regs import *
checks = [
# MOV
(
'MOV r14, r15',
{ 'regs': [(REG_R14, 0x1122), (REG_R15, 0x3344)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f4e", 'data': "" },
{ 'regs': [(REG_R14, 0x1122), (REG_R15, 0x1122)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f4e", 'data': "" }
),
(
'MOV #0xaabb, r15',
{ 'regs': [], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "3f40bbaa", 'data': "" },
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "3f40bbaa", 'data': "" }
),
(
'MOV @r14, r15',
{ 'regs': [(REG_R14, 0x1002)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "2f4e", 'data': "00112233445566" },
{ 'regs': [(REG_R14, 0x1002), (REG_R15, 0x3322)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "2f4e", 'data': "00112233445566" }
),
(
'MOV r14, @r15',
{ 'regs': [(REG_R14, 0xaabb), (REG_R15, 0x1002)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "8f4e0000", 'data': "00112233445566" },
{ 'regs': [(REG_R14, 0xaabb), (REG_R15, 0x1002)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "8f4e0000", 'data': "0011bbaa445566" }
),
(
'MOV @r14+, r15',
{ 'regs': [(REG_R14, 0x1002)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "3f4e", 'data': "00112233445566" },
{ 'regs': [(REG_R14, 0x1004), (REG_R15, 0x3322)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "3f4e", 'data': "00112233445566" }
),
# PC
(
'MOV pc, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f40", 'data': "" },
{ 'regs': [(REG_R15, 0x4402)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f40", 'data': "" }
),
(
'MOV @pc, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "2f40aabb", 'data': "" },
{ 'regs': [(REG_R15, 0xbbaa)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "2f40aabb", 'data': "" }
),
(
'MOV r15, @pc',
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "804f0000", 'data': "" },
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "804f0000bbaa", 'data': "" }
),
# Constant Generators
# SR X(Rn) (0)
(
'MOV 0(sr), r15',
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "1f420000", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "1f420000", 'data': "" }
),
# SR @Rn 4
(
'MOV @sr, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "2f42", 'data': "" },
{ 'regs': [(REG_R15, 0x4)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "2f42", 'data': "" }
),
# SR @Rn+ 8
(
'MOV @sr+, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "3f42", 'data': "" },
{ 'regs': [(REG_R15, 0x8)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "3f42", 'data': "" }
),
# CG Rn 0
(
'MOV cg, r15',
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f43", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "0f43", 'data': "" }
),
# CG X(Rn) 1
(
'MOV 0(cg), r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "1f430000", 'data': "" },
{ 'regs': [(REG_R15, 0x1)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "1f430000", 'data': "" }
),
# CG @Rn 2
(
'MOV @cg, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "2f43", 'data': "" },
{ 'regs': [(REG_R15, 0x2)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "2f43", 'data': "" }
),
# CG @Rn+ -1
(
'MOV @cg+, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "3f43", 'data': "" },
{ 'regs': [(REG_R15, 0xffff)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "3f43", 'data': "" }
),
# MOV.b
(
'MOV.b r14, r15',
{ 'regs': [(REG_R14, 0x1122), (REG_R15, 0x3344)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f4e", 'data': "" },
{ 'regs': [(REG_R14, 0x1122), (REG_R15, 0x22)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f4e", 'data': "" }
),
(
'MOV.b pc, r15',
{ 'regs': [], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f40", 'data': "" },
{ 'regs': [(REG_R15, 0x02)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f40", 'data': "" }
),
(
'MOV.b #0xaabb, r15',
{ 'regs': [], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "7f40bb00", 'data': "" },
{ 'regs': [(REG_R15, 0xbb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "7f40bb00", 'data': "" }
),
(
'MOV.b @r14, r15',
{ 'regs': [(REG_R14, 0x1002)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "6f4e", 'data': "00112233445566" },
{ 'regs': [(REG_R14, 0x1002), (REG_R15, 0x22)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "6f4e", 'data': "00112233445566" }
),
(
'MOV.b r14, @r15',
{ 'regs': [(REG_R14, 0xaabb), (REG_R15, 0x1002)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "cf4e0000", 'data': "00112233445566" },
{ 'regs': [(REG_R14, 0xaabb), (REG_R15, 0x1002)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "cf4e0000", 'data': "0011bb33445566" }
),
(
'MOV.b @r14+, r15',
{ 'regs': [(REG_R14, 0x1002)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "7f4e", 'data': "00112233445566" },
{ 'regs': [(REG_R14, 0x1003), (REG_R15, 0x22)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "7f4e", 'data': "00112233445566" }
),
# PC
(
'MOV.b pc, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f40", 'data': "" },
{ 'regs': [(REG_R15, 0x02)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f40", 'data': "" }
),
(
'MOV.b @pc, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "6f40aabb", 'data': "" },
{ 'regs': [(REG_R15, 0xaa)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "6f40aabb", 'data': "" }
),
(
'MOV.b r15, @pc',
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "c04f0000", 'data': "" },
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "c04f0000bb", 'data': "" }
),
# Constant Generators
# SR X(Rn) (0)
(
'MOV.b 0(sr), r15',
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "5f420000", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "5f420000", 'data': "" }
),
# SR @Rn 4
(
'MOV.b @sr, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "6f42", 'data': "" },
{ 'regs': [(REG_R15, 0x4)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "6f42", 'data': "" }
),
# SR @Rn+ 8
(
'MOV.b @sr+, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "7f42", 'data': "" },
{ 'regs': [(REG_R15, 0x8)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "7f42", 'data': "" }
),
# CG Rn 0
(
'MOV.b cg, r15',
{ 'regs': [(REG_R15, 0xaabb)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f43", 'data': "" },
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "4f43", 'data': "" }
),
# CG X(Rn) 1
(
'MOV.b 0(cg), r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "5f430000", 'data': "" },
{ 'regs': [(REG_R15, 0x1)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "5f430000", 'data': "" }
),
# CG @Rn 2
(
'MOV.b @cg, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "6f43", 'data': "" },
{ 'regs': [(REG_R15, 0x2)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "6f43", 'data': "" }
),
# CG @Rn+ -1
(
'MOV.b @cg+, r15',
{ 'regs': [(REG_R15, 0x0)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "7f43", 'data': "" },
{ 'regs': [(REG_R15, 0xff)], 'flags': [(SR_N, 0), (SR_Z, 0), (SR_C, 0), (SR_V, 0)], 'code': "7f43", 'data': "" }
),
]
| apache-2.0 |
glemaitre/UnbalancedDataset | examples/under-sampling/plot_random_under_sampler.py | 2 | 2013 | """
=====================
Random under-sampling
=====================
An illustration of the random under-sampling method.
"""
# Authors: Christos Aridas
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.under_sampling import RandomUnderSampler
print(__doc__)
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
                           n_informative=3, n_redundant=1, flip_y=0,
                           n_features=20, n_clusters_per_class=1,
                           n_samples=200, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply the random under-sampling; return_indices=True also returns the
# indices of the samples that were kept.
rus = RandomUnderSampler(return_indices=True)
X_resampled, y_resampled, idx_resampled = rus.fit_sample(X, y)
# Project the resampled data with the PCA fitted on the full dataset.
X_res_vis = pca.transform(X_resampled)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Samples present in the original set but not in the resampled one.
idx_samples_removed = np.setdiff1d(np.arange(X_vis.shape[0]),
                                   idx_resampled)
idx_class_0 = y_resampled == 0
plt.scatter(X_res_vis[idx_class_0, 0], X_res_vis[idx_class_0, 1],
            alpha=.8, label='Class #0')
plt.scatter(X_res_vis[~idx_class_0, 0], X_res_vis[~idx_class_0, 1],
            alpha=.8, label='Class #1')
plt.scatter(X_vis[idx_samples_removed, 0], X_vis[idx_samples_removed, 1],
            alpha=.8, label='Removed samples')
# make nice plotting
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.set_xlim([-6, 6])
ax.set_ylim([-6, 6])
plt.title('Under-sampling using random under-sampling')
plt.legend()
plt.tight_layout()
plt.show()
| mit |
CameronLonsdale/sec-tools | letsjusthackshit/lib/python3.5/site-packages/pip/_vendor/lockfile/mkdirlockfile.py | 536 | 3096 | from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
    """Lock file by creating a directory.

    The lock itself is a directory created with os.mkdir (which fails with
    EEXIST when the directory already exists, so creation doubles as the
    atomic acquire test).  Ownership is recorded by a uniquely named marker
    file placed inside that directory.
    """
    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = MkdirLockFile('somefile')
        >>> lock = MkdirLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded, timeout)
        # Lock file itself is a directory. Place the unique file name into
        # it.  hostname/tname/pid together identify this owner.
        self.unique_name = os.path.join(self.lock_file,
                                        "%s.%s%s" % (self.hostname,
                                                     self.tname,
                                                     self.pid))
    def acquire(self, timeout=None):
        # Per-call timeout overrides the instance-level default.
        timeout = timeout if timeout is not None else self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        # Poll interval: fixed 0.1s when blocking indefinitely, otherwise a
        # tenth of the requested timeout.
        if timeout is None:
            wait = 0.1
        else:
            wait = max(0, timeout / 10)
        while True:
            try:
                os.mkdir(self.lock_file)
            except OSError:
                # sys.exc_info() form kept for very old Python compatibility.
                err = sys.exc_info()[1]
                if err.errno == errno.EEXIST:
                    # Already locked.
                    if os.path.exists(self.unique_name):
                        # Already locked by me.
                        return
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            # Someone else has the lock.
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    # Locked by someone else and time remains: poll again.
                    time.sleep(wait)
                else:
                    # Couldn't create the lock for some other reason
                    raise LockFailed("failed to create %s" % self.lock_file)
            else:
                # mkdir succeeded: we own the lock; drop our marker file.
                open(self.unique_name, "wb").close()
                return
    def release(self):
        # Only the owner (proved by the marker file) may release.
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.unique_name)
        os.rmdir(self.lock_file)
    def is_locked(self):
        # Locked by anyone (not necessarily us).
        return os.path.exists(self.lock_file)
    def i_am_locking(self):
        # Locked, and our marker file is present inside the lock directory.
        return (self.is_locked() and
                os.path.exists(self.unique_name))
    def break_lock(self):
        # Forcibly remove the lock regardless of owner: delete all marker
        # files, then the lock directory itself.
        if os.path.exists(self.lock_file):
            for name in os.listdir(self.lock_file):
                os.unlink(os.path.join(self.lock_file, name))
            os.rmdir(self.lock_file)
| mit |
badbytes/pymeg | pdf2py/pdf_functions.py | 1 | 1090 | class compute:
    def fft(self):
        # Compute the spectrum of the loaded data block via meg.fftmeg and
        # store it on self.results.fft (nothing is returned).  Sampling
        # rate is the inverse of the header's sample_period.
        # NOTE(review): this reads self.hdr.header_data... while filter()
        # reads self.data.hdr.header_data... - confirm which attribute
        # path is the canonical one.
        from meg import fftmeg
        self.results.fft = fftmeg.calc(self.data.data_block ,1/self.hdr.header_data.sample_period, epochs=self.data.numofepochs)
    def filter(self, band=None, order=None, Wn=None):
        # Zero-phase (filtfilt) filtering of the data block; the filtered
        # array is stored on self.filtereddata (nothing is returned).
        # band: 'low', 'high' or 'bp' per the note below; order is the
        # filter order, Wn the critical frequency/frequencies.
        #Band = low, high, bp
        from meg import filtfilt
        self.filtereddata = filtfilt.calc(self.data.data_block, 1/self.data.hdr.header_data.sample_period , Wn, order, band)
    def badchan(self, thresh=3, maxhz=200, powernotch='yes'):
        # Stub: bad-channel detection is not implemented; the parameters
        # are currently unused and only the import executes.  The original
        # call shape is preserved in the commented line below.
        from meg import badchannels
        #fftnull, badch, badchcomparison = badchannels.calc(datapdf, fft.pow, ch,thresh=2, freqarray=fft.freq,minhz=3, maxhz=200, powernotch='yes')
        #REWRITE BADCHANNEL STUFF
def ica(self, numcomponents):
import mdp
ica = mdp.nodes.FastICANode(numcomponents)
ica.train(self.data.data_block)
comp = ica.execute(self.data.data_block)
    def offset_correct(self, start=0, end=-1):
        # DC-offset correction of the data block over samples [start:end];
        # the corrected data is stored on
        # self.results.offsetcorrecteddata (nothing is returned).
        from meg import offset
        self.results.offsetcorrecteddata = offset.correct(self.data.data_block, start=start, end=end)
| gpl-3.0 |
shengwen1994/calvin-base | calvin/runtime/south/plugins/storage/__init__.py | 2 | 1484 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import imp
from calvin.utilities import calconfig
# Spec: maps sub-package name -> {exported attribute name: module stem}.
_modules = {'dht': {'dht': 'dht_server'}}
fw_modules = None
__all__ = []
if not fw_modules:
    # Discover framework implementations: every subdirectory of this
    # package that is itself a package (i.e. contains an __init__.py).
    _dirname = os.path.dirname(__file__)
    # BUG FIX: the original deleted entries from fw_modules while
    # enumerating the same list, which skips the element that follows
    # each deletion and can leave invalid entries behind.  Build the
    # filtered list in a single pass instead.
    fw_modules = [fw_module for fw_module in os.listdir(_dirname)
                  if os.path.exists(os.path.join(_dirname, fw_module,
                                                 '__init__.py'))]
_conf = calconfig.get()
fw_path = _conf.get(None, 'framework')
if fw_path not in fw_modules:
    raise Exception("No framework '%s' with that name, avalible ones are '%s'" % (fw_path, fw_modules))
# Import each configured storage module from the selected framework and
# re-export it under its short name.
for module, _classes in _modules.items():
    for _name, _module in _classes.items():
        module_obj = __import__("%s.%s.%s" % (fw_path, module, _module), globals=globals(), fromlist=[''])
        globals()[_name] = module_obj
        # BUG FIX: __all__ must contain attribute *names* (strings); the
        # original appended the module object itself, which breaks
        # 'from ... import *' with a TypeError.
        __all__.append(_name)
| apache-2.0 |
johnfrenchxyz/508-checklist | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py | 388 | 91069 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
# Default gyp variable expansions for the make generator.  Values written
# as $(...) are left for make to expand at build time; the %(...)s entries
# are expanded by Python (see the inline notes).
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
  'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
  'PRODUCT_DIR': '$(builddir)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(abspath $<)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
# (May be flipped to True by CalculateGeneratorInputInfo for Android NDK.)
generator_wants_sorted_dependencies = False
# Placates pylint.  These are (re)assigned by CalculateVariables /
# CalculateGeneratorInputInfo below.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Fills in platform-dependent defaults in default_variables (only where the
  caller has not already set them, hence setdefault).  On mac it also pulls
  shared generator configuration from the Xcode generator module into this
  module's generator_* globals.
  """
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Make generator.
    import gyp.generator.xcode as xcode_generator
    global generator_additional_non_configuration_keys
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    global generator_additional_path_sections
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
    # Objective-C(++) sources become compilable on mac.
    COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
    default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  global generator_wants_sorted_dependencies
  global generator_filelist_paths
  flags = params.get('generator_flags', {})
  options = params['options']
  # The Android NDK requires a strict link order, so ask input.py for
  # dependencies sorted from dependents to dependencies.
  if flags.get('android_ndk_version', None):
    generator_wants_sorted_dependencies = True
  # Generated file lists live under <output>/<output_dir>/gypfiles, where
  # <output> falls back to the toplevel source directory when no explicit
  # generator output directory was given.
  base_dir = options.generator_output or options.toplevel_dir
  generator_filelist_paths = {
    'toplevel': options.toplevel_dir,
    'qualified_out_dir': os.path.normpath(os.path.join(
        base_dir, flags.get('output_dir', 'out'), 'gypfiles')),
  }
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
  """Emit the root Makefile's suffix rules.

  For every compilable extension, one pattern rule is written for each of
  the three places a source file can come from: the source tree, the
  toolset-specific generated-source directory, and the shared obj directory.
  """
  sorted_exts = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)

  def emit_rules(source_pattern):
    # One pattern rule plus its do_cmd recipe per extension.  The pattern
    # contains '%%%s', which %-formatting turns into '%<ext>'.
    for ext in sorted_exts:
      writer.write('$(obj).$(TOOLSET)/%.o: ' + (source_pattern % ext) +
                   ' FORCE_DO_CMD\n')
      writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])

  writer.write('# Suffix rules, putting all outputs into $(obj).\n')
  emit_rules('$(srcdir)/%%%s')

  writer.write('\n# Try building from generated source, too.\n')
  emit_rules('$(obj).$(TOOLSET)/%%%s')
  writer.write('\n')
  emit_rules('$(obj)/%%%s')
  writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
  """Return True if the file is compilable (should be in OBJS)."""
  # str.endswith accepts a tuple of suffixes, which replaces the original
  # manual generator-and-flag loop with a single builtin call.
  return filename.endswith(tuple(COMPILABLE_EXTENSIONS))
def Linkable(filename):
  """Return True when *filename* names an object file that belongs on the
  link line."""
  return filename.endswith(('.o',))
def Target(filename):
  """Map a compilable source filename to the path of its .o object file."""
  root, _ = os.path.splitext(filename)
  return root + '.o'
def EscapeShellArgument(s):
  """Quote *s* so that a POSIX shell interprets it literally.

  Technique from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  # Close the quote, emit an escaped quote, reopen the quote.
  escaped = s.replace("'", "'\\''")
  return "'%s'" % escaped
def EscapeMakeVariableExpansion(s):
  """Escape make's '$' variable-expansion syntax so *s* reaches the shell
  literally."""
  return '$$'.join(s.split('$'))
def EscapeCppDefine(s):
  """Escape a CPP define so that it will reach the compiler unaltered."""
  escaped = EscapeMakeVariableExpansion(EscapeShellArgument(s))
  # Make treats an unescaped '#' as the start of a comment even when it is
  # embedded in a quoted string, so it must be escaped as well.
  return escaped.replace('#', r'\#')
def QuoteIfNecessary(string):
  """Wrap *string* in double quotes (escaping embedded ones) when it
  contains a double-quote character; otherwise return it unchanged.

  TODO: Should this ideally be replaced with one or more of the above
  functions?
  """
  if '"' not in string:
    return string
  return '"' + string.replace('"', '\\"') + '"'
def StringToMakefileVariable(string):
  """Sanitize *string* into a legal make variable name by replacing every
  character outside [a-zA-Z0-9_] with an underscore."""
  sanitized = re.sub('[^a-zA-Z0-9_]', '_', string)
  return sanitized
# Prefix that Sourceify() prepends to relative paths; reconfigured by the
# generator when building out of a separate source directory.
srcdir_prefix = ''
def Sourceify(path):
  """Convert a path to its source directory form."""
  # Paths containing make variable references or already absolute paths are
  # left untouched; only plain relative paths get the srcdir prefix.
  if '$(' in path or os.path.isabs(path):
    return path
  return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
  """Replace each space in *s* with *quote* (backslash-escape by default),
  making the string safe to embed in a makefile."""
  return quote.join(s.split(' '))
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Module-level state shared by all MakefileWriter instances:

# Map from qualified target to path to output.
target_outputs = {}

# Map from qualified target to any linkable output.  A subset
# of target_outputs.  E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
  # generator_flags: dict of generator-specific flags (e.g.
  #     'android_ndk_version'), consulted later during Write().
  # flavor: platform flavor string such as 'mac' or 'linux'.
  self.generator_flags = generator_flags
  self.flavor = flavor

  # Each dict maps a compilable source extension to the text of the make
  # suffix rule that compiles it, for one of the three places a source can
  # live: the source tree, the toolset-specific generated-source dir, and
  # the shared obj dir.
  self.suffix_rules_srcdir = {}
  self.suffix_rules_objdir1 = {}
  self.suffix_rules_objdir2 = {}

  # Generate suffix rules for all compilable extensions.
  for ext in COMPILABLE_EXTENSIONS.keys():
    # Suffix rules for source folder.
    self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})

    # Suffix rules for generated source files.
    self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
    self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
          part_of_all):
  """The main entry point: writes a .mk file for a single target.

  Arguments:
    qualified_target: target we're generating
    base_path: path relative to source root we're building in, used to resolve
               target-relative paths
    output_filename: output .mk file name to write
    spec, configs: gyp info
    part_of_all: flag indicating this target is part of 'all'
  """
  gyp.common.EnsureDirExists(output_filename)

  self.fp = open(output_filename, 'w')
  self.fp.write(header)

  self.qualified_target = qualified_target
  self.path = base_path
  self.target = spec['target_name']
  self.type = spec['type']
  self.toolset = spec['toolset']

  self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
  if self.flavor == 'mac':
    self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
  else:
    self.xcode_settings = None

  deps, link_deps = self.ComputeDeps(spec)

  # Some of the generation below can add extra output, sources, or
  # link dependencies.  All of the out params of the functions that
  # follow use names like extra_foo.
  extra_outputs = []
  extra_sources = []
  extra_link_deps = []
  extra_mac_bundle_resources = []
  mac_bundle_deps = []

  if self.is_mac_bundle:
    self.output = self.ComputeMacBundleOutput(spec)
    self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
  else:
    self.output = self.output_binary = self.ComputeOutput(spec)

  self.is_standalone_static_library = bool(
      spec.get('standalone_static_library', 0))
  self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
                               'shared_library')
  if (self.is_standalone_static_library or
      self.type in self._INSTALLABLE_TARGETS):
    self.alias = os.path.basename(self.output)
    install_path = self._InstallableTargetInstallPath()
  else:
    self.alias = self.output
    install_path = self.output

  self.WriteLn("TOOLSET := " + self.toolset)
  self.WriteLn("TARGET := " + self.target)

  # Actions must come first, since they can generate more OBJs for use below.
  if 'actions' in spec:
    self.WriteActions(spec['actions'], extra_sources, extra_outputs,
                      extra_mac_bundle_resources, part_of_all)

  # Rules must be early like actions.
  if 'rules' in spec:
    self.WriteRules(spec['rules'], extra_sources, extra_outputs,
                    extra_mac_bundle_resources, part_of_all)

  if 'copies' in spec:
    self.WriteCopies(spec['copies'], extra_outputs, part_of_all)

  # Bundle resources.
  if self.is_mac_bundle:
    all_mac_bundle_resources = (
        spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
    self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
    self.WriteMacInfoPlist(mac_bundle_deps)

  # Sources.
  all_sources = spec.get('sources', []) + extra_sources
  if all_sources:
    if self.flavor == 'mac':
      # libtool on OS X generates warnings for duplicate basenames in the same
      # target.
      _ValidateSourcesForOSX(spec, all_sources)
    self.WriteSources(
        configs, deps, all_sources, extra_outputs,
        extra_link_deps, part_of_all,
        gyp.xcode_emulation.MacPrefixHeader(
            self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
            self.Pchify))
    # A real list rather than filter(): on Python 3 a filter object is
    # always truthy, so the emptiness test below would misfire.
    sources = [s for s in all_sources if Compilable(s)]
    if sources:
      self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
      extensions = set([os.path.splitext(s)[1] for s in sources])
      for ext in extensions:
        if ext in self.suffix_rules_srcdir:
          self.WriteLn(self.suffix_rules_srcdir[ext])
      self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
      for ext in extensions:
        if ext in self.suffix_rules_objdir1:
          self.WriteLn(self.suffix_rules_objdir1[ext])
      for ext in extensions:
        if ext in self.suffix_rules_objdir2:
          self.WriteLn(self.suffix_rules_objdir2[ext])
      self.WriteLn('# End of this set of suffix rules')

  # Add dependency from bundle to bundle binary.
  if self.is_mac_bundle:
    mac_bundle_deps.append(self.output_binary)

  self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
                   mac_bundle_deps, extra_outputs, part_of_all)

  # Update global list of target outputs, used in dependency tracking.
  target_outputs[qualified_target] = install_path

  # Update global list of link dependencies.
  if self.type in ('static_library', 'shared_library'):
    target_link_deps[qualified_target] = self.output_binary

  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if self.generator_flags.get('android_ndk_version', None):
    self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)

  self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
  """Write a "sub-project" Makefile.

  This is a small, wrapper Makefile that calls the top-level Makefile to build
  the targets from a single gyp file (i.e. a sub-project).

  Arguments:
    output_filename: sub-project Makefile name to write
    makefile_path: path to the top-level Makefile
    targets: list of "all" targets for this sub-project
    build_dir: build output directory, relative to the sub-project
  """
  gyp.common.EnsureDirExists(output_filename)
  self.fp = open(output_filename, 'w')
  self.fp.write(header)
  # For consistency with other builders, put sub-project build output in the
  # sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
  self.WriteLn('export builddir_name ?= %s' %
               os.path.join(os.path.dirname(output_filename), build_dir))
  self.WriteLn('.PHONY: all')
  self.WriteLn('all:')
  if makefile_path:
    makefile_path = ' -C ' + makefile_path
  # Delegate the actual build of this sub-project's targets to the
  # top-level Makefile.
  self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
  self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
                 extra_mac_bundle_resources, part_of_all):
  """Write Makefile code for any 'actions' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 actions (used to make other pieces dependent on these
                 actions)
  part_of_all: flag indicating this target is part of 'all'
  """
  env = self.GetSortedXcodeEnv()
  for action in actions:
    name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                               action['action_name']))
    self.WriteLn('### Rules for action "%s":' % action['action_name'])
    inputs = action['inputs']
    outputs = action['outputs']

    # Build up a list of outputs.
    # Collect the output dirs we'll need.
    dirs = set()
    for out in outputs:
      dir = os.path.split(out)[0]
      if dir:
        dirs.add(dir)
    if int(action.get('process_outputs_as_sources', False)):
      extra_sources += outputs
    if int(action.get('process_outputs_as_mac_bundle_resources', False)):
      extra_mac_bundle_resources += outputs

    # Write the actual command.
    action_commands = action['action']
    if self.flavor == 'mac':
      action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                         for command in action_commands]
    command = gyp.common.EncodePOSIXShellList(action_commands)
    if 'message' in action:
      self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
    else:
      self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
    if len(dirs) > 0:
      command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

    cd_action = 'cd %s; ' % Sourceify(self.path or '.')

    # command and cd_action get written to a toplevel variable called
    # cmd_foo. Toplevel variables can't handle things that change per
    # makefile like $(TARGET), so hardcode the target.
    command = command.replace('$(TARGET)', self.target)
    cd_action = cd_action.replace('$(TARGET)', self.target)

    # Set LD_LIBRARY_PATH in case the action runs an executable from this
    # build which links to shared libs from this build.
    # actions run on the host, so they should in theory only use host
    # libraries, but until everything is made cross-compile safe, also use
    # target libraries.
    # TODO(piman): when everything is cross-compile safe, remove lib.target
    self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
                 '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
                 'export LD_LIBRARY_PATH; '
                 '%s%s'
                 % (name, cd_action, command))
    self.WriteLn()
    # Materialize as a list: 'outputs' is indexed ([0]) and iterated more
    # than once below, which a lazy Python 3 map object does not support.
    outputs = list(map(self.Absolutify, outputs))

    # The makefile rules are all relative to the top dir, but the gyp actions
    # are defined relative to their containing dir. This replaces the obj
    # variable for the action rule with an absolute version so that the output
    # goes in the right place.
    # Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
    # it's superfluous for the "extra outputs", and this avoids accidentally
    # writing duplicate dummy rules for those outputs.
    # Same for environment.
    self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
    self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
    self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())

    for input in inputs:
      assert ' ' not in input, (
          "Spaces in action input filenames not supported (%s)" % input)
    for output in outputs:
      assert ' ' not in output, (
          "Spaces in action output filenames not supported (%s)" % output)

    # See the comment in WriteCopies about expanding env vars.
    outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
    inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

    self.WriteDoCmd(outputs,
                    list(map(Sourceify, map(self.Absolutify, inputs))),
                    part_of_all=part_of_all, command=name)

    # Stuff the outputs in a variable so we can refer to them later.
    outputs_variable = 'action_%s_outputs' % name
    self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
    extra_outputs.append('$(%s)' % outputs_variable)
    self.WriteLn()
  self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
               extra_mac_bundle_resources, part_of_all):
  """Write Makefile code for any 'rules' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 rules (used to make other pieces dependent on these rules)
  part_of_all: flag indicating this target is part of 'all'
  """
  env = self.GetSortedXcodeEnv()
  # Spaces in rule filenames are not supported, but rule variables have
  # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
  # The spaces within the variables are valid, so remove the variables
  # before checking.  Compiled once here since the pattern is invariant.
  variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
  for rule in rules:
    name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                               rule['rule_name']))
    count = 0
    self.WriteLn('### Generated for rule %s:' % name)

    all_outputs = []

    for rule_source in rule.get('rule_sources', []):
      dirs = set()
      (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
      (rule_source_root, rule_source_ext) = \
          os.path.splitext(rule_source_basename)

      outputs = [self.ExpandInputRoot(out, rule_source_root,
                                      rule_source_dirname)
                 for out in rule['outputs']]

      for out in outputs:
        dir = os.path.dirname(out)
        if dir:
          dirs.add(dir)
      if int(rule.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs
      # list(map(...)) so the value survives repeated iteration on Python 3.
      inputs = list(map(Sourceify, map(self.Absolutify, [rule_source] +
                                       rule.get('inputs', []))))
      actions = ['$(call do_cmd,%s_%d)' % (name, count)]

      if name == 'resources_grit':
        # HACK: This is ugly.  Grit intentionally doesn't touch the
        # timestamp of its output file when the file doesn't change,
        # which is fine in hash-based dependency systems like scons
        # and forge, but not kosher in the make world.  After some
        # discussion, hacking around it here seems like the least
        # amount of pain.
        actions += ['@touch --no-create $@']

      # See the comment in WriteCopies about expanding env vars.
      outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
      inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

      # Materialize as a list: 'outputs' is indexed ([0]) and iterated
      # several times below, which a lazy Python 3 map object would break.
      outputs = list(map(self.Absolutify, outputs))
      all_outputs += outputs
      # Only write the 'obj' and 'builddir' rules for the "primary" output
      # (:1); it's superfluous for the "extra outputs", and this avoids
      # accidentally writing duplicate dummy rules for those outputs.
      self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
      self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
      self.WriteMakeRule(outputs, inputs, actions,
                         command="%s_%d" % (name, count))
      for output in outputs:
        output = re.sub(variables_with_spaces, '', output)
        assert ' ' not in output, (
            "Spaces in rule filenames not yet supported (%s)" % output)
      self.WriteLn('all_deps += %s' % ' '.join(outputs))

      action = [self.ExpandInputRoot(ac, rule_source_root,
                                     rule_source_dirname)
                for ac in rule['action']]
      mkdirs = ''
      if len(dirs) > 0:
        mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
      cd_action = 'cd %s; ' % Sourceify(self.path or '.')

      # action, cd_action, and mkdirs get written to a toplevel variable
      # called cmd_foo. Toplevel variables can't handle things that change
      # per makefile like $(TARGET), so hardcode the target.
      if self.flavor == 'mac':
        action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                  for command in action]
      action = gyp.common.EncodePOSIXShellList(action)
      action = action.replace('$(TARGET)', self.target)
      cd_action = cd_action.replace('$(TARGET)', self.target)
      mkdirs = mkdirs.replace('$(TARGET)', self.target)

      # Set LD_LIBRARY_PATH in case the rule runs an executable from this
      # build which links to shared libs from this build.
      # rules run on the host, so they should in theory only use host
      # libraries, but until everything is made cross-compile safe, also use
      # target libraries.
      # TODO(piman): when everything is cross-compile safe, remove lib.target
      self.WriteLn(
          "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
          "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
          "export LD_LIBRARY_PATH; "
          "%(cd_action)s%(mkdirs)s%(action)s" % {
            'action': action,
            'cd_action': cd_action,
            'count': count,
            'mkdirs': mkdirs,
            'name': name,
          })
      self.WriteLn(
          'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
            'count': count,
            'name': name,
          })
      self.WriteLn()
      count += 1

    outputs_variable = 'rule_%s_outputs' % name
    self.WriteList(all_outputs, outputs_variable)
    extra_outputs.append('$(%s)' % outputs_variable)

    self.WriteLn('### Finished generating for rule: %s' % name)
    self.WriteLn()
  self.WriteLn('### Finished generating for all rules')
  self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
  """Write Makefile code for any 'copies' from the gyp input.

  extra_outputs: a list that will be filled in with any outputs of this action
                 (used to make other pieces dependent on this action)
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Generated for copy rule.')

  variable = StringToMakefileVariable(self.qualified_target + '_copies')
  outputs = []
  for copy in copies:
    for path in copy['files']:
      # Absolutify() may call normpath, and will strip trailing slashes.
      path = Sourceify(self.Absolutify(path))
      filename = os.path.split(path)[1]
      output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
                                                      filename)))

      # If the output path has variables in it, which happens in practice for
      # 'copies', writing the environment as target-local doesn't work,
      # because the variables are already needed for the target name.
      # Copying the environment variables into global make variables doesn't
      # work either, because then the .d files will potentially contain spaces
      # after variable expansion, and .d file handling cannot handle spaces.
      # As a workaround, manually expand variables at gyp time. Since 'copies'
      # can't run scripts, there's no need to write the env then.
      # WriteDoCmd() will escape spaces for .d files.
      env = self.GetSortedXcodeEnv()
      output = gyp.xcode_emulation.ExpandEnvVars(output, env)
      path = gyp.xcode_emulation.ExpandEnvVars(path, env)
      self.WriteDoCmd([output], [path], 'copy', part_of_all)
      outputs.append(output)
  # Gather every copy destination into one make variable for later reference.
  self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
  extra_outputs.append('$(%s)' % variable)
  self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
  """Writes Makefile code for 'mac_bundle_resources'.

  Each copied resource is appended to bundle_deps so the bundle target can
  depend on it.
  """
  self.WriteLn('### Generated for mac_bundle_resources')

  for output, res in gyp.xcode_emulation.GetMacBundleResources(
      generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
      map(Sourceify, map(self.Absolutify, resources))):
    _, ext = os.path.splitext(output)
    if ext != '.xcassets':
      # Make does not support '.xcassets' emulation.
      self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
                      part_of_all=True)
      bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
  """Write Makefile code for bundle Info.plist files.

  The resulting copied plist is appended to bundle_deps.
  """
  info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
      generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
      lambda p: Sourceify(self.Absolutify(p)))
  # Nothing to do when the bundle declares no Info.plist.
  if not info_plist:
    return
  if defines:
    # Create an intermediate file to store preprocessed results.
    intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
                          os.path.basename(info_plist))
    self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
                   quoter=EscapeCppDefine)
    self.WriteMakeRule([intermediate_plist], [info_plist],
                       ['$(call do_cmd,infoplist)',
                        # "Convert" the plist so that any weird whitespace changes from the
                        # preprocessor do not affect the XML parser in mac_tool.
                        '@plutil -convert xml1 $@ $@'])
    info_plist = intermediate_plist
  # plists can contain envvars and substitute them into the file.
  self.WriteSortedXcodeEnv(
      out, self.GetSortedXcodeEnv(additional_settings=extra_env))
  self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
                  part_of_all=True)
  bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
                 extra_outputs, extra_link_deps,
                 part_of_all, precompiled_header):
  """Write Makefile code for any 'sources' from the gyp input.
  These are source files necessary to build the current target.

  configs, deps, sources: input from gyp.
  extra_outputs: a list of extra outputs this action should be dependent on;
                 used to serialize action/rules before compilation
  extra_link_deps: a list that will be filled in with any outputs of
                   compilation (to be used in link lines)
  part_of_all: flag indicating this target is part of 'all'
  """
  # Write configuration-specific variables for CFLAGS, etc.
  for configname in sorted(configs.keys()):
    config = configs[configname]
    self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
                   quoter=EscapeCppDefine)

    if self.flavor == 'mac':
      cflags = self.xcode_settings.GetCflags(configname)
      cflags_c = self.xcode_settings.GetCflagsC(configname)
      cflags_cc = self.xcode_settings.GetCflagsCC(configname)
      cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
      cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
    else:
      cflags = config.get('cflags')
      cflags_c = config.get('cflags_c')
      cflags_cc = config.get('cflags_cc')

    self.WriteLn("# Flags passed to all source files.")
    self.WriteList(cflags, 'CFLAGS_%s' % configname)
    self.WriteLn("# Flags passed to only C files.")
    self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
    self.WriteLn("# Flags passed to only C++ files.")
    self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
    if self.flavor == 'mac':
      self.WriteLn("# Flags passed to only ObjC files.")
      self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
      self.WriteLn("# Flags passed to only ObjC++ files.")
      self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
    includes = config.get('include_dirs')
    if includes:
      includes = [Sourceify(self.Absolutify(include)) for include in includes]
      self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')

  # Real lists rather than lazy filter()/map() objects: both values are
  # indexed and iterated several times below, which on Python 3 would
  # silently yield nothing after the first pass.
  compilable = [s for s in sources if Compilable(s)]
  objs = [self.Objectify(self.Absolutify(Target(c))) for c in compilable]
  self.WriteList(objs, 'OBJS')

  for obj in objs:
    assert ' ' not in obj, (
        "Spaces in object filenames not supported (%s)" % obj)
  self.WriteLn('# Add to the list of files we specially track '
               'dependencies for.')
  self.WriteLn('all_deps += $(OBJS)')
  self.WriteLn()

  # Make sure our dependencies are built first.
  if deps:
    self.WriteMakeRule(['$(OBJS)'], deps,
                       comment = 'Make sure our dependencies are built '
                                 'before any of us.',
                       order_only = True)

  # Make sure the actions and rules run first.
  # If they generate any extra headers etc., the per-.o file dep tracking
  # will catch the proper rebuilds, so order only is still ok here.
  if extra_outputs:
    self.WriteMakeRule(['$(OBJS)'], extra_outputs,
                       comment = 'Make sure our actions/rules run '
                                 'before any of us.',
                       order_only = True)

  pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
  if pchdeps:
    self.WriteLn('# Dependencies from obj files to their precompiled headers')
    for source, obj, gch in pchdeps:
      self.WriteLn('%s: %s' % (obj, gch))
    self.WriteLn('# End precompiled header dependencies')

  if objs:
    extra_link_deps.append('$(OBJS)')
    self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
    self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
    self.WriteLn("$(OBJS): GYP_CFLAGS := "
                 "$(DEFS_$(BUILDTYPE)) "
                 "$(INCS_$(BUILDTYPE)) "
                 "%s " % precompiled_header.GetInclude('c') +
                 "$(CFLAGS_$(BUILDTYPE)) "
                 "$(CFLAGS_C_$(BUILDTYPE))")
    self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
                 "$(DEFS_$(BUILDTYPE)) "
                 "$(INCS_$(BUILDTYPE)) "
                 "%s " % precompiled_header.GetInclude('cc') +
                 "$(CFLAGS_$(BUILDTYPE)) "
                 "$(CFLAGS_CC_$(BUILDTYPE))")
    if self.flavor == 'mac':
      self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('m') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_C_$(BUILDTYPE)) "
                   "$(CFLAGS_OBJC_$(BUILDTYPE))")
      self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('mm') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_CC_$(BUILDTYPE)) "
                   "$(CFLAGS_OBJCC_$(BUILDTYPE))")

  self.WritePchTargets(precompiled_header.GetPchBuildCommands())

  # If there are any object files in our input file list, link them into our
  # output.
  extra_link_deps += [f for f in sources if Linkable(f)]

  self.WriteLn()
def WritePchTargets(self, pch_commands):
  """Writes make rules to compile prefix headers."""
  if not pch_commands:
    return

  # Per-language extra compile flags and the target-local GYP_PCH_* variable
  # each precompiled-header rule must set.  Hoisted out of the loop since
  # the tables are constant.
  extra_flags_by_lang = {
      'c': '$(CFLAGS_C_$(BUILDTYPE))',
      'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
      'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
      'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
  }
  var_name_by_lang = {
      'c': 'GYP_PCH_CFLAGS',
      'cc': 'GYP_PCH_CXXFLAGS',
      'm': 'GYP_PCH_OBJCFLAGS',
      'mm': 'GYP_PCH_OBJCXXFLAGS',
  }

  for gch, lang_flag, lang, input in pch_commands:
    self.WriteLn("%s: %s := %s " % (gch, var_name_by_lang[lang], lang_flag) +
                 "$(DEFS_$(BUILDTYPE)) "
                 "$(INCS_$(BUILDTYPE)) "
                 "$(CFLAGS_$(BUILDTYPE)) " +
                 extra_flags_by_lang[lang])

    self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
    self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
    self.WriteLn('')
    assert ' ' not in gch, (
        "Spaces in gch filenames not supported (%s)" % gch)
    self.WriteLn('all_deps += %s' % gch)
    self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
  """Return the 'output' (full output path) of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
    '$(obj)/baz/libfoobar.so'
  """
  assert not self.is_mac_bundle  # bundles use ComputeMacBundleOutput instead

  # Executables (and mac products installed in place) land in $(builddir);
  # everything else goes under the per-toolset object directory.
  if self.type == 'executable' or self._InstallImmediately():
    output_dir = '$(builddir)'
  else:
    output_dir = os.path.join('$(obj).' + self.toolset, self.path)
  # An explicit product_dir in the spec overrides either choice.
  output_dir = spec.get('product_dir', output_dir)
  return os.path.join(output_dir, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
  """Return the 'output' (full output path) to a bundle output directory."""
  assert self.is_mac_bundle
  # Bundles always live directly under the product directory, named after
  # their wrapper (e.g. 'Foo.app' / 'Foo.framework').
  product_dir = generator_default_variables['PRODUCT_DIR']
  return os.path.join(product_dir, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
  """Return the 'output' (full output path) to the binary in a bundle."""
  # The executable path is relative to PRODUCT_DIR and already includes the
  # wrapper directory (e.g. 'Foo.app/Contents/MacOS/Foo').
  product_dir = generator_default_variables['PRODUCT_DIR']
  return os.path.join(product_dir, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
  """Compute the dependencies of a gyp spec.

  Returns a tuple (deps, link_deps), where each is a list of
  filenames that will need to be put in front of make for either
  building (deps) or linking (link_deps).
  """
  build_deps = []
  linkable_deps = []
  if 'dependencies' in spec:
    dependencies = spec['dependencies']
    # Every dependency's declared output must be built before us.
    build_deps.extend(target_outputs[d] for d in dependencies
                      if target_outputs[d])
    # Those that expose a linkable artifact are also link inputs.
    linkable_deps.extend(target_link_deps[d] for d in dependencies
                         if d in target_link_deps)
    # Link inputs must exist at build time too.
    build_deps.extend(linkable_deps)
    # TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
    # This hack makes it work:
    # linkable_deps.extend(spec.get('libraries', []))
  return (gyp.common.uniquer(build_deps), gyp.common.uniquer(linkable_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
  """Make our main binary order-only depend on |extra_outputs| so they are
  produced first.

  NOTE(review): the |target| parameter is accepted but ignored -- the rule
  is always written against self.output_binary, even though one caller
  passes self.output. Confirm intent before changing; generated makefiles
  would differ.
  """
  self.WriteMakeRule([self.output_binary], extra_outputs,
                     comment='Build our special outputs first.',
                     order_only=True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not already after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will be older than
# its dependencies usually. To prevent this rule from executing
# on every build (expensive, especially with postbuilds), expliclity
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
              quoter=QuoteIfNecessary):
  """Write a variable definition that is a list of values.

  E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
       foo = blaha blahb
  but in a pretty-printed style.
  """
  formatted = ''
  if value_list:
    quoted = [quoter(prefix + item) for item in value_list]
    # One value per continuation line, for readable makefiles.
    formatted = ' \\\n\t' + ' \\\n\t'.join(quoted)
  self.fp.write('%s :=%s\n\n' % (variable, formatted))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
               postbuilds=False):
  """Write a Makefile rule that uses do_cmd.

  This makes the outputs dependent on the command line that was run,
  as well as support the V= make command line flag.

  command: name of the do_cmd recipe to invoke.
  part_of_all: accepted for signature compatibility; not used in this body.
  postbuilds: if true, ask do_cmd to honor $POSTBUILDS.
  """
  if postbuilds:
    # ',' separates do_cmd arguments; a comma in |command| would corrupt
    # the call we are about to emit.
    assert ',' not in command
    do_cmd_suffix = ',,1'  # Tell do_cmd to honor $POSTBUILDS
  else:
    do_cmd_suffix = ''
  self.WriteMakeRule(outputs, inputs,
                     actions=['$(call do_cmd,%s%s)' % (command,
                                                       do_cmd_suffix)],
                     comment=comment,
                     command=command,
                     force=True)
  # Add our outputs to the list of targets we read depfiles from.
  # all_deps is only used for deps file reading, and for deps files we replace
  # spaces with ? because escaping doesn't work with make's $(sort) and
  # other functions.
  quoted_outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
  self.WriteLn('all_deps += %s' % ' '.join(quoted_outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
                  order_only=False, force=False, phony=False, command=None):
  """Write a Makefile rule, with some extra tricks.

  outputs: a list of outputs for the rule (note: this is not directly
      supported by make; see comments below)
  inputs: a list of inputs for the rule
  actions: a list of shell commands to run for the rule
  comment: a comment to put in the Makefile above the rule (also useful
      for making this Python script's code self-documenting)
  order_only: if true, makes the dependency order-only
  force: if true, include FORCE_DO_CMD as an order-only dep
  phony: if true, the rule does not actually generate the named output, the
      output is just a name to run the rule
  command: (optional) command name to generate unambiguous labels
  """
  # Make cannot cope with literal spaces in target/prerequisite names.
  # List comprehensions (instead of map()) keep the outputs[0] indexing
  # below valid under both Python 2 and Python 3.
  outputs = [QuoteSpaces(o) for o in outputs]
  inputs = [QuoteSpaces(i) for i in inputs]

  if comment:
    self.WriteLn('# ' + comment)
  if phony:
    self.WriteLn('.PHONY: ' + ' '.join(outputs))
  if actions:
    self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
  force_append = ' FORCE_DO_CMD' if force else ''

  if order_only:
    # Order only rule: Just write a simple rule.
    # TODO(evanm): just make order_only a list of deps instead of this hack.
    self.WriteLn('%s: | %s%s' %
                 (' '.join(outputs), ' '.join(inputs), force_append))
  elif len(outputs) == 1:
    # Regular rule, one output: Just write a simple rule.
    self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
  else:
    # Regular rule, more than one output: Multiple outputs are tricky in
    # make. We will write three rules:
    # - All outputs depend on an intermediate file.
    # - Make .INTERMEDIATE depend on the intermediate.
    # - The intermediate file depends on the inputs and executes the
    #   actual command.
    # - The intermediate recipe will 'touch' the intermediate file.
    # - The multi-output rule will have a do-nothing recipe.
    # NOTE(review): this branch assumes |actions| is a list (it inserts the
    # touch command); a multi-output rule with actions=None would raise --
    # confirm callers before hardening.
    intermediate = "%s.intermediate" % (command if command else self.target)
    self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
    self.WriteLn('\t@:')  # Do-nothing recipe (was "'\t%s' % '@:';").
    self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
    self.WriteLn('%s: %s%s' %
                 (intermediate, ' '.join(inputs), force_append))
    actions.insert(0, '$(call do_cmd,touch)')

  if actions:
    for action in actions:
      self.WriteLn('\t%s' % action)
  self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
  """Write a set of LOCAL_XXX definitions for Android NDK.

  These variable definitions will be used by Android NDK but do nothing for
  non-Android applications.

  Arguments:
    module_name: Android NDK module name, which must be unique among all
      module names.
    all_sources: A list of source files (will be filtered by Compilable).
    link_deps: A list of link dependencies, which must be sorted in
      the order from dependencies to dependents.
  """
  # Only linkable target types map onto NDK module kinds.
  if self.type not in ('executable', 'shared_library', 'static_library'):
    return
  self.WriteLn('# Variable definitions for Android applications')
  self.WriteLn('include $(CLEAR_VARS)')
  self.WriteLn('LOCAL_MODULE := ' + module_name)
  self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
               '$(DEFS_$(BUILDTYPE)) '
               # LOCAL_CFLAGS is applied to both of C and C++. There is
               # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
               # sources.
               '$(CFLAGS_C_$(BUILDTYPE)) '
               # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
               # LOCAL_C_INCLUDES does not expect it. So put it in
               # LOCAL_CFLAGS.
               '$(INCS_$(BUILDTYPE))')
  # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
  self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
  self.WriteLn('LOCAL_C_INCLUDES :=')
  self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
  # Detect the C++ extension: pick whichever of .cc/.cpp/.cxx occurs most
  # often among the sources (ties keep the earlier-seen winner).
  cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
  default_cpp_ext = '.cpp'
  for filename in all_sources:
    ext = os.path.splitext(filename)[1]
    if ext in cpp_ext:
      cpp_ext[ext] += 1
      if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
        default_cpp_ext = ext
  self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
  self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
                 'LOCAL_SRC_FILES')

  # Filter out those which do not match prefix and suffix and produce
  # the resulting list without prefix and suffix.
  def DepsToModules(deps, prefix, suffix):
    modules = []
    for filepath in deps:
      filename = os.path.basename(filepath)
      if filename.startswith(prefix) and filename.endswith(suffix):
        modules.append(filename[len(prefix):-len(suffix)])
    return modules

  # Retrieve the default value of 'SHARED_LIB_SUFFIX'
  params = {'flavor': 'linux'}
  default_variables = {}
  CalculateVariables(default_variables, params)

  self.WriteList(
      DepsToModules(link_deps,
                    generator_default_variables['SHARED_LIB_PREFIX'],
                    default_variables['SHARED_LIB_SUFFIX']),
      'LOCAL_SHARED_LIBRARIES')
  self.WriteList(
      DepsToModules(link_deps,
                    generator_default_variables['STATIC_LIB_PREFIX'],
                    generator_default_variables['STATIC_LIB_SUFFIX']),
      'LOCAL_STATIC_LIBRARIES')

  # Dispatch to the NDK build fragment matching our target type.
  if self.type == 'executable':
    self.WriteLn('include $(BUILD_EXECUTABLE)')
  elif self.type == 'shared_library':
    self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
  elif self.type == 'static_library':
    self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
  self.WriteLn()
def WriteLn(self, text=''):
  """Append |text| to the makefile, terminated by a newline."""
  self.fp.write('%s\n' % text)
def GetSortedXcodeEnv(self, additional_settings=None):
  """Return the Xcode build environment as a sorted (key, value) list,
  optionally merged with |additional_settings|."""
  srcroot = os.path.join("$(abs_srcdir)", self.path)
  return gyp.xcode_emulation.GetSortedXcodeEnv(
      self.xcode_settings,
      "$(abs_builddir)",
      srcroot,
      "$(BUILDTYPE)",
      additional_settings)
def GetSortedXcodePostbuildEnv(self):
  """Return the Xcode env for postbuilds.

  CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
  TODO(thakis): It would be nice to have some general mechanism instead.
  """
  # Write the setting even when empty, so a postbuild can never pick up an
  # export left over from an earlier target.
  extra = {
      'CHROMIUM_STRIP_SAVE_FILE':
          self.xcode_settings.GetPerTargetSetting(
              'CHROMIUM_STRIP_SAVE_FILE', ''),
  }
  return self.GetSortedXcodeEnv(additional_settings=extra)
def WriteSortedXcodeEnv(self, target, env):
  """Emit a 'target: export KEY := value' line for every pair in |env|."""
  quoted_target = QuoteSpaces(target)
  for key, value in env:
    # For
    #   foo := a\ b
    # the escaped space does the right thing. For
    #   export foo := a\ b
    # it does not -- the backslash is written to the env as literal
    # character. So don't escape spaces in the values.
    self.WriteLn('%s: export %s := %s' % (quoted_target, key, value))
def Objectify(self, path):
  """Convert a path to its output directory form."""
  obj_dir = '$(obj).%s/$(TARGET)' % self.toolset
  # Rewrite a generic '$(obj)/' prefix to the per-toolset, per-target form.
  if '$(' in path:
    path = path.replace('$(obj)/', '%s/' % obj_dir)
  # Anything still outside the object tree gets relocated under it.
  if '$(obj)' not in path:
    path = '%s/%s' % (obj_dir, path)
  return path
def Pchify(self, path, lang):
  """Convert a prefix header path to its output directory form."""
  path = self.Absolutify(path)
  pch_dir = '$(obj).%s/$(TARGET)/pch-%s' % (self.toolset, lang)
  if '$(' in path:
    # NOTE(review): the replacement text carries no trailing '/', so
    # '$(obj)/foo.h' becomes '...pch-cfoo.h'. Looks like a missing slash,
    # but it matches the original's output -- confirm upstream before
    # changing, since a fix alters generated makefiles.
    return path.replace('$(obj)/', pch_dir)
  return '%s/%s' % (pch_dir, path)
def Absolutify(self, path):
  """Convert a subdirectory-relative path into a base-relative path.

  Paths containing make variables are passed through (minus trailing
  slashes): normpath might collapse a '..' across the variable boundary
  too aggressively.
  """
  contains_variable = '$(' in path
  if not contains_variable:
    return os.path.normpath(os.path.join(self.path, path))
  # Keep the variable reference intact; just strip trailing slashes.
  return path.rstrip('/')
def ExpandInputRoot(self, template, expansion, dirname):
  """Substitute %(INPUT_ROOT)s / %(INPUT_DIRNAME)s placeholders in a rule
  template; templates without either placeholder pass through unchanged."""
  has_placeholder = ('%(INPUT_ROOT)s' in template or
                     '%(INPUT_DIRNAME)s' in template)
  if not has_placeholder:
    return template
  return template % {
      'INPUT_ROOT': expansion,
      'INPUT_DIRNAME': dirname,
  }
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
# XXX(TooTallNate): disabling this code since we don't want this behavior...
#if (self.type == 'shared_library' and
# (self.flavor != 'mac' or self.toolset != 'target')):
# # Install all shared libs into a common directory (per toolset) for
# # convenient access with LD_LIBRARY_PATH.
# return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
  """Write the make rule that re-runs gyp to regenerate the Makefile
  whenever any contributing .gyp file changes."""
  options = params['options']

  # The .gyp files given on the original command line, relativized so the
  # regeneration command works from the top-level directory.
  regen_args = [gyp.common.RelativePath(f, options.toplevel_dir)
                for f in params['build_files_arg']]

  gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                            options.toplevel_dir)
  if not gyp_binary.startswith(os.sep):
    # Relative commands need a './' prefix to be runnable from a recipe.
    gyp_binary = os.path.join('.', gyp_binary)

  regen_command = gyp.common.EncodePOSIXShellList(
      [gyp_binary, '-fmake'] +
      gyp.RegenerateFlags(options) +
      regen_args)
  rule_template = (
      "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
      "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
      "%(makefile_name)s: %(deps)s\n"
      "\t$(call do_cmd,regen_makefile)\n\n")
  root_makefile.write(rule_template % {
      'makefile_name': makefile_name,
      'deps': ' '.join(Sourceify(bf) for bf in build_files),
      'cmd': regen_command,
  })
def PerformBuild(data, configurations, params):
  """Run 'make' once per requested configuration (gyp --build support).

  data: unused here; present to satisfy the generator PerformBuild contract.
  configurations: list of configuration names; each is passed as BUILDTYPE.
  params: generator params; params['options'].toplevel_dir selects the
      directory holding the generated root Makefile.
  Raises subprocess.CalledProcessError if any make invocation fails.
  """
  options = params['options']
  for config in configurations:
    arguments = ['make']
    if options.toplevel_dir and options.toplevel_dir != '.':
      # Build from the directory that contains the root Makefile.
      arguments += '-C', options.toplevel_dir
    arguments.append('BUILDTYPE=' + config)
    print 'Building [%s]: %s' % (config, arguments)
    subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generator entry point: write the root Makefile, one .mk file per
  target, and one sub-Makefile per contributing gyp file."""
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  default_target = generator_flags.get('default_target', 'all')

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    if options.generator_output:
      output_file = os.path.join(
          options.depth, options.generator_output, base_path, base_name)
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'Makefile' + options.suffix
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  if options.generator_output:
    global srcdir_prefix
    makefile_path = os.path.join(
        options.toplevel_dir, options.generator_output, makefile_name)
    srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
    srcdir_prefix = '$(srcdir)/'

  # Baseline (linux) header parameters; per-flavor overrides follow.
  flock_command = 'flock'
  copy_archive_arguments = '-af'
  header_params = {
      'default_target': default_target,
      'builddir': builddir_name,
      'default_configuration': default_configuration,
      'flock': flock_command,
      'flock_index': 1,
      'link_commands': LINK_COMMANDS_LINUX,
      'extra_commands': '',
      'srcdir': srcdir,
      'copy_archive_args': copy_archive_arguments,
  }
  if flavor == 'mac':
    flock_command = './gyp-mac-tool flock'
    header_params.update({
        'flock': flock_command,
        'flock_index': 2,
        'link_commands': LINK_COMMANDS_MAC,
        'extra_commands': SHARED_HEADER_MAC_COMMANDS,
    })
  elif flavor == 'android':
    header_params.update({
        'link_commands': LINK_COMMANDS_ANDROID,
    })
  elif flavor == 'solaris':
    header_params.update({
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })
  elif flavor == 'freebsd':
    # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
    header_params.update({
        'flock': 'lockf',
    })
  elif flavor == 'openbsd':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
    })
  elif flavor == 'aix':
    copy_archive_arguments = '-pPRf'
    header_params.update({
        'copy_archive_args': copy_archive_arguments,
        'link_commands': LINK_COMMANDS_AIX,
        'flock': './gyp-flock-tool flock',
        'flock_index': 2,
    })

  # Toolchain commands, overridable from the environment.
  header_params.update({
      'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
      'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
      'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
      'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
      'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
      'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
      'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
      'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
  })

  # NOTE(review): assumes target_list is non-empty (target_list[0]).
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_array = data[build_file].get('make_global_settings', [])
  # First pass: collect *_wrapper entries (e.g. CC_wrapper -> ccache).
  wrappers = {}
  for key, value in make_global_settings_array:
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
  # Second pass: render the non-wrapper settings into makefile text.
  make_global_settings = ''
  for key, value in make_global_settings_array:
    if re.match('.*_wrapper', key):
      continue
    if value[0] != '$':
      value = '$(abspath %s)' % value
    wrapper = wrappers.get(key)
    if wrapper:
      value = '%s %s' % (wrapper, value)
      del wrappers[key]
    if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
      make_global_settings += (
          'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
      # Let gyp-time envvars win over global settings.
      env_key = key.replace('.', '_')  # CC.host -> CC_host
      if env_key in os.environ:
        value = os.environ[env_key]
      make_global_settings += '  %s = %s\n' % (key, value)
      make_global_settings += 'endif\n'
    else:
      make_global_settings += '%s ?= %s\n' % (key, value)
  # TODO(ukai): define cmd when only wrapper is specified in
  # make_global_settings.

  header_params['make_global_settings'] = make_global_settings

  # Emit the root Makefile header.
  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(SHARED_HEADER % header_params)
  # Currently any versions have the same effect, but in future the behavior
  # could be different.
  if android_ndk_version:
    root_makefile.write(
        '# Define LOCAL_PATH for build of Android applications.\n'
        'LOCAL_PATH := $(call my-dir)\n'
        '\n')
  for toolset in toolsets:
    root_makefile.write('TOOLSET := %s\n' % toolset)
  WriteRootHeaderSuffixRules(root_makefile)

  # Put build-time support tools next to the root Makefile.
  dest_path = os.path.dirname(makefile_path)
  gyp.common.CopyTool(flavor, dest_path)

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  # Write one .mk file per target and collect them for inclusion.
  build_files = set()
  include_list = set()
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings_array == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))

    build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    writer = MakefileWriter(generator_flags, flavor)
    writer.Write(qualified_target, base_path, output_file, spec, configs,
                 part_of_all=qualified_target in needed_targets)

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  # Write out per-gyp (sub-project) Makefiles.
  # NOTE(review): reuses 'writer' from the loop above, so this also assumes
  # target_list was non-empty.
  depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
  for build_file in build_files:
    # The paths in build_files were relativized above, so undo that before
    # testing against the non-relativized items in target_list and before
    # calculating the Makefile path.
    build_file = os.path.join(depth_rel_path, build_file)
    gyp_targets = [target_dicts[target]['target_name'] for target in target_list
                   if target.startswith(build_file) and
                   target in needed_targets]
    # Only generate Makefiles for gyp files with targets.
    if not gyp_targets:
      continue
    base_path, output_file = CalculateMakefilePath(build_file,
        os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
    makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
                                                os.path.dirname(output_file))
    writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
                        builddir_name)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    # We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD. The below make code says, only
    # load the .mk file if the .mk filename doesn't start with a token in
    # NO_LOAD.
    root_makefile.write(
        "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
        "    $(findstring $(join ^,$(prefix)),\\\n"
        "                 $(join ^," + include_file + ")))),)\n")
    root_makefile.write("  include " + include_file + "\n")
    root_makefile.write("endif\n")
  root_makefile.write('\n')

  # Unless disabled, make the Makefile regenerate itself when gyp inputs
  # change.
  if (not generator_flags.get('standalone')
      and generator_flags.get('auto_regeneration', True)):
    WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
| mit |
henaras/horizon | openstack_dashboard/dashboards/project/instances/tests.py | 4 | 199728 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import widgets
from django import http
import django.test
from django.utils.datastructures import SortedDict
from django.utils import encoding
from django.utils.http import urlencode
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
from horizon import exceptions
from horizon import forms
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.instances import console
from openstack_dashboard.dashboards.project.instances import tables
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances import workflows
from openstack_dashboard.test import helpers
from openstack_dashboard.usage import quotas
# URL of the project instances index page; table actions in these tests
# POST back to this URL and are expected to redirect to it afterwards.
INDEX_URL = reverse('horizon:project:instances:index')
# Prefix for the per-role security-group form fields produced by the
# update-instance workflow.
SEC_GROUP_ROLE_PREFIX = \
    workflows.update_instance.INSTANCE_SEC_GROUP_SLUG + "_role_"
# Cinder search options used when listing bootable volumes / snapshots
# for the launch-instance workflow.
AVAILABLE = api.cinder.VOLUME_STATE_AVAILABLE
VOLUME_SEARCH_OPTS = dict(status=AVAILABLE, bootable=1)
SNAPSHOT_SEARCH_OPTS = dict(status=AVAILABLE)
class InstanceTests(helpers.TestCase):
    @helpers.create_stubs({
        api.nova: (
            'flavor_list',
            'server_list',
            'tenant_absolute_limits',
            'extension_supported',
        ),
        api.glance: ('image_list_detailed',),
        api.network: (
            'floating_ip_simple_associate_supported',
            'floating_ip_supported',
            'servers_update_addresses',
        ),
    })
    def _get_index(self):
        """Record the happy-path API stubs for the index view and GET it.

        NOTE: mox verifies recorded expectations, so the call sequence
        below must mirror exactly what the index view performs.
        """
        servers = self.servers.list()
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        return self.client.get(INDEX_URL)
    def test_index(self):
        """Index renders the instances table with all servers listed."""
        res = self._get_index()
        self.assertTemplateUsed(res,
                                'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertItemsEqual(instances, self.servers.list())
        # Quota is not exhausted in the fixtures, so the launch button
        # must not be disabled.
        self.assertNotContains(res, "Launch Instance (Quota exceeded)")
    @helpers.create_stubs({api.nova: ('server_list',
                                      'tenant_absolute_limits',)})
    def test_index_server_list_exception(self):
        """A nova error while listing servers yields an empty table
        plus one error message, not a crash.
        """
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndRaise(self.exceptions.nova)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        self.assertEqual(len(res.context['instances_table'].data), 0)
        self.assertMessageCount(res, error=1)
    @helpers.create_stubs({
        api.nova: ('flavor_list', 'server_list', 'flavor_get',
                   'tenant_absolute_limits', 'extension_supported',),
        api.glance: ('image_list_detailed',),
        api.network: ('floating_ip_simple_associate_supported',
                      'floating_ip_supported',
                      'servers_update_addresses',),
    })
    def test_index_flavor_list_exception(self):
        """If the bulk flavor_list call fails, the view falls back to a
        per-server flavor_get and still renders every instance.
        """
        servers = self.servers.list()
        flavors = self.flavors.list()
        full_flavors = SortedDict([(f.id, f) for f in flavors])
        search_opts = {'marker': None, 'paginate': True}
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        # Fallback path: one flavor_get per server after the list failed.
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndReturn(full_flavors[server.flavor["id"]])
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertItemsEqual(instances, self.servers.list())
    @helpers.create_stubs({
        api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
                   'extension_supported',),
        api.glance: ('image_list_detailed',),
        api.network: ('floating_ip_simple_associate_supported',
                      'floating_ip_supported',
                      'servers_update_addresses',),
    })
    def test_index_with_instance_booted_from_volume(self):
        """An instance without an image (booted from volume) is still
        listed, showing its "(not found)" image name.
        """
        volume_server = self.servers.first()
        volume_server.image = ""
        volume_server.image_name = "(not found)"
        servers = self.servers.list()
        servers[0] = volume_server
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        instances = res.context['instances_table'].data
        self.assertEqual(len(instances), len(servers))
        self.assertContains(res, "(not found)")
def test_index_with_console_link(self):
res = self._get_index()
instances_table = res.context['instances_table']
instances = res.context['instances_table'].data
console_link_rendered = False
for instance in instances:
for action in instances_table.get_row_actions(instance):
if isinstance(action, tables.ConsoleLink):
console_link_rendered = True
break
if console_link_rendered:
break
self.assertTrue(console_link_rendered)
@django.test.utils.override_settings(CONSOLE_TYPE=None)
def test_index_without_console_link(self):
res = self._get_index()
instances_table = res.context['instances_table']
instances = res.context['instances_table'].data
for instance in instances:
for action in instances_table.get_row_actions(instance):
self.assertNotIsInstance(action, tables.ConsoleLink)
    @helpers.create_stubs({api.nova: ('server_list',
                                      'flavor_list',
                                      'server_delete',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_terminate_instance(self):
        """Posting the terminate row action deletes the server and
        redirects back to the index.
        """
        servers = self.servers.list()
        server = servers[0]
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        api.nova.server_delete(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__terminate__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_list',
                                      'flavor_list',
                                      'server_delete',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_terminate_instance_exception(self):
        """A nova failure during terminate still redirects to the index
        (the error is reported via messages, not an exception page).
        """
        servers = self.servers.list()
        server = servers[0]
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        api.nova.server_delete(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__terminate__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_pause',
                                      'server_list',
                                      'flavor_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_pause_instance(self):
        """The pause toggle action on an active server calls
        server_pause and redirects to the index.
        """
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_pause(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_pause',
                                      'server_list',
                                      'flavor_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_pause_instance_exception(self):
        """A nova failure during pause still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_pause(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_unpause',
                                      'server_list',
                                      'flavor_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_unpause_instance(self):
        """The same 'pause' toggle action on a PAUSED server calls
        server_unpause (pause/unpause share one table action slug).
        """
        servers = self.servers.list()
        server = servers[0]
        server.status = "PAUSED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_unpause(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_unpause',
                                      'server_list',
                                      'flavor_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_unpause_instance_exception(self):
        """A nova failure during unpause still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        server.status = "PAUSED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_unpause(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__pause__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_reboot',
                                      'server_list',
                                      'flavor_list',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_reboot_instance(self):
        """The reboot action issues a hard reboot (soft_reboot=False)."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               soft_reboot=False)
        self.mox.ReplayAll()
        formData = {'action': 'instances__reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_reboot',
                                      'server_list',
                                      'flavor_list',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_reboot_instance_exception(self):
        """A nova failure during reboot still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               soft_reboot=False) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_reboot',
                                      'server_list',
                                      'flavor_list',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_soft_reboot_instance(self):
        """The soft-reboot action passes soft_reboot=True to nova."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                               soft_reboot=True)
        self.mox.ReplayAll()
        formData = {'action': 'instances__soft_reboot__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_suspend',
                                      'server_list',
                                      'flavor_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_suspend_instance(self):
        """The suspend toggle action on an active server calls
        server_suspend and redirects to the index.
        """
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        # The table passes the row id through as unicode (Python 2).
        api.nova.server_suspend(IsA(http.HttpRequest), unicode(server.id))
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_suspend',
                                      'server_list',
                                      'flavor_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_suspend_instance_exception(self):
        """A nova failure during suspend still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_suspend(IsA(http.HttpRequest), unicode(server.id)) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_resume',
                                      'server_list',
                                      'flavor_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_resume_instance(self):
        """The same 'suspend' toggle action on a SUSPENDED server calls
        server_resume (suspend/resume share one table action slug).
        """
        servers = self.servers.list()
        server = servers[0]
        server.status = "SUSPENDED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_resume(IsA(http.HttpRequest), unicode(server.id))
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_resume',
                                      'server_list',
                                      'flavor_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_resume_instance_exception(self):
        """A nova failure during resume still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        server.status = "SUSPENDED"
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_resume(IsA(http.HttpRequest),
                               unicode(server.id)) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__suspend__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_lock',
                                      'server_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_lock_instance(self):
        """The lock action calls server_lock and redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions', IsA(
            http.HttpRequest)).MultipleTimes().AndReturn(True)
        api.glance.image_list_detailed(IgnoreArg()).AndReturn((
            self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(
            IsA(http.HttpRequest),
            search_opts=search_opts).AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_lock(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__lock__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_lock',
                                      'server_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_lock_instance_exception(self):
        """A nova failure during lock still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions', IsA(
            http.HttpRequest)).MultipleTimes().AndReturn(True)
        api.glance.image_list_detailed(IgnoreArg()).AndReturn((
            self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(
            IsA(http.HttpRequest),
            search_opts=search_opts).AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_lock(IsA(http.HttpRequest), server.id).AndRaise(
            self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__lock__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_unlock',
                                      'server_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_unlock_instance(self):
        """The unlock action calls server_unlock and redirects to the
        index.
        """
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions', IsA(
            http.HttpRequest)).MultipleTimes().AndReturn(True)
        api.glance.image_list_detailed(IgnoreArg()).AndReturn((
            self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(
            IsA(http.HttpRequest),
            search_opts=search_opts).AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_unlock(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        formData = {'action': 'instances__unlock__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_unlock',
                                      'server_list',
                                      'extension_supported',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_unlock_instance_exception(self):
        """A nova failure during unlock still redirects to the index."""
        servers = self.servers.list()
        server = servers[0]
        api.nova.extension_supported('AdminActions', IsA(
            http.HttpRequest)).MultipleTimes().AndReturn(True)
        api.glance.image_list_detailed(IgnoreArg()).AndReturn((
            self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(
            IsA(http.HttpRequest),
            search_opts=search_opts).AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.server_unlock(IsA(http.HttpRequest), server.id).AndRaise(
            self.exceptions.nova)
        self.mox.ReplayAll()
        formData = {'action': 'instances__unlock__%s' % server.id}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({
        api.nova: (
            "server_get",
            "instance_volumes_list",
            "flavor_get",
            "extension_supported"
        ),
        api.network: (
            "server_security_groups",
            "servers_update_addresses",
            "floating_ip_simple_associate_supported",
            "floating_ip_supported"
        )
    })
    def _get_instance_details(self, server, qs=None,
                              flavor_return=None, volumes_return=None,
                              security_groups_return=None,
                              flavor_exception=False):
        """Record the detail-view stubs for ``server`` and GET its page.

        :param server: server fixture whose detail page is requested.
        :param qs: optional query string (e.g. to select a detail tab).
        :param flavor_return: flavor returned by flavor_get; defaults to
            the first flavor fixture.
        :param volumes_return: attached volumes; defaults to none.
        :param security_groups_return: security groups; defaults to all
            fixtures.
        :param flavor_exception: when True, flavor_get raises instead of
            returning, exercising the "flavor not found" path.

        NOTE: mox verifies recorded expectations, so the call sequence
        below must mirror exactly what the detail view performs.
        """
        url = reverse('horizon:project:instances:detail', args=[server.id])
        if qs:
            url += qs
        if flavor_return is None:
            flavor_return = self.flavors.first()
        if volumes_return is None:
            volumes_return = []
        if security_groups_return is None:
            security_groups_return = self.security_groups.list()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.servers_update_addresses(IsA(http.HttpRequest),
                                             IgnoreArg())
        api.nova.instance_volumes_list(IsA(http.HttpRequest),
                                       server.id).AndReturn(volumes_return)
        if flavor_exception:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
                .AndRaise(self.exceptions.nova)
        else:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
                .AndReturn(flavor_return)
        api.network.server_security_groups(IsA(http.HttpRequest), server.id) \
            .AndReturn(security_groups_return)
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        return self.client.get(url)
def test_instance_details_volumes(self):
server = self.servers.first()
volumes = [self.volumes.list()[1]]
security_groups = self.security_groups.list()
res = self._get_instance_details(
server, volumes_return=volumes,
security_groups_return=security_groups)
self.assertItemsEqual(res.context['instance'].volumes, volumes)
self.assertItemsEqual(res.context['instance'].volumes, volumes)
    def test_instance_details_volume_sorting(self):
        """Attached volumes are listed sorted by device name."""
        server = self.servers.first()
        volumes = self.volumes.list()[1:3]
        security_groups = self.security_groups.list()
        res = self._get_instance_details(
            server, volumes_return=volumes,
            security_groups_return=security_groups)
        self.assertItemsEqual(res.context['instance'].volumes, volumes)
        # /dev/hda must sort before /dev/hdk.
        self.assertEqual(res.context['instance'].volumes[0].device,
                         "/dev/hda")
        self.assertEqual(res.context['instance'].volumes[1].device,
                         "/dev/hdk")
    def test_instance_details_metadata(self):
        """Instance metadata is rendered HTML-escaped on the overview tab."""
        server = self.servers.first()
        tg = tabs.InstanceDetailTabs(self.request, instance=server)
        qs = "?%s=%s" % (tg.param_name, tg.get_tab("overview").get_id())
        res = self._get_instance_details(server, qs)
        self.assertContains(res, "<dd>keyName</dd>", 1)
        self.assertContains(res, "<dt>someMetaLabel</dt>", 1)
        self.assertContains(res, "<dd>someMetaData</dd>", 1)
        # Metadata containing markup must come out escaped.
        self.assertContains(res, "<dt>some&lt;b&gt;html&lt;/b&gt;label</dt>",
                            1)
        self.assertContains(res, "<dd>&lt;!--</dd>", 1)
        self.assertContains(res, "<dt>empty</dt>", 1)
        # TODO(david-lyle): uncomment when fixed with Django 1.6
        # self.assertContains(res, "<dd><em>N/A</em></dd>", 1)
    def test_instance_details_fault(self):
        """For an ERROR instance, the fault dict is exposed in the
        detail-view context.
        """
        server = self.servers.first()
        server.status = 'ERROR'
        server.fault = {"message": "NoValidHost",
                        "code": 500,
                        "details": "No valid host was found. \n  "
                                   "File \"/mnt/stack/nova/nova/"
                                   "scheduler/filter_scheduler.py\", "
                                   "line 105, in schedule_run_instance\n    "
                                   "raise exception.NoValidHost"
                                   "(reason=\"\")\n",
                        "created": "2013-10-07T00:08:32Z"}
        res = self._get_instance_details(server)
        self.assertItemsEqual(res.context['instance'].fault, server.fault)
def test_instance_details_console_tab(self):
server = self.servers.first()
tg = tabs.InstanceDetailTabs(self.request, instance=server)
qs = "?%s=%s" % (tg.param_name, tg.get_tab("console").get_id())
res = self._get_instance_details(server, qs)
self.assertIn(tabs.ConsoleTab, res.context_data['tab_group'].tabs)
self.assertTemplateUsed(res,
'project/instances/_detail_console.html')
console_tab_rendered = False
for tab in res.context_data['tab_group'].get_loaded_tabs():
if isinstance(tab, tabs.ConsoleTab):
console_tab_rendered = True
break
self.assertTrue(console_tab_rendered)
    @django.test.utils.override_settings(CONSOLE_TYPE=None)
    def test_instance_details_console_tab_deactivated(self):
        """With CONSOLE_TYPE disabled the console tab is absent and its
        template is never rendered.
        """
        server = self.servers.first()
        tg = tabs.InstanceDetailTabs(self.request, instance=server)
        self.assertIsNone(tg.get_tab("console"))
        res = self._get_instance_details(server)
        self.assertTemplateNotUsed(res,
                                   'project/instances/_detail_console.html')
        for tab in res.context_data['tab_group'].get_loaded_tabs():
            self.assertNotIsInstance(tab, tabs.ConsoleTab)
    @helpers.create_stubs({api.nova: ('server_get',)})
    def test_instance_details_exception(self):
        """A nova error fetching the server redirects to the index."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:detail',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ("server_get",)})
def test_instance_details_unauthorized(self):
server = self.servers.first()
api.nova.server_get(IsA(http.HttpRequest), server.id)\
.AndRaise(self.exceptions.nova_unauthorized)
self.mox.ReplayAll()
url = reverse('horizon:project:instances:detail',
args=[server.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
    def test_instance_details_flavor_not_found(self):
        """When flavor_get fails, the overview still renders and shows
        "Not available" for the flavor.
        """
        server = self.servers.first()
        res = self._get_instance_details(server, flavor_exception=True)
        self.assertTemplateUsed(res,
                                'project/instances/_detail_overview.html')
        self.assertContains(res, "Not available")
    @helpers.create_stubs({api.nova: ('server_console_output',)})
    def test_instance_log(self):
        """The log tab shows the console output returned by nova."""
        server = self.servers.first()
        CONSOLE_OUTPUT = 'output'
        api.nova.server_console_output(IsA(http.HttpRequest),
                                       server.id, tail_length=None) \
            .AndReturn(CONSOLE_OUTPUT)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:console',
                      args=[server.id])
        tg = tabs.InstanceDetailTabs(self.request, instance=server)
        qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
        res = self.client.get(url + qs)
        self.assertNoMessages()
        self.assertIsInstance(res, http.HttpResponse)
        self.assertContains(res, CONSOLE_OUTPUT)
    @helpers.create_stubs({api.nova: ('server_console_output',)})
    def test_instance_log_exception(self):
        """A nova failure fetching console output renders an error
        message in the log tab.
        """
        server = self.servers.first()
        api.nova.server_console_output(IsA(http.HttpRequest),
                                       server.id, tail_length=None) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:console',
                      args=[server.id])
        tg = tabs.InstanceDetailTabs(self.request, instance=server)
        qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
        res = self.client.get(url + qs)
        self.assertContains(res, "Unable to get log for")
    def test_instance_log_invalid_input(self):
        """Negative or non-numeric ``length`` parameters produce the
        error message instead of calling nova.
        """
        server = self.servers.first()
        url = reverse('horizon:project:instances:console',
                      args=[server.id])
        tg = tabs.InstanceDetailTabs(self.request, instance=server)
        for length in ["-5", "x"]:
            qs = "?%s=%s&length=%s" % (tg.param_name,
                                       tg.get_tab("log").get_id(),
                                       length)
            res = self.client.get(url + qs)
            self.assertContains(res, "Unable to get log for")
    def test_instance_vnc(self):
        """The VNC view redirects to the console URL (with the title
        query argument) returned by console.get_console.
        """
        server = self.servers.first()
        CONSOLE_OUTPUT = '/vncserver'
        CONSOLE_TITLE = '&title=%s(%s)' % (server.name, server.id)
        CONSOLE_URL = CONSOLE_OUTPUT + CONSOLE_TITLE
        # NOTE(review): console_mock appears unused once get_console is
        # stubbed below — looks like leftover setup; confirm before removing.
        console_mock = self.mox.CreateMock(api.nova.VNCConsole)
        console_mock.url = CONSOLE_OUTPUT
        self.mox.StubOutWithMock(api.nova, 'server_get')
        self.mox.StubOutWithMock(console, 'get_console')
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        console.get_console(IgnoreArg(), 'VNC', server) \
            .AndReturn(('VNC', CONSOLE_URL))
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:vnc',
                      args=[server.id])
        res = self.client.get(url)
        redirect = CONSOLE_URL
        self.assertRedirectsNoFollow(res, redirect)
    def test_instance_vnc_error(self):
        """If the VNC console is unavailable, the view redirects back to
        the index.
        """
        server = self.servers.first()
        self.mox.StubOutWithMock(api.nova, 'server_get')
        self.mox.StubOutWithMock(console, 'get_console')
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        console.get_console(IgnoreArg(), 'VNC', server) \
            .AndRaise(exceptions.NotAvailable('console'))
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:vnc',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    def test_instance_spice(self):
        """The spice view redirects to the URL returned by get_console().

        Same structure as test_instance_vnc but for the 'SPICE' console
        type.
        """
        server = self.servers.first()
        CONSOLE_OUTPUT = '/spiceserver'
        CONSOLE_TITLE = '&title=%s(%s)' % (server.name, server.id)
        CONSOLE_URL = CONSOLE_OUTPUT + CONSOLE_TITLE
        console_mock = self.mox.CreateMock(api.nova.SPICEConsole)
        console_mock.url = CONSOLE_OUTPUT
        self.mox.StubOutWithMock(console, 'get_console')
        self.mox.StubOutWithMock(api.nova, 'server_get')
        # Record phase: mox verifies these calls (and their order) on replay.
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        console.get_console(IgnoreArg(), 'SPICE', server) \
            .AndReturn(('SPICE', CONSOLE_URL))
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:spice',
                      args=[server.id])
        res = self.client.get(url)
        redirect = CONSOLE_URL
        self.assertRedirectsNoFollow(res, redirect)
    def test_instance_spice_exception(self):
        """An unavailable SPICE console redirects back to the index page."""
        server = self.servers.first()
        self.mox.StubOutWithMock(console, 'get_console')
        self.mox.StubOutWithMock(api.nova, 'server_get')
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        console.get_console(IgnoreArg(), 'SPICE', server) \
            .AndRaise(exceptions.NotAvailable('console'))
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:spice',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    def test_instance_rdp(self):
        """The rdp view redirects to the URL returned by get_console().

        Same structure as test_instance_vnc but for the 'RDP' console
        type.
        """
        server = self.servers.first()
        CONSOLE_OUTPUT = '/rdpserver'
        CONSOLE_TITLE = '&title=%s(%s)' % (server.name, server.id)
        CONSOLE_URL = CONSOLE_OUTPUT + CONSOLE_TITLE
        console_mock = self.mox.CreateMock(api.nova.RDPConsole)
        console_mock.url = CONSOLE_OUTPUT
        self.mox.StubOutWithMock(console, 'get_console')
        self.mox.StubOutWithMock(api.nova, 'server_get')
        # Record phase: mox verifies these calls (and their order) on replay.
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        console.get_console(IgnoreArg(), 'RDP', server) \
            .AndReturn(('RDP', CONSOLE_URL))
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:rdp',
                      args=[server.id])
        res = self.client.get(url)
        redirect = CONSOLE_URL
        self.assertRedirectsNoFollow(res, redirect)
    def test_instance_rdp_exception(self):
        """An unavailable RDP console redirects back to the index page."""
        server = self.servers.first()
        self.mox.StubOutWithMock(console, 'get_console')
        self.mox.StubOutWithMock(api.nova, 'server_get')
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndReturn(server)
        console.get_console(IgnoreArg(), 'RDP', server) \
            .AndRaise(exceptions.NotAvailable('console'))
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:rdp',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('server_get',
                                      'snapshot_create',
                                      'server_list',
                                      'flavor_list',
                                      'server_delete'),
                           api.glance: ('image_list_detailed',)})
    def test_create_instance_snapshot(self):
        """POSTing the CreateSnapshot form redirects to the images index.

        Records server_get, snapshot_create and an image listing, then
        submits the snapshot-create form and checks the redirect target.
        """
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.snapshot_create(IsA(http.HttpRequest),
                                 server.id,
                                 "snapshot1").AndReturn(self.snapshots.first())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None).AndReturn([[], False,
                                                               False])
        self.mox.ReplayAll()
        formData = {'instance_id': server.id,
                    'method': 'CreateSnapshot',
                    'name': 'snapshot1'}
        url = reverse('horizon:project:images:snapshots:create',
                      args=[server.id])
        redir_url = reverse('horizon:project:images:index')
        res = self.client.post(url, formData)
        self.assertRedirects(res, redir_url)
@django.test.utils.override_settings(
OPENSTACK_ENABLE_PASSWORD_RETRIEVE=False)
def test_instances_index_retrieve_password_action_disabled(self):
self. _test_instances_index_retrieve_password_action()
    @django.test.utils.override_settings(
        OPENSTACK_ENABLE_PASSWORD_RETRIEVE=True)
    def test_instances_index_retrieve_password_action_enabled(self):
        """Retrieve-password row action with the setting turned on."""
        self._test_instances_index_retrieve_password_action()
    @helpers.create_stubs({
        api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
                   'extension_supported',),
        api.glance: ('image_list_detailed',),
        api.network: ('floating_ip_simple_associate_supported',
                      'floating_ip_supported',
                      'servers_update_addresses',),
    })
    def _test_instances_index_retrieve_password_action(self):
        """Shared body for the retrieve-password visibility tests.

        Loads the instances index and, per server row, asserts the
        decrypt-password action id is present only when the setting is
        enabled AND the server is ACTIVE AND it has a key pair.
        """
        servers = self.servers.list()
        # Record the calls the index view makes while rendering.
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers, False])
        api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:index')
        res = self.client.get(url)
        for server in servers:
            # Row action ids follow the "<table>__row_<id>__action_<name>"
            # convention used by Horizon data tables.
            _action_id = ''.join(["instances__row_",
                                  server.id,
                                  "__action_decryptpassword"])
            if settings.OPENSTACK_ENABLE_PASSWORD_RETRIEVE and \
                server.status == "ACTIVE" and \
                server.key_name is not None:
                self.assertContains(res, _action_id)
            else:
                self.assertNotContains(res, _action_id)
    @helpers.create_stubs({api.nova: ('get_password',)})
    def test_decrypt_instance_password(self):
        """The decrypt-password view renders its template on success."""
        server = self.servers.first()
        # Value returned by the stubbed nova get_password call.
        enc_password = "azerty"
        api.nova.get_password(IsA(http.HttpRequest), server.id)\
            .AndReturn(enc_password)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:decryptpassword',
                      args=[server.id,
                            server.key_name])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'project/instances/decryptpassword.html')
    @helpers.create_stubs({api.nova: ('get_password',)})
    def test_decrypt_instance_get_exception(self):
        """A nova error while fetching the password redirects to the index."""
        server = self.servers.first()
        keypair = self.keypairs.first()
        api.nova.get_password(IsA(http.HttpRequest), server.id)\
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:decryptpassword',
                      args=[server.id,
                            keypair])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
instance_update_get_stubs = {
api.nova: ('server_get',),
api.network: ('security_group_list',
'server_security_groups',)}
    @helpers.create_stubs(instance_update_get_stubs)
    def test_instance_update_get(self):
        """GET on the update view renders the workflow template."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        api.network.server_security_groups(IsA(http.HttpRequest),
                                           server.id).AndReturn([])
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:update', args=[server.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @helpers.create_stubs(instance_update_get_stubs)
    def test_instance_update_get_server_get_exception(self):
        """A nova error on server_get redirects the update view to index."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:update',
                      args=[server.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def _instance_update_post(self, server_id, server_name, secgroups):
default_role_field_name = 'default_' + \
workflows.update_instance.INSTANCE_SEC_GROUP_SLUG + '_role'
formData = {'name': server_name,
default_role_field_name: 'member',
SEC_GROUP_ROLE_PREFIX + 'member': secgroups}
url = reverse('horizon:project:instances:update',
args=[server_id])
return self.client.post(url, formData)
instance_update_post_stubs = {
api.nova: ('server_get', 'server_update'),
api.network: ('security_group_list',
'server_security_groups',
'server_update_security_groups')}
    @helpers.create_stubs(instance_update_post_stubs)
    def test_instance_update_post(self):
        """A successful update POST redirects to the index with no errors.

        The server starts with two of three security groups assigned and
        the form requests a different pair; the recorded expectations pin
        the exact group ids passed to server_update_security_groups.
        """
        server = self.servers.first()
        secgroups = self.security_groups.list()[:3]
        server_groups = [secgroups[0], secgroups[1]]
        wanted_groups = [secgroups[1].id, secgroups[2].id]
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(secgroups)
        api.network.server_security_groups(IsA(http.HttpRequest),
                                           server.id).AndReturn(server_groups)
        api.nova.server_update(IsA(http.HttpRequest),
                               server.id,
                               server.name).AndReturn(server)
        api.network.server_update_security_groups(IsA(http.HttpRequest),
                                                  server.id,
                                                  wanted_groups)
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, server.name, wanted_groups)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs(instance_update_post_stubs)
    def test_instance_update_post_api_exception(self):
        """A nova error in server_update still redirects to the index.

        The security-group update is still expected to run even though
        server_update raised.
        """
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        api.network.server_security_groups(IsA(http.HttpRequest),
                                           server.id).AndReturn([])
        api.nova.server_update(IsA(http.HttpRequest), server.id, server.name) \
            .AndRaise(self.exceptions.nova)
        api.network.server_update_security_groups(
            IsA(http.HttpRequest), server.id, [])
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, server.name, [])
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs(instance_update_post_stubs)
    def test_instance_update_post_secgroup_api_exception(self):
        """A neutron error updating security groups redirects to the index."""
        server = self.servers.first()
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn([])
        api.network.server_security_groups(IsA(http.HttpRequest),
                                           server.id).AndReturn([])
        api.nova.server_update(IsA(http.HttpRequest),
                               server.id,
                               server.name).AndReturn(server)
        api.network.server_update_security_groups(
            IsA(http.HttpRequest),
            server.id, []).AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self._instance_update_post(server.id, server.name, [])
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'tenant_absolute_limits',
                                      'availability_zone_list',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_snapshot_list',
                                    'volume_list',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           api.glance: ('image_list_detailed',)})
    def test_launch_instance_get(self,
                                 expect_password_fields=True,
                                 block_device_mapping_v2=True,
                                 custom_flavor_sort=None,
                                 only_one_network=False,
                                 disk_config=True,
                                 config_drive=True,
                                 test_with_profile=False):
        """GET on the launch workflow renders all steps and options.

        Parameterized body reused by many wrapper tests below; the
        keyword flags both drive the values returned by the recorded
        mox expectations and select which assertions run at the end
        (flavor sort order, password fields, BDMv2 boot option, network
        pre-selection, disk-config and config-drive fields).
        """
        image = self.images.first()
        # Record phase: every API call the launch workflow makes while
        # rendering, in order.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(block_device_mapping_v2)
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        if only_one_network:
            api.neutron.network_list(IsA(http.HttpRequest),
                                     shared=True).AndReturn([])
        else:
            api.neutron.network_list(IsA(http.HttpRequest),
                                     shared=True) \
                .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(disk_config)
        api.nova.extension_supported(
            'ConfigDrive', IsA(http.HttpRequest)).AndReturn(config_drive)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        # flavor_list is expected twice (the workflow fetches it twice).
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:launch')
        params = urlencode({"source_type": "image_id",
                            "source_id": image.id})
        res = self.client.get("%s?%s" % (url, params))
        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(res.context['workflow'].name,
                         workflows.LaunchInstance.name)
        step = workflow.get_step("setinstancedetailsaction")
        self.assertEqual(step.action.initial['image_id'], image.id)
        self.assertQuerysetEqual(
            workflow.steps,
            ['<SetInstanceDetails: setinstancedetailsaction>',
             '<SetAccessControls: setaccesscontrolsaction>',
             '<SetNetwork: setnetworkaction>',
             '<PostCreationStep: customizeaction>',
             '<SetAdvanced: setadvancedaction>'])
        # Expected flavor <option> order for each CREATE_INSTANCE_FLAVOR_SORT
        # variant exercised by the wrapper tests.
        if custom_flavor_sort == 'id':
            # Reverse sorted by id
            sorted_flavors = (
                ('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee', 'm1.metadata'),
                ('dddddddd-dddd-dddd-dddd-dddddddddddd', 'm1.secret'),
                ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'm1.massive'),
                ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'm1.tiny'),
            )
        elif custom_flavor_sort == 'name':
            sorted_flavors = (
                ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'm1.massive'),
                ('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee', 'm1.metadata'),
                ('dddddddd-dddd-dddd-dddd-dddddddddddd', 'm1.secret'),
                ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'm1.tiny'),
            )
        elif custom_flavor_sort == helpers.my_custom_sort:
            sorted_flavors = (
                ('dddddddd-dddd-dddd-dddd-dddddddddddd', 'm1.secret'),
                ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'm1.tiny'),
                ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'm1.massive'),
                ('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee', 'm1.metadata'),
            )
        else:
            # Default - sorted by RAM
            sorted_flavors = (
                ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'm1.tiny'),
                ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'm1.massive'),
                ('dddddddd-dddd-dddd-dddd-dddddddddddd', 'm1.secret'),
                ('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee', 'm1.metadata'),
            )
        select_options = '\n'.join([
            '<option value="%s">%s</option>' % (f[0], f[1])
            for f in sorted_flavors
        ])
        self.assertContains(res, select_options)
        password_field_label = 'Admin Pass'
        if expect_password_fields:
            self.assertContains(res, password_field_label)
        else:
            self.assertNotContains(res, password_field_label)
        boot_from_image_field_label = 'Boot from image (creates a new volume)'
        if block_device_mapping_v2:
            self.assertContains(res, boot_from_image_field_label)
        else:
            self.assertNotContains(res, boot_from_image_field_label)
        checked_label = '<label for="id_network_0"><input checked="checked"'
        if only_one_network:
            self.assertContains(res, checked_label)
        else:
            self.assertNotContains(res, checked_label)
        disk_config_field_label = 'Disk Partition'
        if disk_config:
            self.assertContains(res, disk_config_field_label)
        else:
            self.assertNotContains(res, disk_config_field_label)
        config_drive_field_label = 'Configuration Drive'
        if config_drive:
            self.assertContains(res, config_drive_field_label)
        else:
            self.assertNotContains(res, config_drive_field_label)
    @django.test.utils.override_settings(
        OPENSTACK_HYPERVISOR_FEATURES={'can_set_password': False})
    def test_launch_instance_get_without_password(self):
        """Password fields are hidden when can_set_password is disabled."""
        self.test_launch_instance_get(expect_password_fields=False)
    def test_launch_instance_get_no_block_device_mapping_v2_supported(self):
        """Launch form without the BlockDeviceMappingV2Boot extension."""
        self.test_launch_instance_get(block_device_mapping_v2=False)
    def test_launch_instance_get_no_disk_config_supported(self):
        """Launch form without the DiskConfig extension."""
        self.test_launch_instance_get(disk_config=False)
    def test_launch_instance_get_no_config_drive_supported(self):
        """Launch form without the ConfigDrive extension."""
        self.test_launch_instance_get(config_drive=False)
    @django.test.utils.override_settings(
        CREATE_INSTANCE_FLAVOR_SORT={
            'key': 'id',
            'reverse': True,
        })
    def test_launch_instance_get_custom_flavor_sort_by_id(self):
        """Flavors sorted by id (reversed) via CREATE_INSTANCE_FLAVOR_SORT."""
        self.test_launch_instance_get(custom_flavor_sort='id')
    @django.test.utils.override_settings(
        CREATE_INSTANCE_FLAVOR_SORT={
            'key': 'name',
            'reverse': False,
        })
    def test_launch_instance_get_custom_flavor_sort_by_name(self):
        """Flavors sorted by name via CREATE_INSTANCE_FLAVOR_SORT."""
        self.test_launch_instance_get(custom_flavor_sort='name')
    @django.test.utils.override_settings(
        CREATE_INSTANCE_FLAVOR_SORT={
            'key': helpers.my_custom_sort,
            'reverse': False,
        })
    def test_launch_instance_get_custom_flavor_sort_by_callable(self):
        """Flavors sorted by a callable key via CREATE_INSTANCE_FLAVOR_SORT."""
        self.test_launch_instance_get(
            custom_flavor_sort=helpers.my_custom_sort)
    @django.test.utils.override_settings(
        CREATE_INSTANCE_FLAVOR_SORT={
            'key': 'no_such_column',
            'reverse': False,
        })
    def test_launch_instance_get_custom_flavor_sort_by_missing_column(self):
        """An unknown sort key falls back to the default (RAM) ordering."""
        self.test_launch_instance_get(custom_flavor_sort='no_such_column')
    def test_launch_instance_get_with_only_one_network(self):
        """A single available network is pre-checked in the launch form."""
        self.test_launch_instance_get(only_one_network=True)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_launch_instance_get_with_profile(self):
        """Launch form with neutron policy-profile support enabled."""
        self.test_launch_instance_get(test_with_profile=True)
    @helpers.create_stubs({api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'tenant_absolute_limits',
                                      'availability_zone_list',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_snapshot_list',
                                    'volume_list',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           api.glance: ('image_list_detailed',)})
    def test_launch_instance_get_bootable_volumes(self,
                                                  block_device_mapping_v2=True,
                                                  only_one_network=False,
                                                  disk_config=True,
                                                  config_drive=True,
                                                  test_with_profile=False):
        """Only available, bootable volumes appear as volume boot sources.

        Feeds the workflow a filtered volume list and asserts the
        'volume_id' field choices match exactly that set (plus the empty
        choice).
        """
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(block_device_mapping_v2)
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        if only_one_network:
            api.neutron.network_list(IsA(http.HttpRequest),
                                     shared=True).AndReturn([])
        else:
            api.neutron.network_list(IsA(http.HttpRequest),
                                     shared=True) \
                .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(disk_config)
        api.nova.extension_supported(
            'ConfigDrive', IsA(http.HttpRequest)).AndReturn(config_drive)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
            .AndReturn(self.limits['absolute'])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:launch')
        res = self.client.get(url)
        # NOTE(review): this filter uses the literal 'available' while the
        # list recorded above uses the AVAILABLE constant — presumably the
        # same value; confirm against the module-level constant.
        bootable_volumes = [v.id for v in self.volumes.list()
                            if (v.bootable == 'true' and
                                v.status == 'available')]
        volume_sources = (res.context_data['workflow'].steps[0].
                          action.fields['volume_id'].choices)
        volume_sources_ids = []
        for volume in volume_sources:
            self.assertTrue(volume[0].split(":vol")[0] in bootable_volumes or
                            volume[0] == '')
            if volume[0] != '':
                volume_sources_ids.append(volume[0].split(":vol")[0])
        for volume in bootable_volumes:
            self.assertTrue(volume in volume_sources_ids)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_launch_instance_get_bootable_volumes_with_profile(self):
        """Bootable-volume choices with policy-profile support enabled."""
        self.test_launch_instance_get_bootable_volumes(test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'server_create',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post(self,
                                  disk_config=True,
                                  config_drive=True,
                                  test_with_profile=False,
                                  test_with_multi_nics=False):
        """A valid launch POST calls server_create and redirects to index.

        Parameterized body reused by wrapper tests: the flags control the
        recorded extension-support values, the expected nics (plain
        network vs. pre-created profile ports, one or two), and the form
        fields submitted.
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        # Default nics when no port profile is involved.
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            # With profile support the workflow creates the port(s) itself
            # and passes port-ids instead of net-ids to server_create.
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port_one = self.ports.first()
            nics = [{"port-id": port_one.id}]
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(IsA(http.HttpRequest),
                                    self.networks.first().id,
                                    policy_profile_id=policy_profile_id) \
                .AndReturn(port_one)
            if test_with_multi_nics:
                port_two = self.ports.get(name="port5")
                nics = [{"port-id": port_one.id},
                        {"port-id": port_two.id}]
                # Add a second port to test multiple nics
                api.neutron.port_create(IsA(http.HttpRequest),
                                        self.networks.get(name="net4")['id'],
                                        policy_profile_id=policy_profile_id) \
                    .AndReturn(port_two)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(disk_config)
        api.nova.extension_supported(
            'ConfigDrive', IsA(http.HttpRequest)).AndReturn(config_drive)
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        if disk_config:
            disk_config_value = u'AUTO'
        else:
            disk_config_value = None
        if config_drive:
            config_drive_value = True
        else:
            config_drive_value = None
        # The central expectation: server_create with exactly these args.
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               image.id,
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping=None,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass=u'',
                               disk_config=disk_config_value,
                               config_drive=config_drive_value)
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'availability_zone': avail_zone.zoneName,
                     'volume_type': '',
                     'network': self.networks.first().id,
                     'count': 1}
        if disk_config:
            form_data['disk_config'] = 'AUTO'
        if config_drive:
            form_data['config_drive'] = True
        if test_with_profile:
            form_data['profile'] = self.policy_profiles.first().id
        if test_with_multi_nics:
            form_data['network'] = [self.networks.first().id,
                                    self.networks.get(name="net4")['id']]
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    def test_launch_instance_post_no_disk_config_supported(self):
        """Launch POST without the DiskConfig extension."""
        self.test_launch_instance_post(disk_config=False)
    def test_launch_instance_post_no_config_drive_supported(self):
        """Launch POST without the ConfigDrive extension."""
        self.test_launch_instance_post(config_drive=False)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_launch_instance_post_with_profile(self):
        """Launch POST with neutron policy-profile support enabled."""
        self.test_launch_instance_post(test_with_profile=True)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_launch_instance_post_with_profile_and_multi_nics(self):
        """Launch POST with profile support and two networks/ports."""
        self.test_launch_instance_post(test_with_profile=True,
                                       test_with_multi_nics=True)
    def _test_launch_instance_post_with_profile_and_port_error(
        self,
        test_with_multi_nics=False,
    ):
        """Shared body: a neutron port_create failure during launch.

        With multi nics, the first port is created successfully, the
        second port_create raises, and a port_delete for the first port
        is expected (cleanup).  Either way the POST should redirect to
        the index without form errors.
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        quota_usages = self.quota_usages.first()
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        policy_profiles = self.policy_profiles.list()
        policy_profile_id = self.policy_profiles.first().id
        port_one = self.ports.first()
        api.neutron.profile_list(
            IsA(http.HttpRequest),
            'policy').AndReturn(policy_profiles)
        if test_with_multi_nics:
            api.neutron.port_create(IsA(http.HttpRequest),
                                    self.networks.first().id,
                                    policy_profile_id=policy_profile_id) \
                .AndReturn(port_one)
            # Add a second port which has the exception to test multiple nics
            api.neutron.port_create(IsA(http.HttpRequest),
                                    self.networks.get(name="net4")['id'],
                                    policy_profile_id=policy_profile_id) \
                .AndRaise(self.exceptions.neutron)
            # Delete the first port
            api.neutron.port_delete(IsA(http.HttpRequest),
                                    port_one.id)
        else:
            api.neutron.port_create(IsA(http.HttpRequest),
                                    self.networks.first().id,
                                    policy_profile_id=policy_profile_id) \
                .AndRaise(self.exceptions.neutron)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'availability_zone': avail_zone.zoneName,
                     'volume_type': '',
                     'network': self.networks.first().id,
                     'count': 1,
                     'disk_config': 'AUTO',
                     'config_drive': True,
                     'profile': self.policy_profiles.first().id}
        if test_with_multi_nics:
            form_data['network'] = [self.networks.first().id,
                                    self.networks.get(name="net4")['id']]
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create',
                                         'port_delete',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_with_profile_and_port_error(self):
        """Port-create failure on a single-nic profile launch."""
        self._test_launch_instance_post_with_profile_and_port_error()
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create',
                                         'port_delete',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_lnch_inst_post_w_profile_and_multi_nics_w_port_error(self):
        """Port-create failure on the second nic of a multi-nic launch."""
        self._test_launch_instance_post_with_profile_and_port_error(
            test_with_multi_nics=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'server_create',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_boot_from_volume(
        self,
        test_with_profile=False,
        test_with_bdmv2=False
    ):
        """POST the launch form booting from a volume; expect success.

        :param test_with_profile: also record the Neutron policy-profile
            lookup and ``port_create`` expectations, and launch with the
            created port instead of a net-id NIC.
        :param test_with_bdmv2: value returned by the stubbed
            ``BlockDeviceMappingV2Boot`` extension check; when true the
            server_create expectation carries a v2 block-device mapping
            list instead of the v1 dict.
        """
        # Fixture objects reused both for the mox expectations and for
        # the form POST data below.
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        # Expected block-device mapping format depends on BDMv2 support.
        if test_with_bdmv2:
            volume_source_id = volume.id.split(':')[0]
            block_device_mapping = None
            block_device_mapping_2 = [
                {'device_name': u'vda',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'delete_on_termination': 0,
                 'uuid': volume_source_id,
                 'boot_index': '0',
                 'volume_size': 1
                 }
            ]
        else:
            block_device_mapping = {device_name: u"%s::0" % volume_choice}
            block_device_mapping_2 = None
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        # Record the API calls the launch workflow is expected to make.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(test_with_bdmv2)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        if test_with_profile:
            # Profile path: a port is created up front and used as the NIC.
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            nics = [{"port-id": port.id}]
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        # Only available, bootable volumes are offered as boot sources.
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(test_with_bdmv2)
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               '',
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping=block_device_mapping,
                               block_device_mapping_v2=block_device_mapping_2,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass=u'',
                               disk_config=u'AUTO',
                               config_drive=True)
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        # Form input mirroring the fixtures used in the expectations above.
        form_data = {'flavor': flavor.id,
                     'source_type': 'volume_id',
                     'source_id': volume_choice,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'availability_zone': avail_zone.zoneName,
                     'volume_size': '1',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'network': self.networks.first().id,
                     'count': 1,
                     'disk_config': 'AUTO',
                     'config_drive': True}
        if test_with_profile:
            form_data['profile'] = self.policy_profiles.first().id
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        # A clean submit redirects back to the instances index.
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
def test_launch_instance_post_boot_from_volume_with_bdmv2(self):
self.test_launch_instance_post_boot_from_volume(test_with_bdmv2=True)
@helpers.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_instance_post_boot_from_volume_with_profile(self):
self.test_launch_instance_post_boot_from_volume(test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create'),
                           api.nova: ('server_create',
                                      'extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'tenant_absolute_limits',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_no_images_available_boot_from_volume(
        self,
        test_with_profile=False,
    ):
        """Boot from a volume without selecting an image; expect success.

        The second ``BlockDeviceMappingV2Boot`` check is stubbed to
        return False, so server_create is expected with the v1
        ``block_device_mapping`` dict and no v2 mapping.

        :param test_with_profile: also record the policy-profile lookup
            and ``port_create`` expectations and launch via the port.
        """
        # Fixture objects reused for both the expectations and the POST.
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        block_device_mapping = {device_name: u"%s::0" % volume_choice}
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        # Record the API calls the launch workflow is expected to make.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        if test_with_profile:
            # Profile path: a port is created up front and used as the NIC.
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            nics = [{"port-id": port.id}]
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        # Only available, bootable volumes are offered as boot sources.
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        # BDMv2 reported unsupported here, forcing the v1 mapping below.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(False)
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               '',
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping=block_device_mapping,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass=u'',
                               disk_config='MANUAL',
                               config_drive=True)
        self.mox.ReplayAll()
        # Note: no 'image_id' key — the volume is the boot source.
        form_data = {'flavor': flavor.id,
                     'source_type': 'volume_id',
                     # 'image_id': '',
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'availability_zone': avail_zone.zoneName,
                     'network': self.networks.first().id,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 1,
                     'disk_config': 'MANUAL',
                     'config_drive': True}
        if test_with_profile:
            form_data['profile'] = self.policy_profiles.first().id
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_lnch_inst_post_no_images_avail_boot_from_vol_with_profile(self):
self.test_launch_instance_post_no_images_available_boot_from_volume(
test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'tenant_absolute_limits',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_no_images_available(self,
                                                      test_with_profile=False):
        """Submitting image-sourced launch with no image selected fails.

        Both glance listings are stubbed to return empty results, and
        the form posts an empty ``image_id``; a single form error is
        expected and the workflow view is re-rendered.
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        quota_usages = self.quota_usages.first()
        # Record the API calls the launch workflow is expected to make.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        # Both image listings return no images for this scenario.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        # Empty 'image_id' with source_type 'image_id' triggers the error.
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': '',
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'availability_zone': avail_zone.zoneName,
                     'volume_type': '',
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertFormErrors(res, 1, "You must select an image.")
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
@helpers.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_instance_post_no_images_available_with_profile(self):
self.test_launch_instance_post_no_images_available(
test_with_profile=True)
    @helpers.create_stubs({
        api.glance: ('image_list_detailed',),
        api.neutron: ('network_list',
                      'profile_list',
                      'port_create',),
        api.nova: ('extension_supported',
                   'flavor_list',
                   'keypair_list',
                   'availability_zone_list',
                   'server_create',),
        api.network: ('security_group_list',),
        cinder: ('volume_list',
                 'volume_snapshot_list',),
        quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_boot_from_snapshot(
        self,
        test_with_profile=False,
    ):
        """Booting from a bogus snapshot id produces a single form error.

        The snapshot listing is stubbed to return nothing, so the posted
        ``instance_snapshot_id`` cannot match and the form must report
        "You must select a snapshot."
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        avail_zone = self.availability_zones.first()
        quota_usages = self.quota_usages.first()
        # Record the API calls the launch workflow is expected to make.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        # No volumes and no snapshots are available in this scenario.
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        bad_snapshot_id = 'a-bogus-id'
        form_data = {'flavor': flavor.id,
                     'source_type': 'instance_snapshot_id',
                     'instance_snapshot_id': bad_snapshot_id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'availability_zone': avail_zone.zoneName,
                     'network': self.networks.first().id,
                     'volume_id': '',
                     'volume_snapshot_id': '',
                     'image_id': '',
                     'device_name': 'vda',
                     'count': 1,
                     'profile': '',
                     'customization_script': ''}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertFormErrors(res, 1, "You must select a snapshot.")
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           api.network: ('security_group_list',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'tenant_absolute_limits',
                                      'availability_zone_list',)})
    def test_launch_flavorlist_error(self,
                                     test_with_profile=False):
        """The launch workflow still renders when flavor_list errors.

        Both recorded ``flavor_list`` expectations raise the nova
        exception fixture; the GET must still render the workflow view.
        """
        # Record the API calls the launch workflow is expected to make.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        # Both flavor listings fail with the nova exception fixture.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:launch')
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
@helpers.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_flavorlist_error_with_profile(self):
self.test_launch_flavorlist_error(test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'server_create',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_form_keystone_exception(self,
                                            test_with_profile=False):
        """server_create raising a keystone error still redirects to index.

        The stubbed ``server_create`` raises the keystone exception
        fixture; the view is expected to handle it and redirect back to
        the instances index rather than crash.
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        # Record the API calls the launch workflow is expected to make.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        # 'volumes' is rebound: snapshot listing returns all available ones.
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE)]
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn(volumes)
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.nova.keypair_list(IgnoreArg()).AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            # Profile path: a port is created up front and used as the NIC.
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            nics = [{"port-id": port.id}]
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        # The create call itself fails with a keystone error.
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               image.id,
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping=None,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass='password',
                               disk_config='AUTO',
                               config_drive=False) \
            .AndRaise(self.exceptions.keystone)
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'source_id': image.id,
                     'volume_size': '1',
                     'image_id': image.id,
                     'availability_zone': avail_zone.zoneName,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': '',
                     'network': self.networks.first().id,
                     'count': 1,
                     'admin_pass': 'password',
                     'confirm_admin_pass': 'password',
                     'disk_config': 'AUTO',
                     'config_drive': False}
        if test_with_profile:
            form_data['profile'] = self.policy_profiles.first().id
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_keystone_exception_with_profile(self):
self.test_launch_form_keystone_exception(test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'tenant_absolute_limits',
                                      'availability_zone_list',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_form_instance_count_error(self,
                                              test_with_profile=False):
        """Posting an instance count of 0 is rejected by form validation."""
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        quota_usages = self.quota_usages.first()
        # Record the API calls the launch workflow is expected to make.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        # 'count': 0 is below the form's minimum and triggers the error.
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'availability_zone': avail_zone.zoneName,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 0}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertContains(res, "greater than or equal to 1")
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'tenant_absolute_limits',
                                      'availability_zone_list',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def _test_launch_form_count_error(self, resource,
                                      avail, test_with_profile=False):
        """Shared scenario: quota shortfall rejects a count-2 launch.

        :param resource: which quota to constrain — 'cores', 'ram', or
            'both' (constrains cores to *avail* and ram to 512).
        :param avail: the 'available' value injected into the quota
            usages fixture and expected in the error message.

        NOTE(review): ``msg`` is only bound for the three resource
        values above; any other value would raise NameError at the final
        assert — callers in this class only pass those three.
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        quota_usages = self.quota_usages.first()
        # Constrain the requested quota dimension(s) in the fixture.
        if resource == 'both':
            quota_usages['cores']['available'] = avail
            quota_usages['ram']['available'] = 512
        else:
            quota_usages[resource]['available'] = avail
        # Record the API calls the launch workflow is expected to make.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        # Requesting two instances against the constrained quota.
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'availability_zone': avail_zone.zoneName,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 2}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        # The expected error message depends on the constrained resource.
        if resource == 'ram':
            msg = ("The following requested resource(s) exceed quota(s): "
                   "RAM(Available: %s" % avail)
        if resource == 'cores':
            msg = ("The following requested resource(s) exceed quota(s): "
                   "Cores(Available: %s" % avail)
        if resource == 'both':
            msg = ("The following requested resource(s) exceed quota(s): "
                   "Cores(Available: %(avail)s, Requested: 2), RAM(Available: "
                   "512, Requested: 1024)" % {'avail': avail})
        self.assertContains(res, msg)
def test_launch_form_cores_count_error(self):
self._test_launch_form_count_error('cores', 1, test_with_profile=False)
def test_launch_form_ram_count_error(self):
self._test_launch_form_count_error('ram', 512, test_with_profile=False)
def test_launch_form_ram_cores_count_error(self):
self._test_launch_form_count_error('both', 1, test_with_profile=False)
@helpers.update_settings(
OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_count_error_with_profile(self):
self.test_launch_form_instance_count_error(test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'tenant_absolute_limits',
                                      'availability_zone_list',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def _test_launch_form_instance_requirement_error(self, image, flavor,
                                                     test_with_profile=False):
        """Shared scenario: an image/flavor mismatch rejects the launch.

        The caller prepares *image* so its minimum requirements exceed
        what *flavor* provides; the form must then report that the
        flavor is too small.
        """
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        quota_usages = self.quota_usages.first()
        # Record the API calls the launch workflow is expected to make.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'availability_zone': avail_zone.zoneName,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        msg = "The flavor '%s' is too small" % flavor.name
        self.assertContains(res, msg)
def test_launch_form_instance_requirement_error_disk(
    self,
    test_with_profile=False,
):
    """An image whose min_disk exceeds the flavor's disk must be rejected."""
    img = self.images.first()
    flv = self.flavors.first()
    # RAM requirement is satisfied exactly; only the disk is too small.
    img.min_ram = flv.ram
    img.min_disk = flv.disk + 1
    self._test_launch_form_instance_requirement_error(
        img, flv, test_with_profile)
def test_launch_form_instance_requirement_error_ram(
    self,
    test_with_profile=False,
):
    """An image whose min_ram exceeds the flavor's RAM must be rejected."""
    img = self.images.first()
    flv = self.flavors.first()
    # Disk requirement is satisfied exactly; only the RAM is too small.
    img.min_ram = flv.ram + 1
    img.min_disk = flv.disk
    self._test_launch_form_instance_requirement_error(
        img, flv, test_with_profile)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_requirement_error_disk_with_profile(self):
    # Re-run the disk requirement-error test with Cisco N1K profile
    # support switched on via the Neutron settings override.
    self.test_launch_form_instance_requirement_error_disk(
        test_with_profile=True)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_requirement_error_ram_with_profile(self):
    # Re-run the RAM requirement-error test with Cisco N1K profile
    # support switched on via the Neutron settings override.
    self.test_launch_form_instance_requirement_error_ram(
        test_with_profile=True)
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'tenant_absolute_limits',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def _test_launch_form_instance_show_device_name(self, device_name,
                                                widget_class,
                                                widget_attrs):
    """Post a volume-backed launch and check the device_name widget.

    Records mox expectations for the APIs the launch workflow touches,
    posts the form with ``device_name``, then asserts the response
    contains ``widget_class`` rendered with ``widget_attrs``.
    """
    flavor = self.flavors.first()
    image = self.images.first()
    keypair = self.keypairs.first()
    server = self.servers.first()
    volume = self.volumes.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    volume_choice = "%s:vol" % volume.id
    quota_usages = self.quota_usages.first()
    # mox expectations are order-sensitive: they mirror the exact call
    # sequence made while the POST below is processed.
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)).AndReturn(True)
    api.nova.flavor_list(
        IsA(http.HttpRequest)).AndReturn(self.flavors.list())
    api.nova.keypair_list(
        IsA(http.HttpRequest)).AndReturn(self.keypairs.list())
    api.network.security_group_list(
        IsA(http.HttpRequest)).AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(
        IsA(http.HttpRequest)).AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True,
                 'status': 'active'}).AndReturn(
        [self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}).AndReturn([[], False, False])
    api.neutron.network_list(
        IsA(http.HttpRequest),
        tenant_id=self.tenant.id,
        shared=False).AndReturn(self.networks.list()[:1])
    api.neutron.network_list(
        IsA(http.HttpRequest),
        shared=True).AndReturn(self.networks.list()[1:])
    api.nova.extension_supported(
        'DiskConfig', IsA(http.HttpRequest)).AndReturn(True)
    api.nova.extension_supported(
        'ConfigDrive', IsA(http.HttpRequest)).AndReturn(True)
    # Only bootable, available volumes are offered as boot sources.
    volumes = [v for v in self.volumes.list()
               if (v.status == AVAILABLE and v.bootable == 'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.nova.flavor_list(
        IsA(http.HttpRequest)).AndReturn(self.flavors.list())
    api.nova.tenant_absolute_limits(
        IsA(http.HttpRequest)).AndReturn(self.limits['absolute'])
    quotas.tenant_quota_usages(
        IsA(http.HttpRequest)).AndReturn(quota_usages)
    api.nova.flavor_list(
        IsA(http.HttpRequest)).AndReturn(self.flavors.list())
    self.mox.ReplayAll()
    form_data = {'flavor': flavor.id,
                 'source_type': 'volume_image_id',
                 'image_id': image.id,
                 'availability_zone': avail_zone.zoneName,
                 'keypair': keypair.name,
                 'name': server.name,
                 'customization_script': customization_script,
                 'project_id': self.tenants.first().id,
                 'user_id': self.user.id,
                 'groups': sec_group.name,
                 'volume_type': 'volume_id',
                 'volume_id': volume_choice,
                 'volume_size': max(
                     image.min_disk, image.size / 1024 ** 3),
                 'device_name': device_name,
                 'count': 1}
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    self.assertNoFormErrors(res)
    widget_content = widget_class().render(**widget_attrs)
    # In django 1.4, the widget's html attributes are not always rendered
    # in the same order and checking the fully rendered widget fails.
    for widget_part in widget_content.split():
        self.assertContains(res, widget_part)
@django.test.utils.override_settings(
    OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point': True})
def test_launch_form_instance_device_name_showed(self):
    """With mount-point support on, device_name renders as a TextInput."""
    widget_attrs = {
        'name': 'device_name',
        'value': 'vda',
        'attrs': {'id': 'id_device_name'},
    }
    self._test_launch_form_instance_show_device_name(
        u'vda', widgets.TextInput, widget_attrs)
@django.test.utils.override_settings(
    OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point': False})
def test_launch_form_instance_device_name_hidden(self):
    """With mount-point support off, device_name becomes a HiddenInput."""
    widget_attrs = {
        'name': 'device_name',
        'value': '',
        'attrs': {'id': 'id_device_name'},
    }
    self._test_launch_form_instance_show_device_name(
        u'', widgets.HiddenInput, widget_attrs)
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'tenant_absolute_limits',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def _test_launch_form_instance_volume_size(self, image, volume_size, msg,
                                           test_with_profile=False,
                                           volumes=None):
    """Post a volume-backed launch and assert ``msg`` appears in the page.

    ``volume_size`` is the size submitted in the form; ``volumes``, when
    given, overrides the available volume quota to simulate exhaustion.
    """
    flavor = self.flavors.get(name='m1.massive')
    keypair = self.keypairs.first()
    server = self.servers.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    device_name = u'vda'
    quota_usages = self.quota_usages.first()
    # Raise the core quota so only the condition under test can fail.
    quota_usages['cores']['available'] = 2000
    if volumes is not None:
        quota_usages['volumes']['available'] = volumes
    else:
        # NOTE(review): this flavor_list stub is recorded only when no
        # volume-quota override is supplied — confirm the quota-exceeded
        # path really skips this call before relying on it.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    if test_with_profile:
        policy_profiles = self.policy_profiles.list()
        api.neutron.profile_list(IsA(http.HttpRequest),
                                 'policy').AndReturn(policy_profiles)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    # Only bootable, available volumes are offered as boot sources.
    volumes = [v for v in self.volumes.list()
               if (v.status == AVAILABLE and v.bootable == 'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
        .AndReturn(quota_usages)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    self.mox.ReplayAll()
    form_data = {
        'flavor': flavor.id,
        'source_type': 'volume_image_id',
        'image_id': image.id,
        'availability_zone': avail_zone.zoneName,
        'keypair': keypair.name,
        'name': server.name,
        'script_source': 'raw',
        'script_data': customization_script,
        'project_id': self.tenants.first().id,
        'user_id': self.user.id,
        'groups': sec_group.name,
        'volume_size': volume_size,
        'device_name': device_name,
        'count': 1
    }
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    self.assertContains(res, msg)
def test_launch_form_instance_volume_size_error(self,
                                                test_with_profile=False):
    """A volume smaller than the image's min_disk must raise an error."""
    image = self.images.get(name='protected_images')
    too_small = image.min_disk / 2
    expected = ("The Volume size is too small for the '%s' image" %
                image.name)
    self._test_launch_form_instance_volume_size(
        image, too_small, expected, test_with_profile)
def test_launch_form_instance_non_int_volume_size(self,
                                                  test_with_profile=False):
    """A fractional volume size must fail whole-number form validation."""
    image = self.images.get(name='protected_images')
    self._test_launch_form_instance_volume_size(
        image, 1.5, "Enter a whole number.", test_with_profile)
def test_launch_form_instance_volume_exceed_quota(self):
    """Launching with zero volume quota left must report quota exhaustion."""
    image = self.images.get(name='protected_images')
    expected = "Requested volume exceeds quota: Available: 0, Requested: 1"
    self._test_launch_form_instance_volume_size(
        image, image.min_disk, expected,
        test_with_profile=False, volumes=0)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_volume_size_error_with_profile(self):
    # Re-run the volume-size error test with Cisco N1K profile support on.
    self.test_launch_form_instance_volume_size_error(
        test_with_profile=True)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_non_int_volume_size_with_profile(self):
    # Re-run the non-integer volume-size test with Cisco N1K profile
    # support on.
    self.test_launch_form_instance_non_int_volume_size(
        test_with_profile=True)
@helpers.create_stubs({
    api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
               'extension_supported',),
    api.glance: ('image_list_detailed',),
    api.network: ('floating_ip_simple_associate_supported',
                  'floating_ip_supported',
                  'servers_update_addresses',),
})
def test_launch_button_disabled_when_quota_exceeded(self):
    """The index page's launch button is disabled at full instance quota."""
    servers = self.servers.list()
    limits = self.limits['absolute']
    # Saturate the instance quota so LaunchLink must render disabled.
    limits['totalInstancesUsed'] = limits['maxTotalInstances']
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(limits)
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    self.mox.ReplayAll()
    launch = tables.LaunchLink()
    url = launch.get_link_url()
    classes = list(launch.get_default_classes()) + list(launch.classes)
    link_name = "%s (%s)" % (unicode(launch.verbose_name),
                             "Quota exceeded")
    res = self.client.get(INDEX_URL)
    # html=True makes the comparison whitespace/attribute-order tolerant.
    expected_string = encoding.smart_str(u'''
        <a href="%s" title="%s" class="%s disabled"
        data-update-url=
        "/project/instances/?action=launch&table=instances"
        id="instances__action_launch">
        <span class="fa fa-cloud-upload"></span>%s</a>
        ''' % (url, link_name, " ".join(classes), link_name), res._charset)
    self.assertContains(res, expected_string, html=True,
                        msg_prefix="The launch button is not disabled")
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'availability_zone_list',
                                  'tenant_absolute_limits',
                                  'server_create',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def test_launch_with_empty_device_name_allowed(self):
    """An empty device name must be passed to Nova as ``None``."""
    flavor = self.flavors.get(name='m1.massive')
    image = self.images.first()
    keypair = self.keypairs.first()
    server = self.servers.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    nics = [{'net-id': self.networks.first().id, 'v4-fixed-ip': ''}]
    device_name = u''
    quota_usages = self.quota_usages.first()
    quota_usages['cores']['available'] = 2000
    device_mapping_v2 = [{'device_name': None,  # device_name must be None
                          'source_type': 'image',
                          'destination_type': 'volume',
                          'delete_on_termination': 0,
                          'uuid': image.id,
                          'boot_index': '0',
                          'volume_size': image.size}]
    # mox expectations mirror the API call order of the launch workflow.
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    # Only bootable, available volumes are offered as boot sources.
    volumes = [v for v in self.volumes.list()
               if (v.status == AVAILABLE and v.bootable == 'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
        .AndReturn(quota_usages)
    # The key assertion: server_create must receive the block device
    # mapping with device_name normalized to None (see device_mapping_v2).
    api.nova.server_create(IsA(http.HttpRequest),
                           server.name,
                           '',
                           flavor.id,
                           keypair.name,
                           customization_script,
                           [sec_group.name],
                           block_device_mapping=None,
                           block_device_mapping_v2=device_mapping_v2,
                           nics=nics,
                           availability_zone=avail_zone.zoneName,
                           instance_count=IsA(int),
                           admin_pass=u'',
                           config_drive=False,
                           disk_config=u'')
    self.mox.ReplayAll()
    form_data = {
        'flavor': flavor.id,
        'source_type': 'volume_image_id',
        'image_id': image.id,
        'availability_zone': avail_zone.zoneName,
        'keypair': keypair.name,
        'name': server.name,
        'script_source': 'raw',
        'script_data': customization_script,
        'project_id': self.tenants.first().id,
        'user_id': self.user.id,
        'groups': sec_group.name,
        'volume_size': image.size,
        'device_name': device_name,
        'network': self.networks.first().id,
        'count': 1
    }
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    self.assertNoFormErrors(res)
@helpers.create_stubs({
    api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
               'extension_supported',),
    api.glance: ('image_list_detailed',),
    api.network: ('floating_ip_simple_associate_supported',
                  'floating_ip_supported',
                  'servers_update_addresses',),
})
def test_index_options_after_migrate(self):
    """A VERIFY_RESIZE server shows confirm/revert actions on the index."""
    servers = self.servers.list()
    server = self.servers.first()
    # Put the first server into the post-migration confirmation state.
    server.status = "VERIFY_RESIZE"
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(self.limits['absolute'])
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    self.mox.ReplayAll()
    res = self.client.get(INDEX_URL)
    self.assertContains(res, "instances__confirm")
    self.assertContains(res, "instances__revert")
@helpers.create_stubs({api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'availability_zone_list',
                                  'tenant_absolute_limits',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_snapshot_list',
                                'volume_list',),
                       api.neutron: ('network_list',
                                     'profile_list'),
                       api.glance: ('image_list_detailed',)})
def test_select_default_keypair_if_only_one(self,
                                            test_with_profile=False):
    """When only one key pair exists it is pre-selected in the form."""
    keypair = self.keypairs.first()
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn([])
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    if test_with_profile:
        policy_profiles = self.policy_profiles.list()
        api.neutron.profile_list(IsA(http.HttpRequest),
                                 'policy').AndReturn(policy_profiles)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    # A single-entry keypair list is what triggers the default selection.
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn([keypair])
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:launch')
    res = self.client.get(url)
    self.assertContains(
        res, "<option selected='selected' value='%(key)s'>"
        "%(key)s</option>" % {'key': keypair.name},
        html=True,
        msg_prefix="The default key pair was not selected.")
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_select_default_keypair_if_only_one_with_profile(self):
    # Re-run the default-keypair test with Cisco N1K profile support on.
    self.test_select_default_keypair_if_only_one(test_with_profile=True)
@helpers.create_stubs({api.network: ('floating_ip_target_get_by_instance',
                                     'tenant_floating_ip_allocate',
                                     'floating_ip_associate',
                                     'servers_update_addresses',),
                       api.glance: ('image_list_detailed',),
                       api.nova: ('server_list',
                                  'flavor_list')})
def test_associate_floating_ip(self):
    """The simple-associate row action allocates and binds a floating IP."""
    servers = self.servers.list()
    server = servers[0]
    fip = self.q_floating_ips.first()
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    api.network.floating_ip_target_get_by_instance(
        IsA(http.HttpRequest),
        server.id).AndReturn(server.id)
    api.network.tenant_floating_ip_allocate(
        IsA(http.HttpRequest)).AndReturn(fip)
    api.network.floating_ip_associate(
        IsA(http.HttpRequest), fip.id, server.id)
    self.mox.ReplayAll()
    # Trigger the table row action through the index view's POST handler.
    formData = {'action': 'instances__associate-simple__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.network: ('floating_ip_target_list_by_instance',
                                     'tenant_floating_ip_list',
                                     'floating_ip_disassociate',
                                     'servers_update_addresses',),
                       api.glance: ('image_list_detailed',),
                       api.nova: ('server_list',
                                  'flavor_list')})
def test_disassociate_floating_ip(self):
    """The disassociate row action releases the server's floating IP."""
    servers = self.servers.list()
    server = servers[0]
    fip = self.q_floating_ips.first()
    # Bind the floating IP to the server's port so it can be released.
    fip.port_id = server.id
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    api.network.floating_ip_target_list_by_instance(
        IsA(http.HttpRequest),
        server.id).AndReturn([server.id, ])
    api.network.tenant_floating_ip_list(
        IsA(http.HttpRequest)).AndReturn([fip])
    api.network.floating_ip_disassociate(
        IsA(http.HttpRequest), fip.id)
    self.mox.ReplayAll()
    # Trigger the table row action through the index view's POST handler.
    formData = {'action': 'instances__disassociate__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_get',
                                  'flavor_list',
                                  'tenant_absolute_limits',
                                  'extension_supported')})
def test_instance_resize_get(self):
    """The resize form lists every flavor except the server's current one."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:resize', args=[server.id])
    res = self.client.get(url)
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    # Resize has no config-drive option, unlike the launch workflow.
    config_drive_field_label = 'Configuration Drive'
    self.assertNotContains(res, config_drive_field_label)
    option = '<option value="%s">%s</option>'
    for flavor in self.flavors.list():
        if flavor.id == server.flavor['id']:
            self.assertNotContains(res, option % (flavor.id, flavor.name))
        else:
            self.assertContains(res, option % (flavor.id, flavor.name))
@helpers.create_stubs({api.nova: ('server_get',
                                  'flavor_list',)})
def test_instance_resize_get_server_get_exception(self):
    """A server_get failure on the resize form redirects back to the index."""
    server = self.servers.first()
    api.nova.server_get(
        IsA(http.HttpRequest), server.id).AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    resize_url = reverse('horizon:project:instances:resize',
                         args=[server.id])
    response = self.client.get(resize_url)
    self.assertRedirectsNoFollow(response, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_get',
                                  'flavor_list',)})
def test_instance_resize_get_flavor_list_exception(self):
    """A flavor_list failure on the resize form redirects back to the index."""
    server = self.servers.first()
    # Expectation order matters to mox: server lookup succeeds first,
    # then the flavor listing blows up.
    api.nova.server_get(
        IsA(http.HttpRequest), server.id).AndReturn(server)
    api.nova.flavor_list(
        IsA(http.HttpRequest)).AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    resize_url = reverse('horizon:project:instances:resize',
                         args=[server.id])
    response = self.client.get(resize_url)
    self.assertRedirectsNoFollow(response, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_get',
                                  'flavor_list',
                                  'flavor_get',
                                  'tenant_absolute_limits',
                                  'extension_supported')})
def test_instance_resize_get_current_flavor_not_found(self):
    """The resize form still renders when the current flavor is unknown."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    # Empty flavor lists force a flavor_get fallback, which also fails.
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
        .AndRaise(self.exceptions.nova)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:resize', args=[server.id])
    res = self.client.get(url)
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
def _instance_resize_post(self, server_id, flavor_id, disk_config):
    """POST the resize workflow form for *server_id* and return the response."""
    resize_url = reverse('horizon:project:instances:resize',
                         args=[server_id])
    payload = {
        'flavor': flavor_id,
        'default_role': 'member',
        'disk_config': disk_config,
    }
    return self.client.post(resize_url, payload)
# Stub map shared by the instance-resize POST tests below.
instance_resize_post_stubs = {
    api.nova: ('server_get', 'server_resize',
               'flavor_list', 'flavor_get',
               'extension_supported')}
@helpers.create_stubs(instance_resize_post_stubs)
def test_instance_resize_post(self):
    """Resizing to a different flavor succeeds and redirects to the index."""
    server = self.servers.first()
    # Pick any flavor other than the server's current one.
    flavors = [flavor for flavor in self.flavors.list()
               if flavor.id != server.flavor['id']]
    flavor = flavors[0]
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_resize(IsA(http.HttpRequest), server.id, flavor.id,
                           'AUTO').AndReturn([])
    self.mox.ReplayAll()
    res = self._instance_resize_post(server.id, flavor.id, u'AUTO')
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_resize_post_stubs)
def test_instance_resize_post_api_exception(self):
    """A server_resize API failure redirects back to the index."""
    server = self.servers.first()
    # Pick any flavor other than the server's current one.
    flavors = [flavor for flavor in self.flavors.list()
               if flavor.id != server.flavor['id']]
    flavor = flavors[0]
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_resize(IsA(http.HttpRequest), server.id, flavor.id,
                           'AUTO') \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    res = self._instance_resize_post(server.id, flavor.id, 'AUTO')
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.nova: ('extension_supported',)})
def test_rebuild_instance_get(self, expect_password_fields=True):
    """The rebuild form shows password fields per hypervisor capability."""
    server = self.servers.first()
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:rebuild', args=[server.id])
    res = self.client.get(url)
    self.assertTemplateUsed(res, 'project/instances/rebuild.html')
    password_field_label = 'Rebuild Password'
    if expect_password_fields:
        self.assertContains(res, password_field_label)
    else:
        self.assertNotContains(res, password_field_label)
@django.test.utils.override_settings(
    OPENSTACK_HYPERVISOR_FEATURES={'can_set_password': False})
def test_rebuild_instance_get_without_set_password(self):
    # With password-setting disabled, the rebuild form must hide the
    # password fields.
    self.test_rebuild_instance_get(expect_password_fields=False)
def _instance_rebuild_post(self, server_id, image_id,
                           password=None, confirm_password=None,
                           disk_config=None):
    """POST the rebuild form for *server_id* and return the response.

    ``password``/``confirm_password`` are included in the payload only
    when given, matching how the browser omits blank optional fields.
    """
    payload = {'instance_id': server_id,
               'image': image_id,
               'disk_config': disk_config}
    if password is not None:
        payload['password'] = password
    if confirm_password is not None:
        payload['confirm_password'] = confirm_password
    rebuild_url = reverse('horizon:project:instances:rebuild',
                          args=[server_id])
    return self.client.post(rebuild_url, payload)
# Stub map shared by the instance-rebuild POST tests below.
instance_rebuild_post_stubs = {
    api.nova: ('server_rebuild',
               'extension_supported'),
    api.glance: ('image_list_detailed',)}
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_with_password(self):
    """Rebuild with matching passwords calls server_rebuild and redirects."""
    server = self.servers.first()
    image = self.images.first()
    password = u'testpass'
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_rebuild(IsA(http.HttpRequest),
                            server.id,
                            image.id,
                            password,
                            'AUTO').AndReturn([])
    self.mox.ReplayAll()
    res = self._instance_rebuild_post(server.id, image.id,
                                      password=password,
                                      confirm_password=password,
                                      disk_config='AUTO')
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_with_password_equals_none(self):
    """Rebuild without a password passes None and survives an API error."""
    server = self.servers.first()
    image = self.images.first()
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    # server_rebuild must receive password=None when none was supplied.
    api.nova.server_rebuild(IsA(http.HttpRequest),
                            server.id,
                            image.id,
                            None,
                            'AUTO') \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    res = self._instance_rebuild_post(server.id, image.id,
                                      password=None,
                                      confirm_password=None,
                                      disk_config='AUTO')
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_password_do_not_match(self):
    """Mismatched passwords fail form validation before any rebuild call."""
    server = self.servers.first()
    image = self.images.first()
    pass1 = u'somepass'
    pass2 = u'notsomepass'
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    # No server_rebuild expectation: validation must stop the request.
    self.mox.ReplayAll()
    res = self._instance_rebuild_post(server.id, image.id,
                                      password=pass1,
                                      confirm_password=pass2,
                                      disk_config='MANUAL')
    self.assertContains(res, "Passwords do not match.")
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_with_empty_string(self):
    """Empty password strings are normalized to None for server_rebuild."""
    server = self.servers.first()
    image = self.images.first()
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    # The empty-string form inputs must reach Nova as password=None.
    api.nova.server_rebuild(IsA(http.HttpRequest),
                            server.id,
                            image.id,
                            None,
                            'AUTO').AndReturn([])
    self.mox.ReplayAll()
    res = self._instance_rebuild_post(server.id, image.id,
                                      password=u'',
                                      confirm_password=u'',
                                      disk_config=u'AUTO')
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_api_exception(self):
    """A server_rebuild API failure redirects back to the index."""
    server = self.servers.first()
    image = self.images.first()
    password = u'testpass'
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_rebuild(IsA(http.HttpRequest),
                            server.id,
                            image.id,
                            password,
                            'AUTO') \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    res = self._instance_rebuild_post(server.id, image.id,
                                      password=password,
                                      confirm_password=password,
                                      disk_config='AUTO')
    self.assertRedirectsNoFollow(res, INDEX_URL)
    @django.test.utils.override_settings(API_RESULT_PAGE_SIZE=2)
    @helpers.create_stubs({
        api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
                   'extension_supported',),
        api.glance: ('image_list_detailed',),
        api.network: ('floating_ip_simple_associate_supported',
                      'floating_ip_supported',
                      'servers_update_addresses',),
    })
    def test_index_form_action_with_pagination(self):
        """The form action on the next page should carry the marker
        (the id of the previous page's last server) so that pagination
        state survives table form submissions.
        """
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 2)
        servers = self.servers.list()[:3]
        api.nova.extension_supported('AdminActions',
                                     IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .MultipleTimes().AndReturn((self.images.list(), False, False))
        # Two server_list calls are recorded: the first page (marker=None)
        # and the second page keyed on the last item of the first page.
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers[:page_size], True])
        api.network.servers_update_addresses(
            IsA(http.HttpRequest), servers[:page_size])
        api.nova.server_list(IsA(http.HttpRequest), search_opts={
            'marker': servers[page_size - 1].id, 'paginate': True}) \
            .AndReturn([servers[page_size:], False])
        api.network.servers_update_addresses(
            IsA(http.HttpRequest), servers[page_size:])
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
            .MultipleTimes().AndReturn(self.limits['absolute'])
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.network.floating_ip_simple_associate_supported(
            IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/instances/index.html')
        # get first page with 2 items
        self.assertEqual(len(res.context['instances_table'].data), page_size)
        # update INDEX_URL with marker object
        params = "=".join([tables.InstancesTable._meta.pagination_param,
                           servers[page_size - 1].id])
        next_page_url = "?".join([reverse('horizon:project:instances:index'),
                                  params])
        form_action = 'action="%s"' % next_page_url
        res = self.client.get(next_page_url)
        # get next page with remaining items (item 3)
        self.assertEqual(len(res.context['instances_table'].data), 1)
        # ensure that marker object exists in form action
        self.assertContains(res, form_action, count=1)
    @django.test.utils.override_settings(API_RESULT_PAGE_SIZE=2)
    @helpers.create_stubs({api.nova: ('server_list',
                                      'flavor_list',
                                      'server_delete',),
                           api.glance: ('image_list_detailed',),
                           api.network: ('servers_update_addresses',)})
    def test_terminate_instance_with_pagination(self):
        """Terminating an instance shown on the second page should work
        and redirect back to that same marker-parameterised page.
        """
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 2)
        servers = self.servers.list()[:3]
        server = servers[-1]
        # The view is entered directly on page two, so only the marker-based
        # server_list call is recorded.
        search_opts = {'marker': servers[page_size - 1].id, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
            .AndReturn([servers[page_size:], False])
        api.network.servers_update_addresses(IsA(http.HttpRequest),
                                             servers[page_size:])
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IgnoreArg()) \
            .AndReturn((self.images.list(), False, False))
        api.nova.server_delete(IsA(http.HttpRequest), server.id)
        self.mox.ReplayAll()
        # update INDEX_URL with marker object
        params = "=".join([tables.InstancesTable._meta.pagination_param,
                           servers[page_size - 1].id])
        next_page_url = "?".join([reverse('horizon:project:instances:index'),
                                  params])
        formData = {'action': 'instances__terminate__%s' % server.id}
        res = self.client.post(next_page_url, formData)
        self.assertRedirectsNoFollow(res, next_page_url)
        self.assertMessageCount(success=1)
class SimpleFile(object):
def __init__(self, name, data, size):
self.name = name
self.data = data
self._size = size
def read(self):
return self.data
    def test_clean_file_upload_form_oversize_data(self):
        """A customization script above the 16 kB limit must be rejected."""
        t = workflows.create_instance.CustomizeAction(self.request, {})
        upload_str = 'user data'
        # Declared size is one byte over the 16 kB cap enforced by the form.
        files = {'script_upload':
                 self.SimpleFile('script_name',
                                 upload_str,
                                 (16 * 1024) + 1)}
        self.assertRaises(
            forms.ValidationError,
            t.clean_uploaded_files,
            'script',
            files)
    def test_clean_file_upload_form_invalid_data(self):
        """Non-decodable script content must raise a ValidationError."""
        t = workflows.create_instance.CustomizeAction(self.request, {})
        # '\x81' is not valid in the expected text encoding.
        upload_str = '\x81'
        files = {'script_upload':
                 self.SimpleFile('script_name',
                                 upload_str,
                                 sys.getsizeof(upload_str))}
        self.assertRaises(
            forms.ValidationError,
            t.clean_uploaded_files,
            'script',
            files)
    def test_clean_file_upload_form_valid_data(self):
        """A small, well-formed script passes cleaning unchanged."""
        t = workflows.create_instance.CustomizeAction(self.request, {})
        precleaned = 'user data'
        upload_str = 'user data'
        files = {'script_upload':
                 self.SimpleFile('script_name',
                                 upload_str,
                                 sys.getsizeof(upload_str))}
        cleaned = t.clean_uploaded_files('script', files)
        # Cleaning must not alter valid content.
        self.assertEqual(
            cleaned,
            precleaned)
class InstanceAjaxTests(helpers.TestCase):
    """AJAX row-update behaviour of the instances table."""
    @helpers.create_stubs({api.nova: ("server_get",
                                      "flavor_get",
                                      "extension_supported"),
                           api.neutron: ("is_extension_supported",)})
    def test_row_update(self):
        """A row_update AJAX request returns the refreshed server row."""
        server = self.servers.first()
        instance_id = server.id
        flavor_id = server.flavor["id"]
        flavors = self.flavors.list()
        full_flavors = SortedDict([(f.id, f) for f in flavors])
        # mox record phase.
        api.nova.extension_supported('AdminActions', IsA(http.HttpRequest))\
            .MultipleTimes().AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'security-group')\
            .MultipleTimes().AndReturn(True)
        api.nova.server_get(IsA(http.HttpRequest), instance_id)\
            .AndReturn(server)
        api.nova.flavor_get(IsA(http.HttpRequest), flavor_id)\
            .AndReturn(full_flavors[flavor_id])
        self.mox.ReplayAll()
        params = {'action': 'row_update',
                  'table': 'instances',
                  'obj_id': instance_id,
                  }
        res = self.client.get('?'.join((INDEX_URL, urlencode(params))),
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(res, server.name)
    @helpers.create_stubs({api.nova: ("server_get",
                                      "flavor_get",
                                      "extension_supported"),
                           api.neutron: ("is_extension_supported",)})
    def test_row_update_instance_error(self):
        """A server in ERROR state surfaces a user-facing error message."""
        server = self.servers.first()
        instance_id = server.id
        flavor_id = server.flavor["id"]
        flavors = self.flavors.list()
        full_flavors = SortedDict([(f.id, f) for f in flavors])
        # Fake a scheduler NoValidHost fault on the server record.
        server.status = 'ERROR'
        server.fault = {"message": "NoValidHost",
                        "code": 500,
                        "details": "No valid host was found. \n "
                                   "File \"/mnt/stack/nova/nova/"
                                   "scheduler/filter_scheduler.py\", "
                                   "line 105, in schedule_run_instance\n "
                                   "raise exception.NoValidHost"
                                   "(reason=\"\")\n",
                        "created": "2013-10-07T00:08:32Z"}
        api.nova.extension_supported('AdminActions', IsA(http.HttpRequest))\
            .MultipleTimes().AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'security-group')\
            .MultipleTimes().AndReturn(True)
        api.nova.server_get(IsA(http.HttpRequest), instance_id)\
            .AndReturn(server)
        api.nova.flavor_get(IsA(http.HttpRequest), flavor_id)\
            .AndReturn(full_flavors[flavor_id])
        self.mox.ReplayAll()
        params = {'action': 'row_update',
                  'table': 'instances',
                  'obj_id': instance_id,
                  }
        res = self.client.get('?'.join((INDEX_URL, urlencode(params))),
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(res, server.name)
        # Horizon transports messages for AJAX responses in a header.
        self.assertTrue(res.has_header('X-Horizon-Messages'))
        messages = json.loads(res['X-Horizon-Messages'])
        self.assertEqual(len(messages), 1)
        # (Pdb) messages
        # [[u'error', u'Failed to launch instance "server_1": \
        #     There is not enough capacity for this flavor in the \
        #     selected availability zone. Try again later or select \
        #     a different availability zone.', u'']]
        self.assertEqual(messages[0][0], 'error')
        self.assertTrue(messages[0][1].startswith('Failed'))
    @helpers.create_stubs({api.nova: ("server_get",
                                      "flavor_get",
                                      "extension_supported"),
                           api.neutron: ("is_extension_supported",)})
    def test_row_update_flavor_not_found(self):
        """A missing flavor degrades gracefully to "Not available"."""
        server = self.servers.first()
        instance_id = server.id
        api.nova.extension_supported('AdminActions', IsA(http.HttpRequest))\
            .MultipleTimes().AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'security-group')\
            .MultipleTimes().AndReturn(True)
        api.nova.server_get(IsA(http.HttpRequest), instance_id)\
            .AndReturn(server)
        # flavor_get fails; the row must still render.
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"])\
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        params = {'action': 'row_update',
                  'table': 'instances',
                  'obj_id': instance_id,
                  }
        res = self.client.get('?'.join((INDEX_URL, urlencode(params))),
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(res, server.name)
        self.assertContains(res, "Not available")
class ConsoleManagerTests(helpers.TestCase):
    """Console-type selection (VNC/SPICE/RDP/SERIAL/AUTO) and the
    interface attach/detach forms.
    """
    def setup_consoles(self):
        """Re-bind console.CONSOLES to the (now mocked) nova API calls."""
        # Need to refresh with mocks or will fail since mox do not detect
        # the api_call() as mocked.
        console.CONSOLES = SortedDict([
            ('VNC', api.nova.server_vnc_console),
            ('SPICE', api.nova.server_spice_console),
            ('RDP', api.nova.server_rdp_console),
            ('SERIAL', api.nova.server_serial_console)])
    def _get_console_vnc(self, server):
        """Stub server_vnc_console to return a mock console at '/VNC'."""
        console_mock = self.mox.CreateMock(api.nova.VNCConsole)
        console_mock.url = '/VNC'
        self.mox.StubOutWithMock(api.nova, 'server_vnc_console')
        api.nova.server_vnc_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()
    def test_get_console_vnc(self):
        """VNC console URLs get the instance title appended."""
        server = self.servers.first()
        self._get_console_vnc(server)
        url = '/VNC&title=%s(%s)' % (server.name, server.id)
        data = console.get_console(self.request, 'VNC', server)[1]
        self.assertEqual(data, url)
    def _get_console_spice(self, server):
        """Stub server_spice_console to return a mock console at '/SPICE'."""
        console_mock = self.mox.CreateMock(api.nova.SPICEConsole)
        console_mock.url = '/SPICE'
        self.mox.StubOutWithMock(api.nova, 'server_spice_console')
        api.nova.server_spice_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()
    def test_get_console_spice(self):
        """SPICE console URLs get the instance title appended."""
        server = self.servers.first()
        self._get_console_spice(server)
        url = '/SPICE&title=%s(%s)' % (server.name, server.id)
        data = console.get_console(self.request, 'SPICE', server)[1]
        self.assertEqual(data, url)
    def _get_console_rdp(self, server):
        """Stub server_rdp_console to return a mock console at '/RDP'."""
        console_mock = self.mox.CreateMock(api.nova.RDPConsole)
        console_mock.url = '/RDP'
        self.mox.StubOutWithMock(api.nova, 'server_rdp_console')
        api.nova.server_rdp_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()
    def test_get_console_rdp(self):
        """RDP console URLs get the instance title appended."""
        server = self.servers.first()
        self._get_console_rdp(server)
        url = '/RDP&title=%s(%s)' % (server.name, server.id)
        data = console.get_console(self.request, 'RDP', server)[1]
        self.assertEqual(data, url)
    def _get_console_serial(self, server):
        """Stub server_serial_console to return a mock console at '/SERIAL'."""
        console_mock = self.mox.CreateMock(api.nova.SerialConsole)
        console_mock.url = '/SERIAL'
        self.mox.StubOutWithMock(api.nova, 'server_serial_console')
        api.nova.server_serial_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()
    def test_get_console_serial(self):
        """Serial console URLs are returned as-is (no title suffix)."""
        server = self.servers.first()
        self._get_console_serial(server)
        url = '/SERIAL'
        data = console.get_console(self.request, 'SERIAL', server)[1]
        self.assertEqual(data, url)
    def test_get_console_auto_iterate_available(self):
        """AUTO mode falls through failing console types to the first
        one nova can actually provide (RDP here).
        """
        server = self.servers.first()
        console_mock = self.mox.CreateMock(api.nova.RDPConsole)
        console_mock.url = '/RDP'
        # VNC and SPICE fail; RDP succeeds.
        self.mox.StubOutWithMock(api.nova, 'server_vnc_console')
        api.nova.server_vnc_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.StubOutWithMock(api.nova, 'server_spice_console')
        api.nova.server_spice_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.StubOutWithMock(api.nova, 'server_rdp_console')
        api.nova.server_rdp_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()
        url = '/RDP&title=%s(%s)' % (server.name, server.id)
        data = console.get_console(self.request, 'AUTO', server)[1]
        self.assertEqual(data, url)
    def test_get_console_auto_iterate_serial_available(self):
        """AUTO mode reaches SERIAL when every graphical console fails."""
        server = self.servers.first()
        console_mock = self.mox.CreateMock(api.nova.SerialConsole)
        console_mock.url = '/SERIAL'
        self.mox.StubOutWithMock(api.nova, 'server_vnc_console')
        api.nova.server_vnc_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.StubOutWithMock(api.nova, 'server_spice_console')
        api.nova.server_spice_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.StubOutWithMock(api.nova, 'server_rdp_console')
        api.nova.server_rdp_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.StubOutWithMock(api.nova, 'server_serial_console')
        api.nova.server_serial_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()
        url = '/SERIAL'
        data = console.get_console(self.request, 'AUTO', server)[1]
        self.assertEqual(data, url)
    def test_invalid_console_type_raise_value_error(self):
        """An unknown console type raises NotAvailable."""
        self.assertRaises(exceptions.NotAvailable,
                          console.get_console, None, 'FAKE', None)
    @helpers.create_stubs({api.neutron: ('network_list_for_tenant',)})
    def test_interface_attach_get(self):
        """GET on attach_interface renders the attach form template."""
        server = self.servers.first()
        api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
                                            self.tenant.id) \
            .AndReturn(self.networks.list()[:1])
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:attach_interface',
                      args=[server.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res,
                                'project/instances/attach_interface.html')
    @helpers.create_stubs({api.neutron: ('network_list_for_tenant',),
                           api.nova: ('interface_attach',)})
    def test_interface_attach_post(self):
        """POSTing the attach form calls interface_attach and redirects."""
        server = self.servers.first()
        network = api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
                                                      self.tenant.id) \
            .AndReturn(self.networks.list()[:1])
        api.nova.interface_attach(IsA(http.HttpRequest), server.id,
                                  net_id=network[0].id)
        self.mox.ReplayAll()
        form_data = {'instance_id': server.id,
                     'network': network[0].id}
        url = reverse('horizon:project:instances:attach_interface',
                      args=[server.id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.create_stubs({api.neutron: ('port_list',)})
    def test_interface_detach_get(self):
        """GET on detach_interface renders the detach form template."""
        server = self.servers.first()
        api.neutron.port_list(IsA(http.HttpRequest),
                              device_id=server.id)\
            .AndReturn([self.ports.first()])
        self.mox.ReplayAll()
        url = reverse('horizon:project:instances:detach_interface',
                      args=[server.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res,
                                'project/instances/detach_interface.html')
    @helpers.create_stubs({api.neutron: ('port_list',),
                           api.nova: ('interface_detach',)})
    def test_interface_detach_post(self):
        """POSTing the detach form calls interface_detach and redirects."""
        server = self.servers.first()
        port = self.ports.first()
        api.neutron.port_list(IsA(http.HttpRequest),
                              device_id=server.id)\
            .AndReturn([port])
        api.nova.interface_detach(IsA(http.HttpRequest), server.id, port.id)
        self.mox.ReplayAll()
        form_data = {'instance_id': server.id,
                     'port': port.id}
        url = reverse('horizon:project:instances:detach_interface',
                      args=[server.id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
| apache-2.0 |
mzdanieltest/pex | pex/commands/bdist_pex.py | 1 | 3409 | import os
from distutils import log
from setuptools import Command
from pex.bin.pex import build_pex, configure_clp
from pex.common import die
from pex.compatibility import ConfigParser, StringIO, string
from pex.variables import ENV
# Suppress checkstyle violations due to setuptools command requirements.
class bdist_pex(Command):  # noqa
  """setuptools command building one or more .pex files from a project.

  Usage: ``python setup.py bdist_pex [--bdist-all] [--bdist-dir=DIR]
  [--pex-args="..."]``. Which pexes are produced depends on the project's
  console_scripts entry points (see run()).
  """
  description = "create a PEX file from a source distribution"  # noqa
  user_options = [  # noqa
    ('bdist-all', None, 'pexify all defined entry points'),
    ('bdist-dir=', None, 'the directory into which pexes will be written, default: dist.'),
    ('pex-args=', None, 'additional arguments to the pex tool'),
  ]
  boolean_options = [  # noqa
    'bdist-all',
  ]
  def initialize_options(self):
    """Set defaults for all user options."""
    self.bdist_all = False
    self.bdist_dir = None
    self.pex_args = ''
  def finalize_options(self):
    """Split the raw --pex-args string into an argv-style list."""
    self.pex_args = self.pex_args.split()
  def _write(self, pex_builder, target, script=None):
    """Clone the shared builder, optionally pin a script, and emit a pex.

    Cloning keeps the shared builder reusable across multiple targets.
    """
    builder = pex_builder.clone()
    if script is not None:
      builder.set_script(script)
    builder.build(target)
  def parse_entry_points(self):
    """Return a {console_script_name: entry_point} mapping.

    Accepts the two forms setuptools allows for ``entry_points`` (an
    ini-style string or a dict); anything else is a fatal error. Returns
    an empty dict when no console_scripts are defined or parsing fails.
    """
    def split_and_strip(entry_point):
      # NOTE(review): maxsplit=2 means a spec containing two '=' characters
      # raises ValueError (silently swallowed below); maxsplit=1 looks like
      # the intent -- confirm before changing.
      console_script, entry_point = entry_point.split('=', 2)
      return console_script.strip(), entry_point.strip()
    raw_entry_points = self.distribution.entry_points
    if isinstance(raw_entry_points, string):
      parser = ConfigParser()
      parser.readfp(StringIO(raw_entry_points))
      if parser.has_section('console_scripts'):
        return dict(parser.items('console_scripts'))
    elif isinstance(raw_entry_points, dict):
      try:
        return dict(split_and_strip(script)
                    for script in raw_entry_points.get('console_scripts', []))
      except ValueError:
        pass
    elif raw_entry_points is not None:
      die('When entry_points is provided, it must be a string or dict.')
    return {}
  def run(self):
    """Build the pex(es): one per entry point with --bdist-all, else a
    single pex named after the project (entry-point pex if the project has
    a namesake console script, otherwise an environment pex).
    """
    name = self.distribution.get_name()
    version = self.distribution.get_version()
    parser, options_builder = configure_clp()
    package_dir = os.path.dirname(os.path.realpath(os.path.expanduser(
      self.distribution.script_name)))
    if self.bdist_dir is None:
      self.bdist_dir = os.path.join(package_dir, 'dist')
    options, reqs = parser.parse_args(self.pex_args)
    # Entry point/script selection is owned by this command, not --pex-args.
    if options.entry_point or options.script:
      die('Must not specify entry_point or script to --pex-args')
    # The project itself is always the first requirement.
    reqs = [package_dir] + reqs
    with ENV.patch(PEX_VERBOSE=str(options.verbosity), PEX_ROOT=options.pex_root):
      pex_builder = build_pex(reqs, options, options_builder)
      console_scripts = self.parse_entry_points()
      target = os.path.join(self.bdist_dir, name + '-' + version + '.pex')
      if self.bdist_all:
        # Write all entry points into unversioned pex files.
        for script_name in console_scripts:
          target = os.path.join(self.bdist_dir, script_name)
          log.info('Writing %s to %s' % (script_name, target))
          self._write(pex_builder, target, script=script_name)
      elif name in console_scripts:
        # The package has a namesake entry point, so use it.
        log.info('Writing %s to %s' % (name, target))
        self._write(pex_builder, target, script=name)
      else:
        # The package has no namesake entry point, so build an environment pex.
        log.info('Writing environment pex into %s' % target)
        self._write(pex_builder, target, script=None)
| apache-2.0 |
zzcclp/spark | python/pyspark/cloudpickle/cloudpickle_fast.py | 25 | 30485 | """
New, fast version of the CloudPickler.
This new CloudPickler class can now extend the fast C Pickler instead of the
previous Python implementation of the Pickler class. Because this functionality
is only available for Python versions 3.8+, a lot of backward-compatibility
code is also removed.
Note that the C Pickler subclassing API is CPython-specific. Therefore, some
guards present in cloudpickle.py that were written to handle PyPy specificities
are not present in cloudpickle_fast.py
"""
import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _is_importable,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items,
)
if pickle.HIGHEST_PROTOCOL >= 5 and not PYPY:
    # Shorthands similar to pickle.dump/pickle.dumps
    # Protocol 5 (PEP 574) adds out-of-band buffer support, hence the extra
    # buffer_callback parameter in this variant.
    def dump(obj, file, protocol=None, buffer_callback=None):
        """Serialize obj as bytes streamed into file
        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
        pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.
        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        CloudPickler(
            file, protocol=protocol, buffer_callback=buffer_callback
        ).dump(obj)
    def dumps(obj, protocol=None, buffer_callback=None):
        """Serialize obj as a string of bytes allocated in memory
        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
        pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.
        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        with io.BytesIO() as file:
            cp = CloudPickler(
                file, protocol=protocol, buffer_callback=buffer_callback
            )
            cp.dump(obj)
            return file.getvalue()
else:
    # Shorthands similar to pickle.dump/pickle.dumps
    # Fallback without buffer_callback for older protocols and PyPy.
    def dump(obj, file, protocol=None):
        """Serialize obj as bytes streamed into file
        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
        pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.
        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        CloudPickler(file, protocol=protocol).dump(obj)
    def dumps(obj, protocol=None):
        """Serialize obj as a string of bytes allocated in memory
        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
        pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.
        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        with io.BytesIO() as file:
            cp = CloudPickler(file, protocol=protocol)
            cp.dump(obj)
            return file.getvalue()
load, loads = pickle.load, pickle.loads
# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
# -------------------------------------------------
def _class_getnewargs(obj):
    """Arguments handed to _make_skeleton_class for a dynamic class."""
    type_kwargs = {}
    cls_dict = obj.__dict__
    # Only forward __slots__ when the class itself declares it.
    if "__slots__" in cls_dict:
        type_kwargs["__slots__"] = obj.__slots__
    dict_descriptor = cls_dict.get('__dict__', None)
    # A property masquerading as __dict__ must survive the round-trip.
    if isinstance(dict_descriptor, property):
        type_kwargs['__dict__'] = dict_descriptor
    return (type(obj), obj.__name__, _get_bases(obj), type_kwargs,
            _get_or_create_tracker_id(obj), None)
def _enum_getnewargs(obj):
    """Arguments handed to _make_skeleton_enum for a dynamic Enum class."""
    members = {member.name: member.value for member in obj}
    return (obj.__bases__, obj.__name__, obj.__qualname__, members,
            obj.__module__, _get_or_create_tracker_id(obj), None)
# COLLECTION OF OBJECTS RECONSTRUCTORS
# ------------------------------------
def _file_reconstructor(retval):
return retval
# COLLECTION OF OBJECTS STATE GETTERS
# -----------------------------------
def _function_getstate(func):
    """Return the (state, slotstate) pair needed to rebuild *func*."""
    # - Put func's dynamic attributes (stored in func.__dict__) in state. These
    # attributes will be restored at unpickling time using
    # f.__dict__.update(state)
    # - Put func's members into slotstate. Such attributes will be restored at
    # unpickling time by iterating over slotstate and calling setattr(func,
    # slotname, slotvalue)
    slotstate = {
        "__name__": func.__name__,
        "__qualname__": func.__qualname__,
        "__annotations__": func.__annotations__,
        "__kwdefaults__": func.__kwdefaults__,
        "__defaults__": func.__defaults__,
        "__module__": func.__module__,
        "__doc__": func.__doc__,
        "__closure__": func.__closure__,
    }
    # Only ship the globals that func's code object actually references.
    f_globals_ref = _extract_code_globals(func.__code__)
    f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in
                 func.__globals__}
    closure_values = (
        list(map(_get_cell_contents, func.__closure__))
        if func.__closure__ is not None else ()
    )
    # Extract currently-imported submodules used by func. Storing these modules
    # in a smoke _cloudpickle_subimports attribute of the object's state will
    # trigger the side effect of importing these modules at unpickling time
    # (which is necessary for func to work correctly once depickled)
    slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
        func.__code__, itertools.chain(f_globals.values(), closure_values))
    slotstate["__globals__"] = f_globals
    state = func.__dict__
    return state, slotstate
def _class_getstate(obj):
    """Return the (clsdict, slotstate) pair used to pickle a dynamic class.

    Strips ABC caches and slot member descriptors that are re-created
    automatically at unpickling time.
    """
    clsdict = _extract_class_dict(obj)
    # __weakref__ descriptors are re-created by type(); do not pickle them.
    clsdict.pop('__weakref__', None)
    if issubclass(type(obj), abc.ABCMeta):
        # If obj is an instance of an ABCMeta subclass, dont pickle the
        # cache/negative caches populated during isinstance/issubclass
        # checks, but pickle the list of registered subclasses of obj.
        clsdict.pop('_abc_cache', None)
        clsdict.pop('_abc_negative_cache', None)
        clsdict.pop('_abc_negative_cache_version', None)
        registry = clsdict.pop('_abc_registry', None)
        if registry is None:
            # in Python3.7+, the abc caches and registered subclasses of a
            # class are bundled into the single _abc_impl attribute
            clsdict.pop('_abc_impl', None)
            (registry, _, _, _) = abc._get_dump(obj)
            clsdict["_abc_impl"] = [subclass_weakref()
                                    for subclass_weakref in registry]
        else:
            # In the above if clause, registry is a set of weakrefs -- in
            # this case, registry is a WeakSet
            clsdict["_abc_impl"] = [type_ for type_ in registry]
    if "__slots__" in clsdict:
        # pickle string length optimization: member descriptors of obj are
        # created automatically from obj's __slots__ attribute, no need to
        # save them in obj's state
        if isinstance(obj.__slots__, str):
            clsdict.pop(obj.__slots__)
        else:
            for k in obj.__slots__:
                clsdict.pop(k, None)
    clsdict.pop('__dict__', None)  # unpicklable property object
    return (clsdict, {})
def _enum_getstate(obj):
    """Like _class_getstate, minus everything EnumMeta rebuilds itself."""
    clsdict, slotstate = _class_getstate(obj)
    member_names = [member.name for member in obj]
    # These bookkeeping attributes are re-populated by the Enum metaclass
    # when the skeleton enum is re-created, so they must not be pickled.
    for attrname in ("_generate_next_value_", "_member_names_",
                     "_member_map_", "_member_type_",
                     "_value2member_map_"):
        clsdict.pop(attrname, None)
    # Member values travel through _enum_getnewargs, not through the state.
    for member_name in member_names:
        clsdict.pop(member_name)
    # Special handling of Enum subclasses
    return clsdict, slotstate
# COLLECTIONS OF OBJECTS REDUCERS
# -------------------------------
# A reducer is a function taking a single argument (obj), and that returns a
# tuple with all the necessary data to re-construct obj. Apart from a few
# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
# correctly pickle an object.
# While many built-in objects (Exceptions objects, instances of the "object"
# class, etc), are shipped with their own built-in reducer (invoked using
# obj.__reduce__), some do not. The following methods were created to "fill
# these holes".
def _code_reduce(obj):
"""codeobject reducer"""
if hasattr(obj, "co_posonlyargcount"): # pragma: no branch
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
obj.co_cellvars
)
else:
args = (
obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals,
obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts,
obj.co_names, obj.co_varnames, obj.co_filename,
obj.co_name, obj.co_firstlineno, obj.co_lnotab,
obj.co_freevars, obj.co_cellvars
)
return types.CodeType, args
def _cell_reduce(obj):
    """Cell (containing values of a function's free variables) reducer"""
    try:
        contents = (obj.cell_contents,)
    except ValueError:
        # An empty cell has no cell_contents; rebuild it empty.
        return _make_empty_cell, ()
    return _make_cell, contents
def _classmethod_reduce(obj):
orig_func = obj.__func__
return type(obj), (orig_func,)
def _file_reduce(obj):
    """Save a file.

    Only readable, open, non-tty files that map to a real file (or one of
    the std streams) can be pickled; the whole remaining content is read
    into an in-memory StringIO that is shipped instead.
    """
    import io
    if not hasattr(obj, "name") or not hasattr(obj, "mode"):
        raise pickle.PicklingError(
            "Cannot pickle files that do not map to an actual file"
        )
    # The std streams are pickled by reference, not by content.
    if obj is sys.stdout:
        return getattr, (sys, "stdout")
    if obj is sys.stderr:
        return getattr, (sys, "stderr")
    if obj is sys.stdin:
        raise pickle.PicklingError("Cannot pickle standard input")
    if obj.closed:
        raise pickle.PicklingError("Cannot pickle closed files")
    if hasattr(obj, "isatty") and obj.isatty():
        raise pickle.PicklingError(
            "Cannot pickle files that map to tty objects"
        )
    if "r" not in obj.mode and "+" not in obj.mode:
        raise pickle.PicklingError(
            "Cannot pickle files that are not opened for reading: %s"
            % obj.mode
        )
    name = obj.name
    retval = io.StringIO()
    try:
        # Read the whole file
        curloc = obj.tell()
        obj.seek(0)
        contents = obj.read()
        # Restore the original cursor position on the source file.
        obj.seek(curloc)
    except IOError as e:
        raise pickle.PicklingError(
            "Cannot pickle file %s as it cannot be read" % name
        ) from e
    retval.write(contents)
    # The replacement buffer mimics the original cursor position and name.
    retval.seek(curloc)
    retval.name = name
    return _file_reconstructor, (retval,)
def _getset_descriptor_reduce(obj):
return getattr, (obj.__objclass__, obj.__name__)
def _mappingproxy_reduce(obj):
return types.MappingProxyType, (dict(obj),)
def _memoryview_reduce(obj):
return bytes, (obj.tobytes(),)
def _module_reduce(obj):
    """Reducer for module objects, importable or dynamic."""
    if not _is_importable(obj):
        # A dynamic module travels with its whole namespace, except
        # __builtins__ which the interpreter re-populates on its own.
        obj.__dict__.pop('__builtins__', None)
        return dynamic_subimport, (obj.__name__, vars(obj))
    return subimport, (obj.__name__,)
def _method_reduce(obj):
return (types.MethodType, (obj.__func__, obj.__self__))
def _logger_reduce(obj):
return logging.getLogger, (obj.name,)
def _root_logger_reduce(obj):
return logging.getLogger, ()
def _property_reduce(obj):
return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
def _weakset_reduce(obj):
return weakref.WeakSet, (list(obj),)
def _dynamic_class_reduce(obj):
    """
    Save a class that can't be stored as module global.
    This method is used to serialize classes that are defined inside
    functions, or that otherwise can't be serialized as attribute lookups
    from global modules.

    Returns a 6-tuple (reconstructor, newargs, state, None, None,
    state_setter) consumed by the pickle reduce protocol.
    """
    # The `Enum is not None` guard keeps this working when the enum module
    # was unavailable at import time.
    if Enum is not None and issubclass(obj, Enum):
        return (
            _make_skeleton_enum, _enum_getnewargs(obj), _enum_getstate(obj),
            None, None, _class_setstate
        )
    else:
        return (
            _make_skeleton_class, _class_getnewargs(obj), _class_getstate(obj),
            None, None, _class_setstate
        )
def _class_reduce(obj):
    """Select the reducer depending on the dynamic nature of the class obj"""
    # The three singleton "hidden" types are rebuilt by calling type() on
    # their singleton instance.
    if obj is type(None):  # noqa
        return type, (None,)
    elif obj is type(Ellipsis):
        return type, (Ellipsis,)
    elif obj is type(NotImplemented):
        return type, (NotImplemented,)
    elif obj in _BUILTIN_TYPE_NAMES:
        # Builtin types are pickled by name lookup.
        return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
    elif not _is_importable(obj):
        # Dynamic classes need full reconstruction.
        return _dynamic_class_reduce(obj)
    # Importable classes fall back to the default pickle-by-reference.
    return NotImplemented
def _dict_keys_reduce(obj):
    """Reduce a dict_keys view to a standalone list of keys.

    Only the keys are shipped: serializing the backing dict could leak
    values the caller never intended to send.
    """
    keys = list(obj)
    return _make_dict_keys, (keys,)
def _dict_values_reduce(obj):
    """Reduce a dict_values view to a standalone list of values.

    Only the values are shipped: serializing the backing dict could leak
    keys the caller never intended to send.
    """
    values = list(obj)
    return _make_dict_values, (values,)
def _dict_items_reduce(obj):
    """Reduce a dict_items view through a plain-dict snapshot."""
    snapshot = dict(obj)
    return _make_dict_items, (snapshot,)
# COLLECTIONS OF OBJECTS STATE SETTERS
# ------------------------------------
# state setters are called at unpickling time, once the object is created and
# it has to be updated to how it was at unpickling time.
def _function_setstate(obj, state):
    """Update the state of a dynamic function.
    As __closure__ and __globals__ are readonly attributes of a function, we
    cannot rely on the native setstate routine of pickle.load_build, that calls
    setattr on items of the slotstate. Instead, we have to modify them inplace.
    """
    state, slotstate = state
    obj.__dict__.update(state)
    obj_globals = slotstate.pop("__globals__")
    obj_closure = slotstate.pop("__closure__")
    # _cloudpickle_subimports is a set of submodules that must be loaded for
    # the pickled function to work correctly at unpickling time. Now that these
    # submodules are depickled (hence imported), they can be removed from the
    # object's state (the object state only served as a reference holder to
    # these submodules)
    slotstate.pop("_cloudpickle_submodules")
    # __globals__ is readonly: update the existing dict in place instead of
    # rebinding it.
    obj.__globals__.update(obj_globals)
    obj.__globals__["__builtins__"] = __builtins__
    if obj_closure is not None:
        # Fill each pre-created (possibly empty) cell of the readonly
        # __closure__ with the depickled contents.
        for i, cell in enumerate(obj_closure):
            try:
                value = cell.cell_contents
            except ValueError:  # cell is empty
                continue
            cell_set(obj.__closure__[i], value)
    # Remaining slotstate entries are plain settable attributes.
    for k, v in slotstate.items():
        setattr(obj, k, v)
def _class_setstate(obj, state):
state, slotstate = state
registry = None
for attrname, attr in state.items():
if attrname == "_abc_impl":
registry = attr
else:
setattr(obj, attrname, attr)
if registry is not None:
for subclass in registry:
obj.register(subclass)
return obj
class CloudPickler(Pickler):
    """Pickler subclass implementing cloudpickle's reducers.

    The class body below branches on pickle protocol support: with
    protocol 5 (C Pickler) reducers for functions/classes are installed
    through the ``reducer_override`` callback; otherwise the pure-Python
    ``Pickler.dispatch`` table is patched directly.
    """

    # set of reducers defined and used by cloudpickle (private)
    _dispatch_table = {}
    _dispatch_table[classmethod] = _classmethod_reduce
    _dispatch_table[io.TextIOWrapper] = _file_reduce
    _dispatch_table[logging.Logger] = _logger_reduce
    _dispatch_table[logging.RootLogger] = _root_logger_reduce
    _dispatch_table[memoryview] = _memoryview_reduce
    _dispatch_table[property] = _property_reduce
    _dispatch_table[staticmethod] = _classmethod_reduce
    _dispatch_table[CellType] = _cell_reduce
    _dispatch_table[types.CodeType] = _code_reduce
    _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
    _dispatch_table[types.ModuleType] = _module_reduce
    _dispatch_table[types.MethodType] = _method_reduce
    _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
    _dispatch_table[weakref.WeakSet] = _weakset_reduce
    _dispatch_table[typing.TypeVar] = _typevar_reduce
    _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
    _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
    _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce

    # cloudpickle's private reducers take precedence over copyreg's globals.
    dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)

    # function reducers are defined as instance methods of CloudPickler
    # objects, as they rely on a CloudPickler attribute (globals_ref)
    def _dynamic_function_reduce(self, func):
        """Reduce a function that is not pickleable via attribute lookup."""
        newargs = self._function_getnewargs(func)
        state = _function_getstate(func)
        # 6-tuple reduce value: callable, args, state, listitems, dictitems,
        # and the protocol-5 state_setter used at unpickling time.
        return (types.FunctionType, newargs, state, None, None,
                _function_setstate)

    def _function_reduce(self, obj):
        """Reducer for function objects.

        If obj is a top-level attribute of a file-backed module, this
        reducer returns NotImplemented, making the CloudPickler fallback to
        traditional _pickle.Pickler routines to save obj. Otherwise, it reduces
        obj using a custom cloudpickle reducer designed specifically to handle
        dynamic functions.

        As opposed to cloudpickle.py, there is no special handling for builtin
        pypy functions because cloudpickle_fast is CPython-specific.
        """
        if _is_importable(obj):
            return NotImplemented
        else:
            return self._dynamic_function_reduce(obj)

    def _function_getnewargs(self, func):
        code = func.__code__

        # base_globals represents the future global namespace of func at
        # unpickling time. Looking it up and storing it in
        # CloudPickler.globals_ref allow functions sharing the same globals
        # at pickling time to also share them once unpickled, at one condition:
        # since globals_ref is an attribute of a CloudPickler instance, and
        # that a new CloudPickler is created each time pickle.dump or
        # pickle.dumps is called, functions also need to be saved within the
        # same invocation of cloudpickle.dump/cloudpickle.dumps (for example:
        # cloudpickle.dumps([f1, f2])). There is no such limitation when using
        # CloudPickler.dump, as long as the multiple invocations are bound to
        # the same CloudPickler.
        base_globals = self.globals_ref.setdefault(id(func.__globals__), {})

        if base_globals == {}:
            # Add module attributes used to resolve relative imports
            # instructions inside func.
            for k in ["__package__", "__name__", "__path__", "__file__"]:
                if k in func.__globals__:
                    base_globals[k] = func.__globals__[k]

        # Do not bind the free variables before the function is created to
        # avoid infinite recursion.
        if func.__closure__ is None:
            closure = None
        else:
            # One empty cell per free variable; filled by _function_setstate.
            closure = tuple(
                _make_empty_cell() for _ in range(len(code.co_freevars)))

        return code, base_globals, None, None, closure

    def dump(self, obj):
        try:
            return Pickler.dump(self, obj)
        except RuntimeError as e:
            # Translate CPython's recursion RuntimeError into a PicklingError
            # so callers only have to handle pickle-specific failures.
            if "recursion" in e.args[0]:
                msg = (
                    "Could not pickle object as excessively deep recursion "
                    "required."
                )
                raise pickle.PicklingError(msg) from e
            else:
                raise

    if pickle.HIGHEST_PROTOCOL >= 5:
        # `CloudPickler.dispatch` is only left for backward compatibility - note
        # that when using protocol 5, `CloudPickler.dispatch` is not an
        # extension of `Pickler.dispatch` dictionary, because CloudPickler
        # subclasses the C-implemented Pickler, which does not expose a
        # `dispatch` attribute. Earlier versions of the protocol 5 CloudPickler
        # used `CloudPickler.dispatch` as a class-level attribute storing all
        # reducers implemented by cloudpickle, but the attribute name was not a
        # great choice given the meaning of `CloudPickler.dispatch` when
        # `CloudPickler` extends the pure-python pickler.
        dispatch = dispatch_table

        # Implementation of the reducer_override callback, in order to
        # efficiently serialize dynamic functions and classes by subclassing
        # the C-implemented Pickler.
        # TODO: decorrelate reducer_override (which is tied to CPython's
        # implementation - would it make sense to backport it to pypy? - and
        # pickle's protocol 5 which is implementation agnostic. Currently, the
        # availability of both notions coincide on CPython's pickle and the
        # pickle5 backport, but it may not be the case anymore when pypy
        # implements protocol 5
        def __init__(self, file, protocol=None, buffer_callback=None):
            if protocol is None:
                protocol = DEFAULT_PROTOCOL
            Pickler.__init__(
                self, file, protocol=protocol, buffer_callback=buffer_callback
            )
            # map functions __globals__ attribute ids, to ensure that functions
            # sharing the same global namespace at pickling time also share
            # their global namespace at unpickling time.
            self.globals_ref = {}
            self.proto = int(protocol)

        def reducer_override(self, obj):
            """Type-agnostic reducing callback for function and classes.

            For performance reasons, subclasses of the C _pickle.Pickler class
            cannot register custom reducers for functions and classes in the
            dispatch_table. Reducer for such types must instead be implemented
            in the special reducer_override method.

            Note that this method will be called for any object except a few
            builtin-types (int, lists, dicts etc.), which differs from reducers
            in the Pickler's dispatch_table, each of them being invoked for
            objects of a specific type only.

            This property comes in handy for classes: although most classes are
            instances of the ``type`` metaclass, some of them can be instances
            of other custom metaclasses (such as enum.EnumMeta for example). In
            particular, the metaclass will likely not be known in advance, and
            thus cannot be special-cased using an entry in the dispatch_table.
            reducer_override, among other things, allows us to register a
            reducer that will be called for any class, independently of its
            type.

            Notes:

            * reducer_override has the priority over dispatch_table-registered
              reducers.
            * reducer_override can be used to fix other limitations of
              cloudpickle for other types that suffered from type-specific
              reducers, such as Exceptions. See
              https://github.com/cloudpipe/cloudpickle/issues/248
            """
            if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj):  # noqa  # pragma: no branch
                return (
                    _create_parametrized_type_hint,
                    parametrized_type_hint_getinitargs(obj)
                )
            t = type(obj)
            try:
                is_anyclass = issubclass(t, type)
            except TypeError:  # t is not a class (old Boost; see SF #502085)
                is_anyclass = False

            if is_anyclass:
                return _class_reduce(obj)
            elif isinstance(obj, types.FunctionType):
                return self._function_reduce(obj)
            else:
                # fallback to save_global, including the Pickler's
                # dispatch_table
                return NotImplemented

    else:
        # When reducer_override is not available, hack the pure-Python
        # Pickler's types.FunctionType and type savers. Note: the type saver
        # must override Pickler.save_global, because pickle.py contains a
        # hard-coded call to save_global when pickling meta-classes.
        dispatch = Pickler.dispatch.copy()

        def __init__(self, file, protocol=None):
            if protocol is None:
                protocol = DEFAULT_PROTOCOL
            Pickler.__init__(self, file, protocol=protocol)
            # map functions __globals__ attribute ids, to ensure that functions
            # sharing the same global namespace at pickling time also share
            # their global namespace at unpickling time.
            self.globals_ref = {}
            # NOTE(review): the pure-Python Pickler.__init__ is expected to
            # have set self.proto already; this assert only documents that.
            assert hasattr(self, 'proto')

        def _save_reduce_pickle5(self, func, args, state=None, listitems=None,
                                 dictitems=None, state_setter=None, obj=None):
            save = self.save
            write = self.write
            # First emit a regular reduce without state ...
            self.save_reduce(
                func, args, state=None, listitems=listitems,
                dictitems=dictitems, obj=obj
            )
            # backport of the Python 3.8 state_setter pickle operations
            save(state_setter)
            save(obj)  # simple BINGET opcode as obj is already memoized.
            save(state)
            write(pickle.TUPLE2)
            # Trigger a state_setter(obj, state) function call.
            write(pickle.REDUCE)
            # The purpose of state_setter is to carry-out an
            # inplace modification of obj. We do not care about what the
            # method might return, so its output is eventually removed from
            # the stack.
            write(pickle.POP)

        def save_global(self, obj, name=None, pack=struct.pack):
            """
            Save a "global".

            The name of this method is somewhat misleading: all types get
            dispatched here.
            """
            if obj is type(None):  # noqa
                return self.save_reduce(type, (None,), obj=obj)
            elif obj is type(Ellipsis):
                return self.save_reduce(type, (Ellipsis,), obj=obj)
            elif obj is type(NotImplemented):
                return self.save_reduce(type, (NotImplemented,), obj=obj)
            elif obj in _BUILTIN_TYPE_NAMES:
                return self.save_reduce(
                    _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)

            if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj):  # noqa  # pragma: no branch
                # Parametrized typing constructs in Python < 3.7 are not
                # compatible with type checks and ``isinstance`` semantics. For
                # this reason, it is easier to detect them using a
                # duck-typing-based check (``_is_parametrized_type_hint``) than
                # to populate the Pickler's dispatch with type-specific savers.
                self.save_reduce(
                    _create_parametrized_type_hint,
                    parametrized_type_hint_getinitargs(obj),
                    obj=obj
                )
            elif name is not None:
                Pickler.save_global(self, obj, name=name)
            elif not _is_importable(obj, name=name):
                # Dynamic class: serialize its full definition.
                self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
            else:
                Pickler.save_global(self, obj, name=name)

        dispatch[type] = save_global

        def save_function(self, obj, name=None):
            """ Registered with the dispatch to handle all function types.

            Determines what kind of function obj is (e.g. lambda, defined at
            interactive prompt, etc) and handles the pickling appropriately.
            """
            if _is_importable(obj, name=name):
                return Pickler.save_global(self, obj, name=name)
            elif PYPY and isinstance(obj.__code__, builtin_code_type):
                return self.save_pypy_builtin_func(obj)
            else:
                return self._save_reduce_pickle5(
                    *self._dynamic_function_reduce(obj), obj=obj
                )

        def save_pypy_builtin_func(self, obj):
            """Save pypy equivalent of builtin functions.

            PyPy does not have the concept of builtin-functions. Instead,
            builtin-functions are simple function instances, but with a
            builtin-code attribute.
            Most of the time, builtin functions should be pickled by attribute.
            But PyPy has flaky support for __qualname__, so some builtin
            functions such as float.__new__ will be classified as dynamic. For
            this reason only, we created this special routine. Because
            builtin-functions are not expected to have closure or globals,
            there is no additional hack (compared the one already implemented
            in pickle) to protect ourselves from reference cycles. A simple
            (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
            also that PyPy improved their support for __qualname__ in v3.6, so
            this routing should be removed when cloudpickle supports only PyPy
            3.6 and later.
            """
            rv = (types.FunctionType, (obj.__code__, {}, obj.__name__,
                                       obj.__defaults__, obj.__closure__),
                  obj.__dict__)
            self.save_reduce(*rv, obj=obj)

        dispatch[types.FunctionType] = save_function
| apache-2.0 |
gangadhar-kadam/laganerp | erpnext/stock/doctype/bin/bin.py | 12 | 2262 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, nowdate
import frappe.defaults
from frappe.model.document import Document
class Bin(Document):
    """Per item/warehouse stock summary: actual, ordered, reserved,
    indented and planned quantities, plus the derived projected qty.

    NOTE(review): relies on frappe.model.document.Document for get/set/save
    semantics — behavior of those helpers is assumed, not visible here.
    """

    def validate(self):
        # Backfill the stock UOM from the Item master for new rows or rows
        # where it was never set.
        if self.get("__islocal") or not self.stock_uom:
            self.stock_uom = frappe.db.get_value('Item', self.item_code, 'stock_uom')
        self.validate_mandatory()
        # projected = on-hand + incoming (ordered + indented + planned) - reserved
        self.projected_qty = flt(self.actual_qty) + flt(self.ordered_qty) + \
            flt(self.indented_qty) + flt(self.planned_qty) - flt(self.reserved_qty)

    def validate_mandatory(self):
        # Coerce missing/empty quantity fields to 0.0 so later arithmetic
        # never sees None.
        qf = ['actual_qty', 'reserved_qty', 'ordered_qty', 'indented_qty']
        for f in qf:
            if (not getattr(self, f, None)) or (not self.get(f)):
                self.set(f, 0.0)

    def update_stock(self, args):
        """Apply quantity deltas from ``args`` and, when actual stock moved,
        reprocess the stock ledger from the posting datetime onwards."""
        self.update_qty(args)

        if args.get("actual_qty"):
            from erpnext.stock.stock_ledger import update_entries_after

            if not args.get("posting_date"):
                args["posting_date"] = nowdate()

            # update valuation and qty after transaction for post dated entry
            update_entries_after({
                "item_code": self.item_code,
                "warehouse": self.warehouse,
                "posting_date": args.get("posting_date"),
                "posting_time": args.get("posting_time")
            })

    def update_qty(self, args):
        # update the stock values (for current quantities); each key in args
        # is a delta added to the corresponding field.
        self.actual_qty = flt(self.actual_qty) + flt(args.get("actual_qty"))
        self.ordered_qty = flt(self.ordered_qty) + flt(args.get("ordered_qty"))
        self.reserved_qty = flt(self.reserved_qty) + flt(args.get("reserved_qty"))
        self.indented_qty = flt(self.indented_qty) + flt(args.get("indented_qty"))
        self.planned_qty = flt(self.planned_qty) + flt(args.get("planned_qty"))

        # Same formula as validate(): keep projected qty in sync.
        self.projected_qty = flt(self.actual_qty) + flt(self.ordered_qty) + \
            flt(self.indented_qty) + flt(self.planned_qty) - flt(self.reserved_qty)

        self.save()

    def get_first_sle(self):
        """Return the oldest Stock Ledger Entry for this item/warehouse,
        or None when no entry exists."""
        sle = frappe.db.sql("""
            select * from `tabStock Ledger Entry`
            where item_code = %s
            and warehouse = %s
            order by timestamp(posting_date, posting_time) asc, name asc
            limit 1
        """, (self.item_code, self.warehouse), as_dict=1)
        return sle and sle[0] or None
t0in4/django | django/db/backends/sqlite3/introspection.py | 204 | 11332 | import re
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
# Matches "char(n)" / "varchar(n)" type names and captures the length n.
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')

# Extend the base FieldInfo tuple with SQLite's column default value.
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
def get_field_size(name):
    """Return the length from a "varchar(11)"-style type name, or None.

    Both "char(n)" and "varchar(n)" spellings are accepted, with optional
    surrounding whitespace.
    """
    match = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', name)
    if match is None:
        return None
    return int(match.group(1))
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
    """Case-insensitive mapping from SQLite type names to Django fields.

    This light wrapper "fakes" a dictionary interface because some SQLite
    data types embed variables -- e.g. "varchar(30)" -- and therefore
    cannot be resolved with a plain dictionary lookup.
    """

    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }

    def __getitem__(self, key):
        normalized = key.lower()
        if normalized in self.base_data_types_reverse:
            return self.base_data_types_reverse[normalized]
        # Fall back to the "(var)char(n)" pattern with an explicit length.
        match = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', normalized)
        if match is not None:
            return ('CharField', {'max_length': int(match.group(1))})
        raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """SQLite implementation of database introspection.

    SQLite exposes little structured metadata, so several methods below
    fall back to parsing the raw ``CREATE TABLE`` SQL text stored in
    ``sqlite_master``.
    """

    data_types_reverse = FlexibleFieldLookupDict()

    def get_table_list(self, cursor):
        """
        Returns a list of table and view names in the current database.
        """
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute("""
            SELECT name, type FROM sqlite_master
            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
            ORDER BY name""")
        return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # FieldInfo positions: name, type_code, display_size, internal_size,
        # precision, scale, null_ok, default (the SQLite-specific extension).
        return [
            FieldInfo(
                info['name'],
                info['type'],
                None,
                info['size'],
                None,
                None,
                info['null_ok'],
                info['default'],
            ) for info in self._table_info(cursor, table_name)
        ]

    def column_name_converter(self, name):
        """
        SQLite will in some cases, e.g. when returning columns from views and
        subselects, return column names in 'alias."column"' format instead of
        simply 'column'.

        Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
        """
        # TODO: remove when SQLite < 3.7.15 is sufficiently old.
        # 3.7.13 ships in Debian stable as of 2014-03-21.
        if self.connection.Database.sqlite_version_info < (3, 7, 15):
            return name.split('.')[-1].strip('"')
        else:
            return name

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: (other_field_name, other_table)}
        representing all relationships to the given table.
        """
        # Dictionary of relations to return
        relations = {}

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        try:
            results = cursor.fetchone()[0].strip()
        except TypeError:
            # It might be a view, then no results will be returned
            return relations
        # Keep only the column definition list between the outer parentheses.
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search('references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]

            if field_desc.startswith("FOREIGN KEY"):
                # Find name of the target FK field
                m = re.match('FOREIGN KEY\(([^\)]*)\).*', field_desc, re.I)
                field_name = m.groups()[0].strip('"')
            else:
                field_name = field_desc.split()[0].strip('"')

            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li + 1:ri]

            # Find the referenced column's canonical name in the other table.
            for other_desc in other_table_results.split(','):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue

                other_name = other_desc.split(' ', 1)[0].strip('"')
                if other_name == column:
                    relations[field_name] = (other_name, table)
                    break

        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
        key columns in given table.
        """
        key_columns = []

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue

            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple(s.strip('"') for s in m.groups()))

        return key_columns

    def get_indexes(self, cursor, table_name):
        indexes = {}
        # Primary-key columns first (pk position comes from PRAGMA table_info).
        for info in self._table_info(cursor, table_name):
            if info['pk'] != 0:
                indexes[info['name']] = {'primary_key': True,
                                         'unique': False}
        cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            info = cursor.fetchall()
            # Skip indexes across multiple fields
            if len(info) != 1:
                continue
            name = info[0][2]  # seqno, cid, name
            indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
                             'unique': unique}
        return indexes

    def get_primary_key_column(self, cursor, table_name):
        """
        Get the column name of the primary key for the given table.
        """
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        row = cursor.fetchone()
        if row is None:
            raise ValueError("Table %s does not exist" % table_name)
        results = row[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?$', field_desc)
            if m:
                return m.groups()[0]
        return None

    def _table_info(self, cursor, name):
        cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
        # cid, name, type, notnull, default_value, pk
        return [{
            'name': field[1],
            'type': field[2],
            'size': get_field_size(field[2]),
            'null_ok': not field[3],
            'default': field[4],
            'pk': field[5],  # undocumented
        } for field in cursor.fetchall()]

    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
        """
        constraints = {}
        # Get the index info
        cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
        for row in cursor.fetchall():
            # Sqlite3 3.8.9+ has 5 columns, however older versions only give 3
            # columns. Discard last 2 columns if there.
            number, index, unique = row[:3]
            # Get the index info for that index
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            for index_rank, column_rank, column in cursor.fetchall():
                if index not in constraints:
                    constraints[index] = {
                        "columns": [],
                        "primary_key": False,
                        "unique": bool(unique),
                        "foreign_key": False,
                        "check": False,
                        "index": True,
                    }
                constraints[index]['columns'].append(column)
        # Get the PK
        pk_column = self.get_primary_key_column(cursor, table_name)
        if pk_column:
            # SQLite doesn't actually give a name to the PK constraint,
            # so we invent one. This is fine, as the SQLite backend never
            # deletes PK constraints by name, as you can't delete constraints
            # in SQLite; we remake the table with a new PK instead.
            constraints["__primary__"] = {
                "columns": [pk_column],
                "primary_key": True,
                "unique": False,  # It's not actually a unique constraint.
                "foreign_key": False,
                "check": False,
                "index": False,
            }
        return constraints
HybridF5/tempest | tempest/api/orchestration/stacks/test_templates.py | 25 | 2094 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest import test
class TemplateYAMLTestJSON(base.BaseOrchestrationTest):
    """Smoke tests for showing and validating a stack's template (YAML)."""

    # Minimal CFN-style YAML template creating a single IAM user.
    template = """
HeatTemplateFormatVersion: '2012-12-12'
Description: |
  Template which creates only a new user
Resources:
  CfnUser:
    Type: AWS::IAM::User
"""

    @classmethod
    def resource_setup(cls):
        # Create one stack from the template and wait until it is complete;
        # the test methods below only read it back.
        super(TemplateYAMLTestJSON, cls).resource_setup()
        cls.stack_name = data_utils.rand_name('heat')
        cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
        cls.client.wait_for_stack_status(cls.stack_identifier,
                                         'CREATE_COMPLETE')
        cls.stack_id = cls.stack_identifier.split('/')[1]
        cls.parameters = {}

    @test.idempotent_id('47430699-c368-495e-a1db-64c26fd967d7')
    def test_show_template(self):
        """Getting template used to create the stack."""
        self.client.show_template(self.stack_identifier)

    @test.idempotent_id('ed53debe-8727-46c5-ab58-eba6090ec4de')
    def test_validate_template(self):
        """Validating template passing it content."""
        self.client.validate_template(self.template,
                                      self.parameters)
class TemplateAWSTestJSON(TemplateYAMLTestJSON):
    """Reruns the YAML template tests with an equivalent AWS/JSON template."""

    template = """
{
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Template which creates only a new user",
  "Resources" : {
    "CfnUser" : {
      "Type" : "AWS::IAM::User"
    }
  }
}
"""
jhjacobsen/RFNN | caffe-rfnn/python/caffe/test/test_python_layer_with_param_str.py | 8 | 1925 | import unittest
import tempfile
import os
import six
import caffe
class SimpleParamLayer(caffe.Layer):
    """A layer that just multiplies by the numeric value of its param string"""

    def setup(self, bottom, top):
        # param_str comes straight from the prototxt python_param block.
        try:
            self.value = float(self.param_str)
        except ValueError:
            raise ValueError("Parameter string must be a legible float")

    def reshape(self, bottom, top):
        # Output blob mirrors the shape of the (single) input blob.
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        top[0].data[...] = self.value * bottom[0].data

    def backward(self, top, propagate_down, bottom):
        # d(value * x)/dx == value, so scale the incoming gradient.
        bottom[0].diff[...] = self.value * top[0].diff
def python_param_net_file():
    """Write a temporary prototxt describing a two-layer Python net.

    The net multiplies its input by 10 and then by 2 via SimpleParamLayer,
    exercising the param_str plumbing. Returns the file path; the caller
    is responsible for removing the file.
    """
    net_spec = """name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'mul10' bottom: 'data' top: 'mul10'
  python_param { module: 'test_python_layer_with_param_str'
    layer: 'SimpleParamLayer' param_str: '10' } }
layer { type: 'Python' name: 'mul2' bottom: 'mul10' top: 'mul2'
  python_param { module: 'test_python_layer_with_param_str'
    layer: 'SimpleParamLayer' param_str: '2' } }"""
    f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    try:
        f.write(net_spec)
    finally:
        f.close()
    return f.name
class TestLayerWithParam(unittest.TestCase):
    """Checks that param_str values reach Python layers and scale the data."""

    def setUp(self):
        # Build the x -> x*10 -> x*10*2 net, then drop the temp prototxt.
        net_file = python_param_net_file()
        self.net = caffe.Net(net_file, caffe.TRAIN)
        os.remove(net_file)

    def test_forward(self):
        # Forward pass: every output element must be x * 10 * 2.
        x = 8
        self.net.blobs['data'].data[...] = x
        self.net.forward()
        for y in self.net.blobs['mul2'].data.flat:
            self.assertEqual(y, 2 * 10 * x)

    def test_backward(self):
        # Backward pass: gradients are scaled by the same factors.
        x = 7
        self.net.blobs['mul2'].diff[...] = x
        self.net.backward()
        for y in self.net.blobs['data'].diff.flat:
            self.assertEqual(y, 2 * 10 * x)
| mit |
whitehorse-io/encarnia | Encarnia/server/conf/inlinefuncs.py | 11 | 1808 | """
Inlinefunc
Inline functions allow for direct conversion of text users mark in a
special way. Inlinefuncs are deactivated by default. To activate, add
INLINEFUNC_ENABLED = True
to your settings file. The default inlinefuncs are found in
evennia.utils.inlinefunc.
In text, usage is straightforward:
$funcname([arg1,[arg2,...]])
Example 1 (using the "pad" inlinefunc):
say This is $pad("a center-padded text", 50,c,-) of width 50.
->
John says, "This is -------------- a center-padded text--------------- of width 50."
Example 2 (using nested "pad" and "time" inlinefuncs):
say The time is $pad($time(), 30)right now.
->
John says, "The time is Oct 25, 11:09 right now."
To add more inline functions, add them to this module, using
the following call signature:
def funcname(text, *args, **kwargs)
where `text` is always the part between {funcname(args) and
{/funcname and the *args are taken from the appropriate part of the
call. If no {/funcname is given, `text` will be the empty string.
It is important that the inline function properly clean the
incoming `args`, checking their type and replacing them with sane
defaults if needed. If impossible to resolve, the unmodified text
should be returned. The inlinefunc should never cause a traceback.
While the inline function should accept **kwargs, the keyword is
never accepted as a valid call - this is only intended to be used
internally by Evennia, notably to send the `session` keyword to
the function; this is the session of the object viewing the string
and can be used to customize it to each session.
"""
#def capitalize(text, *args, **kwargs):
# "Silly capitalize example. Used as {capitalize() ... {/capitalize"
# session = kwargs.get("session")
# return text.capitalize()
| mit |
openmb/stb-gui | lib/python/Components/Converter/MovieInfo.py | 9 | 2671 | from Components.Converter.Converter import Converter
from Components.Element import cached, ElementError
from enigma import iServiceInformation, eServiceReference
from ServiceReference import ServiceReference
class MovieInfo(Converter, object):
    """Converter exposing movie metadata (descriptions, recording service
    name, or file size) for a movie-list service entry."""

    # Converter modes, selected by the skin's type string in __init__.
    MOVIE_SHORT_DESCRIPTION = 0  # meta description when available.. when not .eit short description
    MOVIE_META_DESCRIPTION = 1  # just meta description when available
    MOVIE_REC_SERVICE_NAME = 2  # name of recording service
    MOVIE_REC_FILESIZE = 3  # filesize of recording

    def __init__(self, type):
        if type == "ShortDescription":
            self.type = self.MOVIE_SHORT_DESCRIPTION
        elif type == "MetaDescription":
            self.type = self.MOVIE_META_DESCRIPTION
        elif type == "RecordServiceName":
            self.type = self.MOVIE_REC_SERVICE_NAME
        elif type == "FileSize":
            self.type = self.MOVIE_REC_FILESIZE
        else:
            raise ElementError("'%s' is not <ShortDescription|MetaDescription|RecordServiceName|FileSize> for MovieInfo converter" % type)
        Converter.__init__(self, type)

    @cached
    def getText(self):
        """Return the text for the configured mode, or "" when the source
        has no service/info available."""
        service = self.source.service
        info = self.source.info
        event = self.source.event
        if info and service:
            if self.type == self.MOVIE_SHORT_DESCRIPTION:
                if (service.flags & eServiceReference.flagDirectory) == eServiceReference.flagDirectory:
                    # Short description for Directory is the full path
                    return service.getPath()
                # Fallback chain: meta description -> EIT short description
                # -> file path.
                return (info.getInfoString(service, iServiceInformation.sDescription)
                        or (event and event.getShortDescription())
                        or service.getPath())
            elif self.type == self.MOVIE_META_DESCRIPTION:
                # Prefer the (longer) event descriptions over the meta one.
                return ((event and (event.getExtendedDescription() or event.getShortDescription()))
                        or info.getInfoString(service, iServiceInformation.sDescription)
                        or service.getPath())
            elif self.type == self.MOVIE_REC_SERVICE_NAME:
                rec_ref_str = info.getInfoString(service, iServiceInformation.sServiceref)
                return ServiceReference(rec_ref_str).getServiceName()
            elif self.type == self.MOVIE_REC_FILESIZE:
                if (service.flags & eServiceReference.flagDirectory) == eServiceReference.flagDirectory:
                    return _("Directory")
                filesize = info.getInfoObject(service, iServiceInformation.sFileSize)
                if filesize is not None:
                    # Pick a unit based on magnitude; very large recordings
                    # (>= ~100000 MiB) drop the decimals.
                    if filesize >= 104857600000:  # 100000*1024*1024
                        return _("%.0f GB") % (filesize / 1073741824.0)
                    elif filesize >= 1073741824:  # 1024*1024*1024
                        return _("%.2f GB") % (filesize / 1073741824.0)
                    elif filesize >= 1048576:
                        return _("%.0f MB") % (filesize / 1048576.0)
                    elif filesize >= 1024:
                        return _("%.0f kB") % (filesize / 1024.0)
                    return _("%d B") % filesize
        return ""

    text = property(getText)
| gpl-2.0 |
lordB8r/polls | ENV/lib/python2.7/site-packages/django/contrib/auth/management/commands/changepassword.py | 126 | 1975 | import getpass
from optparse import make_option
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
    """Management command: interactively change a user's password."""

    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
    )
    help = "Change a user's password for django.contrib.auth."
    requires_model_validation = False

    def _get_pass(self, prompt="Password: "):
        # Prompt without echoing; an empty entry aborts the command.
        p = getpass.getpass(prompt=prompt)
        if not p:
            raise CommandError("aborted")
        return p

    def handle(self, *args, **options):
        if len(args) > 1:
            raise CommandError("need exactly one or zero arguments for username")

        # Default to the OS-level login name when no username is given.
        if args:
            username, = args
        else:
            username = getpass.getuser()

        UserModel = get_user_model()

        try:
            u = UserModel._default_manager.using(options.get('database')).get(**{
                UserModel.USERNAME_FIELD: username
            })
        except UserModel.DoesNotExist:
            raise CommandError("user '%s' does not exist" % username)

        self.stdout.write("Changing password for user '%s'\n" % u)

        MAX_TRIES = 3
        count = 0
        p1, p2 = 1, 2  # To make them initially mismatch.
        # Ask for the password twice, up to MAX_TRIES times, until they match.
        while p1 != p2 and count < MAX_TRIES:
            p1 = self._get_pass()
            p2 = self._get_pass("Password (again): ")
            if p1 != p2:
                self.stdout.write("Passwords do not match. Please try again.\n")
                count = count + 1

        if count == MAX_TRIES:
            raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, count))

        u.set_password(p1)
        u.save()

        return "Password changed successfully for user '%s'" % u
| mit |
Averroes/idapython | Scripts/VirusTotal.py | 16 | 11221 | # -----------------------------------------------------------------------
# VirusTotal IDA Plugin
# By Elias Bachaalany <elias at hex-rays.com>
# (c) Hex-Rays 2011
#
# Special thanks:
# - VirusTotal team
# - Bryce Boe for his VirusTotal Python code
#
import os
import urllib
import webbrowser

import idaapi
import idc
from idaapi import Choose2, plugin_t
from idaapi import Form

import BboeVt as vt
PLUGIN_TEST = 0
# -----------------------------------------------------------------------
# Configuration file
VT_CFGFILE = os.path.join(idaapi.get_user_idadir(), "virustotal.cfg")
# -----------------------------------------------------------------------
# VirusTotal Icon in PNG format
VT_ICON = (
"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A\x00\x00\x00\x0D\x49\x48\x44\x52"
"\x00\x00\x00\x10\x00\x00\x00\x10\x04\x03\x00\x00\x00\xED\xDD\xE2"
"\x52\x00\x00\x00\x30\x50\x4C\x54\x45\x03\x8B\xD3\x5C\xB4\xE3\x9C"
"\xD1\xED\xF7\xFB\xFD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\xD3\xF2\x42\x61\x00\x00\x00"
"\x4B\x49\x44\x41\x54\x78\x9C\x2D\xCA\xC1\x0D\x80\x30\x0C\x43\x51"
"\x27\x2C\x50\x89\x05\x40\x2C\x40\xEB\xFD\x77\xC3\x76\xC9\xE9\xEB"
"\xC5\x20\x5F\xE8\x1A\x0F\x97\xA3\xD0\xE4\x1D\xF9\x49\xD1\x59\x29"
"\x4C\x43\x9B\xD0\x15\x01\xB5\x4A\x9C\xE4\x70\x14\x39\xB3\x31\xF8"
"\x15\x70\x04\xF4\xDA\x20\x39\x02\x8A\x0D\xA8\x0F\x94\xA7\x09\x0E"
"\xC5\x16\x2D\x54\x00\x00\x00\x00\x49\x45\x4E\x44\xAE\x42\x60\x82")
# -----------------------------------------------------------------------
class VirusTotalConfig(object):
    """Plugin settings (API key + option flags).

    The API key and options are persisted in VT_CFGFILE; the MD5 and input
    file path are re-derived from the current database on every run.
    """

    def __init__(self):
        self.Default()

    def Default(self):
        """Reset to defaults taken from the currently loaded database."""
        self.md5sum = GetInputMD5()
        self.infile = idaapi.dbg_get_input_path()
        if not self.infile:
            self.infile = ""
        # Persistent options
        self.apikey = ""
        self.options = 1 | 2

    def Read(self):
        """
        Read configuration from file
        """
        if not os.path.exists(VT_CFGFILE):
            return
        # Close the handle explicitly (the original version leaked it).
        f = open(VT_CFGFILE, 'r')
        try:
            lines = f.readlines()
        finally:
            f.close()
        # Line 1 holds the API key, line 2 the option flags.
        for i, line in enumerate(lines):
            line = line.strip()
            if i == 0:
                self.apikey = line
            elif i == 1:
                self.options = int(line)
            else:
                break

    def Write(self):
        """
        Write configuration to file
        """
        lines = (self.apikey.strip(), str(self.options))
        try:
            f = open(VT_CFGFILE, 'w')
            f.write("\n".join(lines))
            f.close()
        except:
            # Best effort: failing to persist the config is not fatal.
            pass
# -----------------------------------------------------------------------
def VtReport(apikey, filename=None, md5sum=None):
    """Query VirusTotal for a report by file name and/or MD5.

    Returns (True, rows) on success, where rows is a list of
    [antivirus, malware-name] pairs, or (True, None) when the file is
    unknown to VirusTotal; returns (False, error-message) on failure.
    """
    if filename is None and md5sum is None:
        return (False, "No parameters passed!")
    # The file must exist on disk if a file name was given.
    if filename is not None and not os.path.exists(filename):
        return (False, "Input file '%s' does not exist!" % filename)
    # Ask VirusTotal for the report.
    try:
        vt.set_apikey(apikey)
        report = vt.get_file_report(filename=filename, md5sum=md5sum)
    except Exception as e:
        return (False, "Exception:\n%s" % str(e))
    if report is None:
        # Not yet analyzed.
        return (True, None)
    # Flatten the {antivirus: detection} mapping into chooser rows.
    rows = []
    for av, mwname in report.items():
        rows.append([str(av), str(mwname) if mwname else "n/a"])
    return (True, rows)
# -----------------------------------------------------------------------
class VirusTotalChooser(Choose2):
    """
    Chooser class to display results from VT
    """
    def __init__(self, title, items, icon, embedded=False):
        columns = [["Antivirus", 20], ["Result", 40]]
        Choose2.__init__(self, title, columns, embedded=embedded)
        self.items = items
        self.icon = icon

    def GetItems(self):
        return self.items

    def SetItems(self, items):
        self.items = items if items is not None else []

    def OnClose(self):
        pass

    def OnGetLine(self, n):
        return self.items[n]

    def OnGetSize(self):
        return len(self.items)

    def OnSelectLine(self, n):
        # Google the "<antivirus> <malware name>" pair in a new browser tab.
        query = urllib.urlencode({"q": " ".join(self.items[n])})
        webbrowser.open_new_tab("http://www.google.com/search?%s" % query)
# --------------------------------------------------------------------------
class VirusTotalForm(Form):
    """Main plugin dialog: API key, input selection (file or MD5), an
    embedded chooser with the per-antivirus results, and a Report button.

    NOTE(review): requires ``Form`` from idaapi to be imported at module
    level; the original import list omitted it.
    """
    def __init__(self, icon):
        self.EChooser = VirusTotalChooser("E1", [], icon, embedded=True)
        Form.__init__(self, r"""STARTITEM {id:txtInput}
VirusTotal - IDAPython plugin v1.0 (c) Hex-Rays
{FormChangeCb}
<#API key#~A~pi key:{txtApiKey}>
Options:
<#Open results in a chooser when form closes#~P~opout results on close:{rOptRemember}>
<#Use MD5 checksum#~M~D5Sum:{rOptMD5}>
<#Use file on disk#~F~ile:{rOptFile}>{grpOptions}>
<#Type input (file or MD5 string)#~I~nput:{txtInput}>
<Results:{cEChooser}>
<#Get reports from VT#~R~eport:{btnReport}>
""", {
            'FormChangeCb': Form.FormChangeCb(self.OnFormChange),
            'txtApiKey' : Form.StringInput(swidth=80),
            'grpOptions' : Form.ChkGroupControl(("rOptRemember", "rOptMD5", "rOptFile")),
            'txtInput' : Form.FileInput(open=True),
            'btnReport' : Form.ButtonInput(self.OnReportClick),
            'cEChooser' : Form.EmbeddedChooserControl(self.EChooser)
        })

    def OnReportClick(self, code=0):
        # Actual handling happens in OnFormChange (btnReport branch).
        pass

    def OnFormChange(self, fid):
        """Form event callback: keeps the MD5/File radio pair mutually
        exclusive and runs the VT query when Report is pressed."""
        if fid == self.rOptMD5.id or fid == self.rOptFile.id:
            input = (self.cfg.md5sum, self.cfg.infile)
            if fid == self.rOptMD5.id:
                c1 = self.rOptMD5
                c2 = self.rOptFile
                idx = 0
            else:
                c1 = self.rOptFile
                c2 = self.rOptMD5
                idx = 1
            v = not self.GetControlValue(c1)
            if v: idx = not idx
            # Uncheck the opposite input type
            self.SetControlValue(c2, v)
            # Set input field depending on input type
            self.SetControlValue(self.txtInput, input[idx])
        #
        # Report button
        #
        elif fid == self.btnReport.id:
            input = self.GetControlValue(self.txtInput)
            as_file = self.GetControlValue(self.rOptFile)
            apikey = self.GetControlValue(self.txtApiKey)
            ok, r = VtReport(self.cfg.apikey,
                             filename=input if as_file else None,
                             md5sum=None if as_file else input)
            # Error?
            if not ok:
                idc.Warning(r)
                return 1
            # Pass the result
            self.EChooser.SetItems(r)
            # We have results and it was a file? Print its MD5
            if r and as_file:
                print("%s: %s" % (vt.LAST_FILE_HASH, input))
            # Refresh the embedded chooser control
            # (Could also clear previous results if not were retrieved during this run)
            self.RefreshField(self.cEChooser)
            # Store the input for the caller
            self.cfg.input = input
            # No results and file as input was supplied?
            if r is None:
                if as_file:
                    # Propose to upload
                    if idc.AskYN(0, "HIDECANCEL\nNo previous results. Do you want to submit the file:\n\n'%s'\n\nto VirusTotal?" % input) == 0:
                        return 1
                    try:
                        r = vt.scan_file(input)
                    except Exception as e:
                        # Fixed typo in the user-visible message ("Exceptio").
                        idc.Warning("Exception during upload: %s" % str(e))
                    else:
                        if r is None:
                            idc.Warning("Failed to upload the file!")
                        else:
                            idc.Warning("File uploaded. Check again later to get the analysis report. Scan id: %s" % r)
                else:
                    idc.Warning("No results found for hash: %s" % input)
        return 1

    def Show(self, cfg):
        """Populate the dialog from *cfg*, run it modally, and write the
        (possibly edited) values back.  Returns 0 on cancel, 1 on OK, or
        2 when results should pop out into a standalone chooser."""
        # Compile the form once
        if not self.Compiled():
            _, args = self.Compile()
            #print args[0]
        # Populate the form
        self.txtApiKey.value = cfg.apikey
        self.grpOptions.value = cfg.options
        self.txtInput.value = cfg.infile if self.rOptFile.checked else cfg.md5sum
        # Remember the config
        self.cfg = cfg
        # Execute the form
        ok = self.Execute()
        # Forget the cfg
        del self.cfg
        # Success?
        if ok != 0:
            # Update config
            cfg.options = self.grpOptions.value
            cfg.apikey = self.txtApiKey.value
            # Popup results?
            if self.rOptRemember.checked:
                ok = 2
        return ok
# -----------------------------------------------------------------------
class VirusTotalPlugin_t(plugin_t):
    """IDA plugin wrapper: shows the VirusTotal report form (Alt-F8)."""
    flags = idaapi.PLUGIN_UNL
    comment = "VirusTotal plugin for IDA"
    help = ""
    wanted_name = "VirusTotal report"
    wanted_hotkey = "Alt-F8"

    def init(self):
        # Some initialization
        self.icon_id = 0
        return idaapi.PLUGIN_OK

    def run(self, arg=0):
        # Load icon from the memory and save its id
        self.icon_id = idaapi.load_custom_icon(data=VT_ICON, format="png")
        if self.icon_id == 0:
            raise RuntimeError("Failed to load icon data!")
        # Create config object
        cfg = VirusTotalConfig()
        # Read previous config
        cfg.Read()
        # Create form
        f = VirusTotalForm(self.icon_id)
        # Show the form
        ok = f.Show(cfg)
        if ok == 0:
            # Cancelled: free the form without persisting anything.
            f.Free()
            return
        # Save configuration
        cfg.Write()
        # Spawn a non-modal chooser w/ the results if any
        if ok == 2 and f.EChooser.GetItems():
            VirusTotalChooser(
                "VirusTotal results [%s]" % cfg.input,
                f.EChooser.GetItems(),
                self.icon_id).Show()
        f.Free()
        return

    def term(self):
        # Free the custom icon
        if self.icon_id != 0:
            idaapi.free_custom_icon(self.icon_id)
# -----------------------------------------------------------------------
def PLUGIN_ENTRY():
    # Entry point used by IDA's plugin loader to instantiate the plugin.
    return VirusTotalPlugin_t()

# --------------------------------------------------------------------------
if PLUGIN_TEST:
    # Manual test mode: drive the plugin lifecycle directly, outside of
    # IDA's plugin manager.
    f = PLUGIN_ENTRY()
    f.init()
    f.run()
    f.term()
| bsd-3-clause |
makielab/django-oscar | oscar/apps/customer/alerts/views.py | 1 | 4677 | from django.http import HttpResponseRedirect, Http404
from django.views import generic
from django.db.models import get_model
from django.shortcuts import get_object_or_404
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django import http
from django.views.generic import TemplateView
from django.views.generic.list import MultipleObjectMixin
from oscar.core.loading import get_class
from oscar.apps.customer.alerts import utils
from oscar.apps.customer.mixins import PageTitleMixin
Product = get_model('catalogue', 'Product')
ProductAlert = get_model('customer', 'ProductAlert')
ProductAlertForm = get_class('customer.forms', 'ProductAlertForm')
class ProductAlertListView(PageTitleMixin, generic.ListView):
    """List the requesting user's open (not yet closed) product alerts."""
    model = ProductAlert
    template_name = 'customer/alerts/alert_list.html'
    context_object_name = 'alerts'
    page_title = _('Product Alerts')
    active_tab = 'alerts'

    def get_queryset(self):
        # Only the current user's alerts that have not been closed.
        return ProductAlert.objects.select_related().filter(
            user=self.request.user,
            date_closed=None,
        )
class ProductAlertCreateView(generic.CreateView):
    """
    View to create a new product alert based on a registered user
    or an email address provided by an anonymous user.
    """
    model = ProductAlert
    form_class = ProductAlertForm
    template_name = 'customer/alerts/form.html'

    def get_context_data(self, **kwargs):
        ctx = super(ProductAlertCreateView, self).get_context_data(**kwargs)
        ctx['product'] = self.product
        # The template expects the form under the name 'alert_form'.
        ctx['alert_form'] = ctx.pop('form')
        return ctx

    def get(self, request, *args, **kwargs):
        # Alerts can only be created via POST; bounce GETs to the product page.
        product = get_object_or_404(Product, pk=self.kwargs['pk'])
        return http.HttpResponseRedirect(product.get_absolute_url())

    def post(self, request, *args, **kwargs):
        # Resolve the product first so get_form_kwargs() can use it.
        self.product = get_object_or_404(Product, pk=self.kwargs['pk'])
        return super(ProductAlertCreateView, self).post(request, *args, **kwargs)

    def get_form_kwargs(self):
        kwargs = super(ProductAlertCreateView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        kwargs['product'] = self.product
        return kwargs

    def form_valid(self, form):
        response = super(ProductAlertCreateView, self).form_valid(form)
        # Anonymous alerts must be confirmed by email before becoming active.
        if self.object.is_anonymous:
            utils.send_alert_confirmation(self.object)
        return response

    def get_success_url(self):
        if self.object.user:
            msg = _("An alert has been created")
        else:
            msg = _("A confirmation email has been sent to %s") % self.object.email
        messages.success(self.request, msg)
        return self.object.product.get_absolute_url()
class ProductAlertConfirmView(generic.RedirectView):
    """Confirm an (anonymously created) stock alert identified by its key."""
    permanent = False

    def get(self, request, *args, **kwargs):
        self.alert = get_object_or_404(ProductAlert, key=kwargs['key'])
        self.update_alert()
        return super(ProductAlertConfirmView, self).get(request, *args, **kwargs)

    def update_alert(self):
        # Guard style: bail out with an error message when not confirmable.
        if not self.alert.can_be_confirmed:
            messages.error(self.request, _("Your stock alert cannot be confirmed"))
            return
        self.alert.confirm()
        messages.success(self.request, _("Your stock alert is now active"))

    def get_redirect_url(self, **kwargs):
        return self.alert.product.get_absolute_url()
class ProductAlertCancelView(generic.RedirectView):
    """
    This function allows canceling alerts by supplying the key (used for
    anonymously created alerts) or the pk (used for alerts created by a
    authenticated user).
    Specifying the redirect url is possible by supplying a 'next' GET parameter.
    It defaults to showing the associated product page.
    """
    def get(self, request, *args, **kwargs):
        self.alert = self._lookup_alert(request, kwargs)
        self.update_alert()
        return super(ProductAlertCancelView, self).get(request, *args, **kwargs)

    def _lookup_alert(self, request, kwargs):
        # Anonymous alerts are addressed by key, authenticated ones by pk.
        if 'key' in kwargs:
            return get_object_or_404(ProductAlert, key=kwargs['key'])
        if 'pk' in kwargs and request.user.is_authenticated():
            return get_object_or_404(ProductAlert, user=self.request.user, pk=kwargs['pk'])
        raise Http404

    def update_alert(self):
        if not self.alert.can_be_cancelled:
            messages.error(self.request, _("Your stock alert cannot be cancelled"))
            return
        self.alert.cancel()
        messages.success(self.request, _("Your stock alert has been cancelled"))

    def get_redirect_url(self, **kwargs):
        return self.request.GET.get('next', self.alert.product.get_absolute_url())
| bsd-3-clause |
eJRF/ejrf | questionnaire/forms/questions.py | 1 | 7964 | from django.forms import ModelForm
from django import forms
from django.forms.models import model_to_dict
from django.forms.util import ErrorList
from questionnaire.models import Question, QuestionOption, Questionnaire
from questionnaire.utils.answer_type import AnswerTypes
class QuestionForm(ModelForm):
    """ModelForm for creating/editing questionnaire questions, including
    per-answer-type validation, option handling, and copy-on-write when a
    question already appears in a published questionnaire."""

    # Predefined option sets offered in the UI.
    KNOWN_OPTIONS = ["Yes, No",
                     "Yes, No, NR",
                     "Yes, No, NR, ND",
                     "Male, Female, Both",
                     "Local currency, US $",
                     "National, Sub national",
                     ]
    question_options = []

    def __init__(self, region=None, *args, **kwargs):
        super(QuestionForm, self).__init__(*args, **kwargs)
        self.region = region
        # Adjust labels and choices so the rendered form matches UI wording.
        self.fields['answer_type'].choices = self._set_answer_type_choices()
        self.fields['answer_type'].label = 'Response Type'
        self.fields['text'].label = 'Display label (Online)'
        self.fields['export_label'].label = 'Export label (Detail)'
        self.fields['theme'].empty_label = 'Select theme'
        self.fields['answer_sub_type'].label = 'Response sub type'
        self.fields['answer_sub_type'].attrs = "data-ng-show='allOptions'"
        self.fields['answer_sub_type'].choices = self._set_subtype_choices()

    class Meta:
        # AngularJS ng-options expression used by the sub-type select.
        ANGULAR_OPTIONS_AND_FILTER = "option.text for option in allOptions track by option.value"
        model = Question
        fields = (
            'text', 'export_label', 'instructions', 'answer_type', 'answer_sub_type', 'theme', 'is_primary')
        widgets = {'text': forms.Textarea(attrs={"rows": 6, "cols": 50}),
                   'instructions': forms.Textarea(attrs={"maxlength": 750, "cols":50, "rows": 10},),
                   'answer_type': forms.Select(attrs={"data-ng-model": "answerType"}),
                   'answer_sub_type': forms.Select(attrs={"data-ng-model": "answerSubType",
                                                          "data-ng-options": ANGULAR_OPTIONS_AND_FILTER}),
                   'theme': forms.Select(),
                   'export_label': forms.Textarea(attrs={"rows": 2, "cols": 50})}

    def _set_subtype_choices(self):
        # Replace the empty choice with a friendlier placeholder.
        choices = self.fields['answer_sub_type'].choices
        choices[0] = ('', 'Select a Sub-Type', )
        return choices

    def clean(self):
        # Run the cross-field validations before the standard clean.
        self._clean_options()
        self._clean_export_label()
        self._clean_answer_sub_type()
        self._clean_is_primary()
        return super(QuestionForm, self).clean()

    def _clean_is_primary(self):
        # Multiple-response questions may not be marked primary.
        answer_type = self.cleaned_data.get('answer_type', None)
        is_primary = self.cleaned_data.get('is_primary', None)
        if answer_type == AnswerTypes.MULTIPLE_RESPONSE and is_primary:
            message = "%s questions cannot be primary" % answer_type
            self._errors['is_primary'] = self.error_class([message])
            del self.cleaned_data['is_primary']

    def _clean_options(self):
        # Multi-choice/multiple-response questions need at least one option.
        answer_type = self.cleaned_data.get('answer_type', None)
        options = dict(self.data).get('options', [])
        options = filter(None, options)
        if (answer_type and AnswerTypes.is_mutlichoice_or_multiple(answer_type)) and len(options) < 1:
            message = "%s questions must have at least one option" % answer_type
            self._errors['answer_type'] = self.error_class([message])
            del self.cleaned_data['answer_type']

    def _clean_answer_sub_type(self):
        # Types with sub-types (e.g. numeric/date) must have a valid one.
        answer_type = self.cleaned_data.get('answer_type', None)
        answer_sub_type = self.cleaned_data.get('answer_sub_type', None)
        if answer_type and AnswerTypes.has_subtype(answer_type) and not AnswerTypes.is_valid_sub_type(answer_type,
                                                                                                     answer_sub_type):
            message = "This field is required if you select '%s'" % answer_type
            self._errors['answer_sub_type'] = self.error_class([message])
            del self.cleaned_data['answer_sub_type']

    def _clean_export_label(self):
        # Export label is mandatory for every question.
        export_label = self.cleaned_data.get('export_label', None)
        if not export_label:
            message = "All questions must have export label."
            self._errors['export_label'] = self.error_class([message])
            del self.cleaned_data['export_label']
        return export_label

    def save(self, commit=True):
        # Editing a question used by a PUBLISHED questionnaire must not
        # mutate it: duplicate instead and repoint unpublished usages.
        if self._editing_published_question():
            question = self._duplicate_question()
            self._reassign_to_unpublished_questionnaires(question)
            self._save_options_if_multichoice(question)
            return question
        return self._save(commit)

    def _reassign_to_unpublished_questionnaires(self, question):
        # Swap the original for the duplicate in all unpublished groups,
        # preserving the question's order within each group.
        unpublished_in_questionnaire = self.instance.questionnaires().exclude(status=Questionnaire.PUBLISHED)
        for questionnaire in unpublished_in_questionnaire:
            for group in self.instance.question_groups_in(questionnaire):
                group.question.remove(self.instance)
                group.question.add(question)
                self._assign_order(question, group)

    def _assign_order(self, question, group):
        # Move the order record from the old question to the duplicate.
        parent_group = group.parent or group
        order = self.instance.orders.get(question_group=parent_group)
        question_order = order.order
        order.delete()
        question.orders.create(question_group=parent_group, order=question_order)

    def _duplicate_question(self):
        # Copy all editable fields; link the copy back via 'parent'.
        attributes = model_to_dict(self.instance, exclude=('id',))
        attributes.update({'parent': self.instance})
        del attributes['region']
        del attributes['theme']
        return Question.objects.create(region=self.instance.region, theme=self.instance.theme, **attributes)

    def _editing_published_question(self):
        # True only for an existing question used by a published questionnaire.
        if not (self.instance and self.instance.id):
            return False
        published_in_questionnaire = self.instance.questionnaires().filter(status=Questionnaire.PUBLISHED)
        if published_in_questionnaire.exists():
            return True
        return False

    def _save(self, commit=True):
        # Normal save path: assign a fresh UID and the creating region.
        question = super(QuestionForm, self).save(commit=False)
        question.UID = Question.next_uid()
        if self.region:
            question.region = self.region
        if commit:
            question.save()
            self._save_options_if_multichoice(question)
        return question

    def _save_options_if_multichoice(self, question):
        # Sync submitted options: drop removed ones, (re)create and order
        # the rest in submission order.
        options = dict(self.data).get('options', [])
        options = filter(lambda text: text.strip(), options)
        if options and AnswerTypes.is_mutlichoice_or_multiple(question.answer_type):
            QuestionOption.objects.filter(question=question).exclude(text__in=options).delete()
            for index, option in enumerate(options):
                option, _ = QuestionOption.objects.get_or_create(text=option.strip(), question=question)
                option.order = index
                option.save()

    def _set_answer_type_choices(self):
        # Replace the empty choice with a friendlier placeholder.
        choices = self.fields['answer_type'].choices
        choices[0] = ('', 'Response type', )
        return choices
class QuestionOptionForm(forms.Form):
    """Form used to (re)order the options of a multi-choice question."""
    options = forms.ModelMultipleChoiceField(queryset=None)

    def __init__(self, *args, **kwargs):
        self.question = kwargs.pop('question')
        super(QuestionOptionForm, self).__init__(*args, **kwargs)
        self.fields['options'].queryset = self._set_choices()

    def _set_choices(self):
        return self.question.options.all()

    def save(self):
        # Persist the submitted ordering (only for multi-choice questions).
        submitted = dict(self.data).get('options', [])
        if submitted and AnswerTypes.is_mutlichoice_or_multiple(self.question.answer_type):
            for position, option_id in enumerate(submitted):
                question_option = QuestionOption.objects.get(id=option_id, question=self.question)
                question_option.order = position
                question_option.save()
        return self.question
iniverno/RnR-LLC | simics-3.0-install/simics-3.0.31/scripts/workspace_setup.py | 1 | 23832 | # -*- python -*-
## Copyright 2005-2007 Virtutech AB
##
## The contents herein are Source Code which are a subset of Licensed
## Software pursuant to the terms of the Virtutech Simics Software
## License Agreement (the "Agreement"), and are being distributed under
## the Agreement. You should have received a copy of the Agreement with
## this Licensed Software; if not, please contact Virtutech for a copy
## of the Agreement prior to using this Licensed Software.
##
## By using this Source Code, you agree to be bound by all of the terms
## of the Agreement, and use of this Source Code is subject to the terms
## the Agreement.
##
## This Source Code and any derivatives thereof are provided on an "as
## is" basis. Virtutech makes no warranties with respect to the Source
## Code or any derivatives thereof and disclaims all implied warranties,
## including, without limitation, warranties of merchantability and
## fitness for a particular purpose and non-infringement.
import sys
import os
import shutil
import re
import glob
import stat
from subprocess import *
Create = 1   # workspace is being created from scratch
Upgrade = 2  # an existing workspace is being upgraded

module_building_enabled = True
cygwin_installed = False

# This script must run under Simics' fake-python, which provides 'conf'.
try:
    conf.sim
except:
    sys.exit("This script is intended to be invoked from fake-python.")
from os.path import join, basename, dirname, isfile, isdir, abspath, normpath, exists
from optparse import OptionParser, OptionValueError
from errno import *
def get_simics_root():
    # Installation directory of the running Simics, as reported by 'conf'.
    return conf.sim.simics_base
# Make the Simics scripts directory importable.
scriptdir = join(get_simics_root(), "scripts")
sys.path.append(scriptdir)

# Subdirectory to store workspace metadata stuff (such as workspace-version).
# wxSimics also store data in here.
workspace_properties = ".workspace-properties"

# Only native win32 Python or unix Python are supported, never Cygwin Python.
assert sys.platform != "cygwin"
if sys.platform == "win32":
    # Windows: figure out whether Cygwin is available (needed for building
    # modules).  Module-building is disabled when it is not.
    cygwin_path_ok = False
    # try to locate Cygwin
    for p in os.environ['PATH'].split(";"):
        if isfile(join(p, "cygpath.exe")):
            cygwin_path_ok = True
    if not cygwin_path_ok:
        # Not in PATH: try the default installation location.
        guess_cygroot = "c:\\cygwin"
        if isfile(join(guess_cygroot, "bin", "cygpath.exe")):
            print "Cygwin not in path; but is installed at", guess_cygroot
            print "Adding %s to PATH." % join(guess_cygroot, "bin")
            os.environ['PATH'] = join(guess_cygroot, "bin") + ";" + os.environ['PATH']
    # On windows, we need Cygwin in case the user wants to build modules. To
    # just run Simics, we can do without Cygwin.
    root = os.popen("cygpath --mixed /").readline().strip()
    if len(root) == 0:
        # cygpath did not run: no usable Cygwin.
        cygwin_installed = False
        module_building_enabled = False
        if "--quiet" not in sys.argv:
            print
            print "*" * 75
            print "Warning: Cygwin does not seem to be installed. You will not be able"
            print "         to build Simics modules. Script options related to building"
            print "         modules have been disabled."
            print
            print "If cygwin is installed, you may need to add Cygwin to your PATH:"
            print
            print "c:\> set PATH=c:\\cygwin\\bin;%PATH%"
            print "*" * 75
            print
    else:
        cygwin_installed = True
def cygwin_platform():
    # True when running on Windows with a usable Cygwin installation.
    return sys.platform == "win32" and cygwin_installed
def need_short_form(s):
    """Return whether path *s* contains characters (space, parentheses)
    that require converting it to the DOS 8.3 short form first.

    Returns False instead of the original's implicit None for the
    negative case; all callers use the result in boolean context.
    """
    return any(c in s for c in (" ", "(", ")"))
def run_command(args):
    # Run *args* and return everything the command wrote to stdout.
    proc = Popen(args, stdout=PIPE)
    stdout, _ = proc.communicate()
    return stdout
def is_valid_class_name(module):
    """True if *module* is a valid Simics class name: a letter followed by
    letters, digits, underscores or dashes.

    Uses the module-level 're' import; the original re-imported 're'
    locally on every call.
    """
    return bool(re.match("[A-Za-z][A-Za-z0-9_-]*$", module))
def dos_format_error(s):
    # Fatal: DOS 8.3 conversion was requested for a path that does not
    # exist on disk (cygpath can only shorten existing paths).
    print "Internal error: DOS format conversion requires an existing file or directory:", s
    sys.exit(1)
def run_cygpath(path, style):
    # Convert *path* with the cygpath tool; *style* is the cygpath flag
    # (e.g. "--unix", "--dos", "--windows").
    s = run_command(["cygpath", style, path]).strip()
    if not s:
        # Stupid CygWin bug. Try again
        s = run_cygpath(path, style)
    return s
# convert a native path to valid path for the build system
def buildpath(native_path):
if cygwin_platform():
if need_short_form(native_path):
if not exists(native_path):
dos_format_error(native_path)
else:
native_path = run_cygpath(native_path, "--dos")
return run_cygpath(native_path, "--unix")
else:
return native_path
def unixpath(native_path):
    """Convert a native path to a Cygwin (unix-style) path.

    On Windows without Cygwin this is meaningless and raises; on unix the
    path is already in the right form and is returned unchanged.
    """
    if cygwin_platform():
        return run_cygpath(native_path, "--unix")
    elif sys.platform == "win32":
        # The original raised a bare string, which is invalid in
        # Python >= 2.6; raise a real exception instead.
        raise RuntimeError("Unix path requested on pure win32 platform")
    else:
        return native_path
# convert any kind of path to a native path
def nativepath(any_path):
if cygwin_platform():
return run_cygpath(any_path, "--windows")
else:
return any_path
def build_cygwrap(workspace, options):
    # Compile the 'cygwrap' helper executable into the workspace with
    # Cygwin's gcc.  No-op when Cygwin is not available.
    if not cygwin_installed:
        return
    if options.verbose:
        print "Compiling 'cygwrap' wrapper"
    cmd = '%s -Wall -O2 "%s" -o "%s"' % \
        (nativepath("/bin/gcc"),
         unixpath(join(get_simics_root(), "src", "misc", "cygwrap",
                       "cygwrap.c")),
         unixpath(join(workspace, "cygwrap.exe")))
    if cmd and not options.dry_run:
        print cmd
        status = os.system(cmd)
        if status != 0:
            sys.exit(1)
def get_simics_version():
    # Run 'simics -version' in the installation's bin directory and return
    # its first output line; restores the CWD afterwards.
    d = os.getcwd()
    try:
        os.chdir(join(get_simics_root(), "bin"))
        if sys.platform == "win32":
            cmd = "simics.bat -version"
        else:
            cmd = "./simics -version"
        return os.popen(cmd).readline().strip()
    finally:
        os.chdir(d)
def get_workspace_version(workspace):
    """Return the Simics version string recorded in *workspace*, or None
    if the directory is not a workspace."""
    workspace_version_file = join(workspace, workspace_properties,
                                  "workspace-version")
    if not isfile(workspace_version_file):
        return None
    # Close the handle explicitly (the original version leaked it).
    f = open(workspace_version_file, "r")
    try:
        return f.readline().strip()
    finally:
        f.close()
def is_workspace(workspace):
    # A directory is a workspace iff it carries a workspace-version file.
    # ('is not None' instead of '!= None' per Python idiom.)
    return get_workspace_version(workspace) is not None
def get_mtime(f):
    # Modification time of *f*; 0 for missing files or on stat failure
    # (0 sorts as "older than everything").
    if not isfile(f):
        return 0
    try:
        return os.stat(f)[stat.ST_MTIME]
    except Exception, msg:
        print "Warning: failed to get mtime for", f, msg
        return 0
# Return the time when the workspace was last created/upgraded
def get_workspace_mtime(workspace):
    # The workspace-version file is rewritten on every create/upgrade, so
    # its mtime marks the last setup run.
    return get_mtime(join(workspace, workspace_properties,
                          "workspace-version"))
def get_backup_filename(f):
    """Return the first not-yet-existing backup name for *f*.

    Unix style: ``f.~N~``; Windows style: ``f~N~.backup``.
    """
    n = 1
    while True:
        if sys.platform == "win32":
            candidate = "%s~%d~.backup" % (f, n)
        else:
            candidate = "%s.~%d~" % (f, n)
        if not isfile(candidate):
            return candidate
        n += 1
# Number of files backed up during this run (for reporting).
num_backed_up_files = 0

def backup_file_if_modified(workspace, f):
    # Copy *f* to a backup name if it was modified after the workspace was
    # last created/upgraded (or if either mtime is unknown); exits on
    # copy failure rather than risk clobbering user edits.
    global num_backed_up_files
    if not isfile(f):
        return
    f_mtime = get_mtime(f)
    w_mtime = get_workspace_mtime(workspace)
    if f_mtime > w_mtime or f_mtime == 0 or w_mtime == 0:
        bf = get_backup_filename(f)
        try:
            num_backed_up_files += 1
            shutil.copy(f, bf)
        except IOError, msg:
            print "Failed to backup modified file %s: %s" % (f, msg)
            print "Exiting."
            sys.exit(1)
def create_version_file(workspace, options):
    # Record the current Simics version in the workspace metadata; this
    # also refreshes the mtime used by backup_file_if_modified().
    workspace_version_file = join(workspace, workspace_properties,
                                  "workspace-version")
    if options.dry_run:
        return
    out = open(workspace_version_file, "w")
    out.write(get_simics_version() + "\n")
    out.close()
def makedir(options, path):
    # Create *path* (and parents) unless it already exists; honors
    # --dry-run and --quiet.
    if isdir(path):
        if options.verbose:
            print "Directory already exists: %s" % path
    else:
        if not options.dry_run:
            os.makedirs(path)
        if options.verbose:
            print "Created directory: %s" % path
def create_directories(workspace, options):
    # Create the workspace skeleton: root, metadata dir, and modules dir.
    for d in (workspace,
              join(workspace, workspace_properties),
              join(workspace, "modules")):
        makedir(options, d)
def create_start_script_sh(workspace, options, suffix, args = ""):
    # Write a unix 'simics<suffix>' launcher that sets SIMICS_WORKSPACE and
    # execs the installed binary with *args* plus the user's arguments.
    path = join(workspace, "simics" + suffix)
    if not options.dry_run:
        backup_file_if_modified(workspace, path)
        f = open(path, "w+")
        f.write("#!/bin/sh\n")
        f.write(("# this file will be overwritten by the "
                 "workspace setup script\n"))
        f.write("SIMICS_WORKSPACE=\"%s\"; export SIMICS_WORKSPACE\n" %
                workspace)
        f.write("exec \"%s/simics%s\" %s ${1+\"$@\"}\n" % \
                (join(get_simics_root(), "bin"), suffix, args))
        f.close()
        # Launcher must be executable.
        os.chmod(path, 0755)
    if options.verbose:
        print "Wrote:", path
def create_start_script_bat(workspace, options):
    # Write the Windows 'simics.bat' launcher which sets up the host type,
    # workspace variables and PATH, then calls simics-common.
    path = join(workspace, "simics.bat")
    if not options.dry_run:
        backup_file_if_modified(workspace, path)
        f = open(path, "w+")
        f.write("@echo off\n")
        f.write(("rem this file will be overwritten by the "
                 "workspace setup script\n"))
        f.write("set SIMICS_HOST=x86-win32\n")
        f.write("set SIMICS_WORKSPACE=%s\n" % workspace)
        f.write("set SIMICS_EXTRA_LIB=%s\n" % \
                join(workspace, "x86-win32", "lib"))
        f.write("set PATH=%s\\%%SIMICS_HOST%%\\bin;%%PATH%%\n" % \
                get_simics_root())
        f.write("call simics-common %*\n")
        f.close()
    if options.verbose:
        print "Wrote:", path
def create_start_script(workspace, options):
    # Platform dispatch: .bat on Windows, sh launchers (plain + eclipse)
    # elsewhere.
    if sys.platform == "win32":
        create_start_script_bat(workspace, options)
    else:
        create_start_script_sh(workspace, options, '')
        create_start_script_sh(workspace, options,
                               '-eclipse', '-data "%s"' % workspace)
def create_compiler_makefile(workspace, options):
    # Write compiler.mk with default CC choices per host type.  This file
    # is user-editable, so an existing one is never overwritten.
    if options.dry_run:
        return
    compiler_mk = join(workspace, "compiler.mk")
    if exists(compiler_mk):
        # do not overwrite old one
        return
    f = open(compiler_mk, "w")
    f.write("""\
# -*- makefile -*-
# Select compiler by changing CC for your host type.
ifeq (default,$(origin CC))
ifeq (x86-linux,$(HOST_TYPE))
CC=gcc
endif
ifeq (v9-sol8-64,$(HOST_TYPE))
CC=gcc
endif
ifeq (amd64-linux,$(HOST_TYPE))
CC=gcc
endif
ifeq (x86-win32,$(HOST_TYPE))
CC=/cygdrive/c/MinGW/bin/gcc
endif
endif
""")
    f.close()
    if options.verbose:
        print "Wrote:", compiler_mk
def create_config_makefile(workspace, options):
    # (Re)write config.mk pointing at this Simics installation and
    # workspace.  Unlike compiler.mk, this file is always regenerated
    # (user edits belong in config-user.mk), so back up modified copies.
    if options.dry_run:
        return
    config_mk = join(workspace, "config.mk")
    backup_file_if_modified(workspace, config_mk)
    f = open(config_mk, "w")
    f.write("""\
# -*- makefile -*-
# Do not edit this file.
# This file will be overwritten by the workspace setup script.
SIMICS_BASE=%s
SIMICS_WORKSPACE=%s
# allow user to override HOST_TYPE
ifeq (,$(HOST_TYPE))
HOST_TYPE=$(shell $(SIMICS_BASE)/scripts/host-type.sh)
endif
ifeq (x86-win32,$(HOST_TYPE))
CYGWRAP=%s
endif
include compiler.mk
include $(SIMICS_BASE)/config/config.mk
# Put user definitions in config-user.mk
-include config-user.mk
# Deprecated
-include Config-user.mk
""" % (buildpath(get_simics_root()),
       buildpath(workspace),
       buildpath(join(workspace, 'cygwrap.exe'))))
    f.close()
    if options.verbose:
        print "Wrote:", config_mk
def create_workspace_makefile(workspace, options):
    # Install the master workspace GNUmakefile, backing up a modified copy.
    workspace_mk_src = join(get_simics_root(), "config",
                            "masters",
                            "Makefile.workspace")
    workspace_mk = join(workspace, "GNUmakefile")
    if not options.dry_run:
        backup_file_if_modified(workspace, workspace_mk)
        shutil.copy(workspace_mk_src, workspace_mk)
    if options.verbose:
        print "Wrote:", workspace_mk
def shallow_copy(workspace, options, source_dir, module,
                 translate=lambda s:s):
    # Copy the regular files of *source_dir* into modules/<module>,
    # applying *translate* to both file names and file contents
    # (used to rename skeleton identifiers).  Subdirectories are skipped.
    # Existing module directories are left untouched.
    target_dir = join(workspace, "modules", module)
    if isdir(target_dir):
        if options.verbose:
            print ("Ignoring module %s, directory "
                   "%s already exists." ) % (module, target_dir)
        return
    if options.dry_run:
        return
    if options.verbose:
        print "Creating module directory: %s" % target_dir
    os.makedirs(target_dir)
    for source_file in os.listdir(source_dir):
        if not isfile(join(source_dir, source_file)):
            continue
        content = open(join(source_dir, source_file)).read()
        content = translate(content)
        dest_file = translate(source_file)
        open(join(target_dir, dest_file), "w").write(content)
        if options.verbose:
            print "Wrote", dest_file
def copy_module(workspace, options, module, source_module):
    # Instantiate the *source_module* skeleton as a new module named
    # *module*, rewriting both the module name and its C identifier base
    # (non-word characters become underscores) in all copied files.
    if not is_valid_class_name(module):
        print ("Invalid device name '%s'. Device names must consist of"
               " letters, digits, underscores or dashes, and not start"
               " with a digit."%module)
        return
    idbase = re.sub("[^\w]", "_", module)
    idbase_source = re.sub("[^\w]", "_", source_module)
    if options.verbose:
        print ("Creating module skeleton for module '%s', "
               "C identifier base = '%s'" % (module, idbase))
    source_dir = join(get_simics_root(), "src", "devices", source_module)
    convert = lambda s: (s.replace(source_module, module)
                         .replace(idbase_source, idbase))
    shallow_copy(workspace, options, source_dir, module, convert)
def create_module_skeletons(workspace, options):
    # Create one skeleton per requested device, per implementation language.
    for lang, skel, modules in \
            [ ( "DML", "empty-dml-device", options.dml_modules ),
              ( "C", "empty-device", options.c_modules ),
              ( "Python", "empty-python-device", options.py_modules ) ]:
        for mod in modules:
            copy_module(workspace, options, mod, skel)
def create_copied_modules(workspace, options):
    # Copy the requested stock modules (from src/devices or
    # src/extensions) into the workspace; report any that have no
    # shipped sources, together with the list that do.
    failed = []
    for mod in options.copied_modules:
        # find the source directory
        for subdir in ("devices", "extensions"):
            source_dir = join(get_simics_root(), "src", subdir, mod)
            if isdir(source_dir): break
        else:
            failed.append(mod)
            continue
        shallow_copy(workspace, options, source_dir, mod)
    if failed:
        print "Failed to find the source of modules:", ' '.join(failed)
        possible = []
        for subdir in ("devices", "extensions"):
            source_dir = join(get_simics_root(), "src", subdir)
            try:
                possible.extend(os.listdir(source_dir));
            except EnvironmentError:
                pass
        if possible:
            possible.sort()
            print "Source is available for:", ' '.join(possible)
        else:
            print "Source is not available for any module"
def create_machine_scripts(workspace, options):
    # For every shipped target script, create a small wrapper in
    # targets/<target>/ inside the workspace that chains to the installed
    # script.  Modified wrappers are backed up first.
    if options.dry_run:
        return
    if options.verbose:
        print "Creating target start scripts"
    for f in glob.glob(join(get_simics_root(), "targets", "*", "*.simics")):
        script = basename(f)
        target = basename(dirname(f))
        target_ws_dir = join(workspace, "targets", target)
        if not isdir(target_ws_dir):
            try:
                os.makedirs(target_ws_dir)
            except OSError, msg:
                print ("Failed to create directory for target %s: %s"
                       % (target, msg))
                continue
        script_path = join(target_ws_dir, script)
        backup_file_if_modified(workspace, script_path)
        f = open(script_path, "w")
        f.write("add-directory \"%script%\"\n")
        f.write("run-command-file \"%%simics%%/targets/%s/%s\"\n"
                % (target, script))
        f.close()
def print_version(*args, **kwargs):
    # optparse --version callback: print version info and terminate.
    # The positional/keyword arguments supplied by optparse are ignored.
    print
    print "Simics version: ", get_simics_version()
    print "Installed at: ", get_simics_root()
    print
    sys.exit(0)
if __name__ == "__main__":
descr = """\
Creates or updates a Simics workspace for user scripts and modules.
If workspace directory is omitted, the current working directory is used.
"""
# Do not forget to update the programming-guide when adding new switches
parser = OptionParser(usage = ("workspace-setup [options] [workspace]"),
description = descr)
parser.add_option("-v", "--version",
action = "callback",
callback = print_version,
help = ("Prints information about Simics "
"(version, installation directory)."))
parser.add_option("-n", "--dry-run",
action = "store_true",
dest = "dry_run",
help = ("Execute normally, "
"but do change or create any files."))
parser.add_option("-q", "--quiet",
dest = "verbose",
action = "store_false",
default = True,
help = ("Do not print any info about the "
"actions taken by the script."))
parser.add_option("--force",
dest = "force",
action = "store_true",
help = ("Force using a non-empty directory as "
"workspace. Note: even with this option, "
"modules in the module sub-directory will "
"*not* be overwritten. Modules must be "
"removed manually before they can be "
"overwritten."))
parser.add_option("--check-workspace-version",
dest = "check_version",
action = "store_true",
help = ("Check the version of the workspace, and "
"return 1 if it needs creating/upgrading, "
"0 otherwise."))
if module_building_enabled:
parser.add_option("--device", "--dml-device",
action = "append",
metavar = "DEVICE_NAME",
dest = "dml_modules",
default = [],
help = ("Generate skeleton code for a device, "
"suitable to use as a starting point for "
"implementing your own device. The default "
"implementation language is DML. See the "
"--c-device and --py-device options for "
"creating devices using other languages. "
"If the device already exists, this option "
"is ignored. To recreate the skeleton, remove "
"the device directory."))
parser.add_option("--c-device",
action = "append",
metavar = "DEVICE_NAME",
dest = "c_modules",
default = [],
help = ("Similar to --device, but creates a device "
"using C instead of DML."))
parser.add_option("--py-device",
action = "append",
metavar = "DEVICE_NAME",
dest = "py_modules",
default = [],
help = ("Similar to --device, but creates a device "
"using Python instead of DML."))
parser.add_option("--copy-device",
action = "append",
metavar = "MODULE_NAME",
dest = "copied_modules",
default = [],
help = ("Copies the source for a sample device/module "
"into the workspace. The files will be "
"copied from the Simics installation. "
"If the device already exist in your "
"workspace, you must manually delete it "
"first."))
(options, args) = parser.parse_args()
if len(args) == 0:
workspace_raw = os.getcwd()
workspace = workspace_raw
explicit_workspace = False
elif len(args) == 1:
# make sure that the workspace passed as argument is a native path
workspace_raw = args[0]
workspace = abspath(nativepath(workspace_raw))
explicit_workspace = True
else:
parser.error("incorrect number of arguments")
if options.check_version:
if is_workspace(workspace):
if get_workspace_version(workspace) != get_simics_version():
print "Workspace needs upgrading:", workspace_raw
sys.exit(1)
else:
print "Workspace is up-to-date:", workspace_raw
sys.exit(0)
else:
print "Workspace does not exist:", workspace_raw
sys.exit(1)
if isdir(workspace) and not is_workspace(workspace) and \
not options.force and len(os.listdir(workspace)) > 0:
print "The workspace directory:"
print "\n\t%s\n" % workspace_raw
print "already exists and is non-empty. Select another directory,"
print "or use the --force flag."
sys.exit(1)
if module_building_enabled:
if options.dml_modules and not isdir(join(get_simics_root(), "src",
"devices", "empty-dml-device")):
print "The DML toolkit is required to create DML devices."
options.dml_modules = []
if options.verbose:
print "Setting up Simics workspace directory:",
print workspace_raw
create_directories(workspace, options)
# this needs to be done before the build makefiles are written
# to make sure that cygwrap.exe exists
if cygwin_platform() and not isfile(join(workspace, "cygwrap.exe")):
build_cygwrap(workspace, options)
# start-scripts
create_start_script(workspace, options)
# makefiles
create_compiler_makefile(workspace, options)
create_config_makefile(workspace, options)
create_workspace_makefile(workspace, options)
# modules
if module_building_enabled:
create_module_skeletons(workspace, options)
create_copied_modules(workspace, options)
# machine-scripts
create_machine_scripts(workspace, options)
if num_backed_up_files > 0:
print "Backed up %d files during workspace upgrade." % num_backed_up_files
# should be last, so the workspace-version file has mtime larger
# than all other created files
create_version_file(workspace, options)
if not options.verbose:
sys.exit(0)
# NOTE: only informative printouts below
print
if module_building_enabled:
print "Finished. To build devices:"
print
if explicit_workspace:
print "\t$ cd %s" % workspace_raw
print "\t$ make"
print
print "To start Simics, use the scripts under targets/<arch>/. "
print "For example:"
print
if explicit_workspace and not module_building_enabled:
print "\t$ cd %s" % workspace_raw
if sys.platform == "win32":
print "\t$ ./simics.bat targets/ebony/ebony-linux-firststeps.simics"
else:
print "\t$ ./simics targets/ebony/ebony-linux-firststeps.simics"
print
| gpl-2.0 |
DavidLP/home-assistant | homeassistant/components/websocket_api/sensor.py | 7 | 1473 | """Entity to track connections to websocket API."""
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from .const import (
SIGNAL_WEBSOCKET_CONNECTED, SIGNAL_WEBSOCKET_DISCONNECTED,
DATA_CONNECTIONS)
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the API streams platform."""
    # A single sensor entity tracks the number of websocket connections.
    async_add_entities([APICount()])
class APICount(Entity):
    """Entity reporting how many clients are connected to the stream API."""

    def __init__(self):
        """Initialize the API count."""
        self.count = None

    async def async_added_to_hass(self):
        """Subscribe to connect/disconnect signals once added to hass."""
        subscribe = self.hass.helpers.dispatcher.async_dispatcher_connect
        for signal in (SIGNAL_WEBSOCKET_CONNECTED,
                       SIGNAL_WEBSOCKET_DISCONNECTED):
            subscribe(signal, self._update_count)
        self._update_count()

    @property
    def name(self):
        """Return name of entity."""
        return "Connected clients"

    @property
    def state(self):
        """Return current API count."""
        return self.count

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return "clients"

    @callback
    def _update_count(self):
        """Refresh the connection count and push the new state."""
        self.count = self.hass.data.get(DATA_CONNECTIONS, 0)
        self.async_schedule_update_ha_state()
| apache-2.0 |
oblique-labs/pyVM | rpython/conftest.py | 1 | 3299 | import py, pytest
from rpython.tool import leakfinder
pytest_plugins = 'rpython.tool.pytest.expecttest'
option = None
def braindead_deindent(self):
    """monkeypatch that wont end up doing stupid in the python tokenizer"""
    dedented = py.std.textwrap.dedent('\n'.join(self.lines))
    patched_source = py.code.Source()
    patched_source.lines[:] = dedented.splitlines()
    return patched_source

# Replace py's own deindent with the tokenizer-safe variant above.
py.code.Source.deindent = braindead_deindent
def pytest_report_header():
    """Show which pytest version/installation is running in the header."""
    return "pytest-{} from {}".format(pytest.__version__, pytest.__file__)
def pytest_configure(config):
    """Record the parsed options and select the translation platform."""
    global option
    option = config.option

    from rpython.config.translationoption import PLATFORMS
    from rpython.translator.platform import set_platform

    chosen = config.option.platform
    if chosen not in PLATFORMS:
        raise ValueError("%s not in %s" % (chosen, PLATFORMS))
    set_platform(chosen, None)
def pytest_addoption(parser):
    """Register rpython- and JIT-specific command line options."""
    rpython_group = parser.getgroup("rpython options")
    rpython_group.addoption(
        '--view', action="store_true", dest="view", default=False,
        help="view translation tests' flow graphs with Pygame")
    rpython_group.addoption(
        '-P', '--platform', action="store", dest="platform",
        type="string", default="host",
        help="set up tests to use specified platform as compile/run target")

    jit_group = parser.getgroup("JIT options")
    jit_group.addoption(
        '--viewloops', action="store_true", default=False, dest="viewloops",
        help="show only the compiled loops")
    jit_group.addoption(
        '--viewdeps', action="store_true", default=False, dest="viewdeps",
        help="show the dependencies that have been constructed from a trace")
def pytest_addhooks(pluginmanager):
    """Install the allocation-leak tracking plugin."""
    finder = LeakFinder()
    pluginmanager.register(finder)
class LeakFinder:
    """Track memory allocations during test execution.

    So far, only used by the function lltype.malloc(flavor='raw').
    """
    @pytest.hookimpl(trylast=True)
    def pytest_runtest_setup(self, item):
        # Only instrument real test functions, and honour the per-test
        # opt-out attribute `dont_track_allocations`.
        if not isinstance(item, py.test.collect.Function):
            return
        if not getattr(item.obj, 'dont_track_allocations', False):
            leakfinder.start_tracking_allocations()

    @pytest.hookimpl(trylast=True)
    def pytest_runtest_call(self, item):
        if not isinstance(item, py.test.collect.Function):
            return
        # NOTE(review): with trylast=True this presumably runs only when
        # earlier call-phase hooks (including the actual test call) did not
        # raise, so _success marks a passing test -- confirm hook ordering.
        item._success = True

    @pytest.hookimpl(trylast=True)
    def pytest_runtest_teardown(self, item):
        if not isinstance(item, py.test.collect.Function):
            return
        if (not getattr(item.obj, 'dont_track_allocations', False)
            and leakfinder.TRACK_ALLOCATIONS):
            kwds = {}
            try:
                # tests may provide a custom collection hook for the check
                kwds['do_collection'] = item.track_allocations_collect
            except AttributeError:
                pass
            item._pypytest_leaks = leakfinder.stop_tracking_allocations(False,
                                                                        **kwds)
        else:            # stop_tracking_allocations() already called
            item._pypytest_leaks = None

        # check for leaks, but only if the test passed so far
        if getattr(item, '_success', False) and item._pypytest_leaks:
            raise leakfinder.MallocMismatch(item._pypytest_leaks)
| mit |
alexandrul-ci/robotframework | atest/run.py | 1 | 4480 | #!/usr/bin/env python
"""A script for running Robot Framework's acceptance tests.
Usage: atest/run.py interpreter [options] datasource(s)
Data sources are paths to directories or files under the `atest/robot` folder.
Available options are the same that can be used with Robot Framework.
See its help (e.g. `robot --help`) for more information.
The specified interpreter is used by acceptance tests under `atest/robot` to
run test cases under `atest/testdata`. It can be the name of the interpreter
like (e.g. `python` or `jython`, a path to the selected interpreter like
`/usr/bin/python26`, or a path to the standalone jar distribution (e.g.
`dist/robotframework-2.9dev234.jar`). If the interpreter itself needs
arguments, the interpreter and arguments need to be quoted like `"py -3"`.
As a special case the interpreter value `standalone` will compile a new
standalone jar from the current sources and execute the acceptance tests with
it.
Note that this script itself must always be executed with Python 2.7.
Examples:
$ atest/run.py python --test example atest/robot
$ atest/run.py /opt/jython27/bin/jython atest/robot/tags/tag_doc.robot
> atest\\run.py "py -3" -e no-ci atest\\robot
"""
from __future__ import print_function
import os
import shutil
import signal
import subprocess
import sys
import tempfile
from os.path import abspath, dirname, exists, join, normpath
from interpreter import InterpreterFactory
CURDIR = dirname(abspath(__file__))
sys.path.append(join(CURDIR, '..'))
try:
    from tasks import jar
except ImportError:
    # 'invoke' (which provides tasks.jar) is optional; fail lazily with a
    # clear message only if the standalone jar is actually requested.
    def jar(*args, **kwargs):
        raise RuntimeError("Creating jar distribution requires 'invoke'.")

# Template for the robot command line used to run the acceptance tests.
# The placeholders are filled in by _get_arguments().
ARGUMENTS = '''
--doc Robot Framework acceptance tests
--metadata interpreter:{interpreter.name} {interpreter.version} on {interpreter.os}
--variablefile {variable_file};{interpreter.path};{interpreter.name};{interpreter.version}
--pythonpath {pythonpath}
--outputdir {outputdir}
--splitlog
--console dotted
--consolewidth 100
--SuiteStatLevel 3
--TagStatExclude no-*
'''.strip()
def atests(interpreter, *arguments):
    """Run the acceptance tests with `interpreter`; return the exit code."""
    if interpreter == 'standalone':
        # Build a fresh standalone jar from the current sources.
        interpreter = jar()
    try:
        selected = InterpreterFactory(interpreter)
    except ValueError as err:
        sys.exit(err)
    outputdir, tempdir = _get_directories(selected)
    robot_args = list(_get_arguments(selected, outputdir)) + list(arguments)
    return _run(robot_args, tempdir, selected.classpath)
def _get_directories(interpreter):
    """Return clean (outputdir, tempdir) paths specific to `interpreter`."""
    name = '{i.name}-{i.version}-{i.os}'.format(i=interpreter).replace(' ', '')
    outputdir = dos_to_long(join(CURDIR, 'results', name))
    tempdir = dos_to_long(join(tempfile.gettempdir(), 'robottests', name))
    # Start from a clean slate: drop leftovers from any earlier run.
    for directory in (outputdir, tempdir):
        if exists(directory):
            shutil.rmtree(directory)
    os.makedirs(tempdir)
    return outputdir, tempdir
def _get_arguments(interpreter, outputdir):
    """Yield the default robot options, then the interpreter's excludes."""
    formatted = ARGUMENTS.format(interpreter=interpreter,
                                 variable_file=join(CURDIR, 'interpreter.py'),
                                 pythonpath=join(CURDIR, 'resources'),
                                 outputdir=outputdir)
    for line in formatted.splitlines():
        # Each template line is '--option value'; yield both parts.
        for part in line.split(' ', 1):
            yield part
    for exclude in interpreter.excludes:
        yield '--exclude'
        yield exclude
def _run(args, tempdir, classpath):
    """Execute robot's run.py in a subprocess and return its exit code."""
    runner = normpath(join(CURDIR, '..', 'src', 'robot', 'run.py'))
    command = [sys.executable, runner]
    command.extend(args)
    environ = dict(os.environ)
    environ.update(TEMPDIR=tempdir, CLASSPATH=classpath or '')
    print('Running command:\n%s\n' % ' '.join(command))
    sys.stdout.flush()
    # Let the child process deal with Ctrl-C; the parent just waits.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    return subprocess.call(command, env=environ)
def dos_to_long(path):
    """Convert Windows paths in DOS format (e.g. exampl~1.txt) to long format.

    This is done to avoid problems when later comparing paths. Especially
    IronPython handles DOS paths inconsistently.
    """
    # Only applies on Windows, for paths that actually contain a DOS-style
    # '~' component and exist on disk; everything else passes through.
    if not (os.name == 'nt' and '~' in path and os.path.exists(path)):
        return path
    from ctypes import create_unicode_buffer, windll
    buf = create_unicode_buffer(500)
    # NOTE(review): path.decode('mbcs')/buf.value.encode('mbcs') assumes
    # `path` is a byte string (Python 2 semantics); on Python 3 str input
    # this branch would raise AttributeError -- confirm target interpreters.
    windll.kernel32.GetLongPathNameW(path.decode('mbcs'), buf, 500)
    return buf.value.encode('mbcs')
if __name__ == '__main__':
    # With no arguments (or --help) show the module usage text and exit
    # with the special code 251; otherwise forward everything to atests().
    if len(sys.argv) == 1 or '--help' in sys.argv:
        print(__doc__)
        sys.exit(251)
    sys.exit(atests(*sys.argv[1:]))
| apache-2.0 |
jsirois/pex | tests/test_dist_metadata.py | 2 | 10084 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import os
import tarfile
import warnings
from contextlib import contextmanager
from textwrap import dedent
import pytest
from pex.common import open_zip, temporary_dir
from pex.dist_metadata import (
MetadataError,
ProjectNameAndVersion,
project_name_and_version,
requires_dists,
requires_python,
)
from pex.orderedset import OrderedSet
from pex.pex_warnings import PEXWarning
from pex.pip import get_pip
from pex.third_party.packaging.specifiers import SpecifierSet
from pex.third_party.pkg_resources import Distribution, Requirement
from pex.typing import TYPE_CHECKING
from pex.util import DistributionHelper
from pex.variables import ENV
if TYPE_CHECKING:
from typing import Tuple, Iterator, Any
# Context manager: install the given wheel into a temporary directory and
# yield the resulting installed Distribution.
@contextmanager
def installed_wheel(wheel_path):
    # type: (str) -> Iterator[Distribution]
    with temporary_dir() as install_dir:
        get_pip().spawn_install_wheel(wheel=wheel_path, install_dir=install_dir).wait()
        dist = DistributionHelper.distribution_from_path(install_dir)
        assert dist is not None, "Could not load a distribution from {}.".format(install_dir)
        yield dist


# Context manager: install a wheel checked in under tests/example_packages
# and yield both its path and the installed Distribution.
@contextmanager
def example_distribution(name):
    # type: (str) -> Iterator[Tuple[str, Distribution]]
    wheel_path = os.path.join("./tests/example_packages", name)
    with installed_wheel(wheel_path) as distribution:
        yield wheel_path, distribution


# Context manager: download the sdist for `requirement` (no dependencies,
# wheels disallowed) and yield the path of the single downloaded archive.
@contextmanager
def downloaded_sdist(requirement):
    # type: (str) -> Iterator[str]
    with temporary_dir() as td:
        download_dir = os.path.join(td, "download")
        get_pip().spawn_download_distributions(
            download_dir=download_dir,
            requirements=[requirement],
            transitive=False,
            use_wheel=False,
        ).wait()
        dists = os.listdir(download_dir)
        assert len(dists) == 1, "Expected 1 dist to be downloaded for {}.".format(requirement)
        sdist = os.path.join(download_dir, dists[0])
        assert sdist.endswith((".sdist", ".tar.gz", ".zip"))
        yield sdist
def as_requirement(project_name_and_version):
    # type: (ProjectNameAndVersion) -> str
    """Render a pinned requirement string, e.g. 'pip==20.3.1'."""
    pnav = project_name_and_version
    return "%s==%s" % (pnav.project_name, pnav.version)
# Known project whose sdist is published as a .zip archive.
PYGOOGLEEARTH_PROJECT_NAME_AND_VERSION = ProjectNameAndVersion("pygoogleearth", "0.0.2")


@pytest.fixture(scope="module")
def pygoogleearth_zip_sdist():
    # type: () -> Iterator[str]
    # Downloads once per test module; yields the path to the .zip sdist.
    with downloaded_sdist(as_requirement(PYGOOGLEEARTH_PROJECT_NAME_AND_VERSION)) as sdist:
        assert sdist.endswith(".zip")
        yield sdist


# Known project whose sdist is published as a .tar.gz archive.
PIP_PROJECT_NAME_AND_VERSION = ProjectNameAndVersion("pip", "20.3.1")


@pytest.fixture(scope="module")
def pip_tgz_sdist():
    # type: () -> Iterator[str]
    # Downloads once per test module; yields the path to the .tar.gz sdist.
    with downloaded_sdist(as_requirement(PIP_PROJECT_NAME_AND_VERSION)) as sdist:
        assert sdist.endswith(".tar.gz")
        yield sdist
# Wheel built from the pip sdist above; exercises the sdist -> wheel path.
@pytest.fixture(scope="module")
def pip_wheel(pip_tgz_sdist):
    # type: (str) -> Iterator[str]
    with temporary_dir() as wheel_dir:
        get_pip().spawn_build_wheels([pip_tgz_sdist], wheel_dir=wheel_dir).wait()
        wheels = os.listdir(wheel_dir)
        assert len(wheels) == 1, "Expected 1 wheel to be built for {}.".format(pip_tgz_sdist)
        wheel = os.path.join(wheel_dir, wheels[0])
        assert wheel.endswith(".whl")
        yield wheel


# Installed Distribution backed by the pip wheel above.
@pytest.fixture(scope="module")
def pip_distribution(pip_wheel):
    # type: (str) -> Iterator[Distribution]
    with installed_wheel(pip_wheel) as distribution:
        yield distribution
# Filename parsing alone should recover name/version from zip sdists,
# tar.gz sdists and wheels alike.
def test_project_name_and_version_from_filename(
    pygoogleearth_zip_sdist,  # type: str
    pip_tgz_sdist,  # type: str
    pip_wheel,  # type: str
):
    # type: (...) -> None
    assert PYGOOGLEEARTH_PROJECT_NAME_AND_VERSION == ProjectNameAndVersion.from_filename(
        pygoogleearth_zip_sdist
    )
    assert PIP_PROJECT_NAME_AND_VERSION == ProjectNameAndVersion.from_filename(pip_tgz_sdist)
    assert PIP_PROJECT_NAME_AND_VERSION == ProjectNameAndVersion.from_filename(pip_wheel)


# PEP 625 style `.sdist` archive names are also understood.
def test_project_name_and_version_from_filename_pep625():
    # type: () -> None
    assert ProjectNameAndVersion(
        "a-distribution-name", "1.2.3"
    ) == ProjectNameAndVersion.from_filename("a-distribution-name-1.2.3.sdist")


# Unrecognized archive names raise MetadataError instead of guessing.
def test_project_name_and_version_from_filename_invalid():
    # type: () -> None
    with pytest.raises(MetadataError):
        ProjectNameAndVersion.from_filename("unknown_distribution.format")


# With the filename fallback disabled, the data must come from embedded
# metadata only -- for every distribution format.
def test_project_name_and_version_from_metadata(
    pygoogleearth_zip_sdist,  # type: str
    pip_tgz_sdist,  # type: str
    pip_wheel,  # type: str
    pip_distribution,  # type: Distribution
):
    # type: (...) -> None
    assert PYGOOGLEEARTH_PROJECT_NAME_AND_VERSION == project_name_and_version(
        pygoogleearth_zip_sdist, fallback_to_filename=False
    )
    assert PIP_PROJECT_NAME_AND_VERSION == project_name_and_version(
        pip_tgz_sdist, fallback_to_filename=False
    )
    assert PIP_PROJECT_NAME_AND_VERSION == project_name_and_version(
        pip_wheel, fallback_to_filename=False
    )
    assert PIP_PROJECT_NAME_AND_VERSION == project_name_and_version(
        pip_distribution, fallback_to_filename=False
    )
# When an sdist has no PKG-INFO at the expected in-archive location, a
# PEXWarning is emitted and the filename is used only if fallback is on.
def test_project_name_and_version_fallback(tmpdir):
    # type: (Any) -> None
    def tmp_path(relpath):
        # type: (str) -> str
        return os.path.join(str(tmpdir), relpath)

    expected_metadata_project_name_and_version = ProjectNameAndVersion("foo", "1.2.3")

    pkg_info_src = tmp_path("PKG-INFO")
    with open(pkg_info_src, "w") as fp:
        fp.write("Name: {}\n".format(expected_metadata_project_name_and_version.project_name))
        fp.write("Version: {}\n".format(expected_metadata_project_name_and_version.version))

    sdist_path = tmp_path("bar-baz-4.5.6.tar.gz")
    with tarfile.open(sdist_path, mode="w:gz") as tf:
        # N.B.: Valid PKG-INFO at an invalid location.
        tf.add(pkg_info_src, arcname="PKG-INFO")

    with ENV.patch(PEX_EMIT_WARNINGS="True"), warnings.catch_warnings(record=True) as events:
        # Metadata-only lookup fails and warns about the misplaced PKG-INFO.
        assert project_name_and_version(sdist_path, fallback_to_filename=False) is None
        assert 1 == len(events)
        warning = events[0]
        assert PEXWarning == warning.category
        assert "bar-baz-4.5.6/PKG-INFO" in str(warning.message)

    # The filename fallback recovers name/version from the archive name.
    assert ProjectNameAndVersion("bar-baz", "4.5.6") == project_name_and_version(
        sdist_path, fallback_to_filename=True
    )

    name_and_version = "eggs-7.8.9"
    pkf_info_path = "{}/PKG-INFO".format(name_and_version)

    def write_sdist_tgz(extension):
        # helper: build an archive with PKG-INFO at the standard location
        sdist_path = tmp_path("{}.{}".format(name_and_version, extension))
        with tarfile.open(sdist_path, mode="w:gz") as tf:
            tf.add(pkg_info_src, arcname=pkf_info_path)
        return sdist_path

    assert expected_metadata_project_name_and_version == project_name_and_version(
        write_sdist_tgz("tar.gz"), fallback_to_filename=False
    )

    assert expected_metadata_project_name_and_version == project_name_and_version(
        write_sdist_tgz("sdist"), fallback_to_filename=False
    )

    zip_sdist_path = tmp_path("{}.zip".format(name_and_version))
    with open_zip(zip_sdist_path, mode="w") as zf:
        zf.write(pkg_info_src, arcname=pkf_info_path)
    assert expected_metadata_project_name_and_version == project_name_and_version(
        zip_sdist_path, fallback_to_filename=False
    )
# The same Requires-Python specifier should be readable from sdist, wheel
# and installed distribution alike.
def test_requires_python(
    pip_tgz_sdist,  # type: str
    pip_wheel,  # type: str
    pip_distribution,  # type: Distribution
):
    # type: (...) -> None
    expected_requires_python = SpecifierSet(">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*")
    assert expected_requires_python == requires_python(pip_tgz_sdist)
    assert expected_requires_python == requires_python(pip_wheel)
    assert expected_requires_python == requires_python(pip_distribution)


# Distributions that declare no Requires-Python yield None.
def test_requires_python_none(pygoogleearth_zip_sdist):
    # type: (str) -> None
    assert requires_python(pygoogleearth_zip_sdist) is None
    with example_distribution("aws_cfn_bootstrap-1.4-py2-none-any.whl") as (wheel_path, dist):
        assert requires_python(wheel_path) is None
        assert requires_python(dist) is None


# Requires-Dist entries come back as pkg_resources Requirement objects.
def test_requires_dists():
    # type: () -> None
    with example_distribution("aws_cfn_bootstrap-1.4-py2-none-any.whl") as (wheel_path, dist):
        expected_requirements = [
            Requirement.parse(req)
            for req in ("python-daemon>=1.5.2,<2.0", "pystache>=0.4.0", "setuptools")
        ]
        assert expected_requirements == list(requires_dists(wheel_path))
        assert expected_requirements == list(requires_dists(dist))


# No declared dependencies yields an empty list, and legacy `Requires`
# fields are ignored with a PEXWarning (see the linked pex issue).
def test_requires_dists_none(pygoogleearth_zip_sdist):
    # type: (str) -> None
    assert [] == list(requires_dists(pygoogleearth_zip_sdist))
    with example_distribution("MarkupSafe-1.0-cp27-cp27mu-linux_x86_64.whl") as (wheel_path, dist):
        assert [] == list(requires_dists(wheel_path))
        assert [] == list(requires_dists(dist))

    # This tests a strange case detailed here:
    #   https://github.com/pantsbuild/pex/issues/1201#issuecomment-791715585
    with downloaded_sdist("et-xmlfile==1.0.1") as sdist, warnings.catch_warnings(
        record=True
    ) as events:
        assert [] == list(requires_dists(sdist))
        assert len(events) == 1
        warning = events[0]
        assert PEXWarning == warning.category
        assert (
            dedent(
                """\
                Ignoring 1 `Requires` field in {sdist} metadata:
                1.) Requires: python (>=2.6.0)
                You may have issues using the 'et_xmlfile' distribution as a result.
                More information on this workaround can be found here:
                https://github.com/pantsbuild/pex/issues/1201#issuecomment-791715585
                """
            ).format(sdist=sdist)
            == str(warning.message)
        )
| apache-2.0 |
minrk/sympy | sympy/polys/tests/test_densebasic.py | 1 | 20716 | """Tests for dense recursive polynomials' basic tools. """
from sympy.polys.densebasic import (
dup_LC, dmp_LC,
dup_TC, dmp_TC,
dmp_ground_LC, dmp_ground_TC,
dmp_true_LT,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dup_strip, dmp_strip,
dmp_validate,
dup_reverse,
dup_copy, dmp_copy,
dup_normal, dmp_normal,
dup_convert, dmp_convert,
dup_from_sympy, dmp_from_sympy,
dup_nth, dmp_nth, dmp_ground_nth,
dmp_zero_p, dmp_zero,
dmp_one_p, dmp_one,
dmp_ground_p, dmp_ground,
dmp_negative_p, dmp_positive_p,
dmp_zeros, dmp_grounds,
dup_from_dict, dup_from_raw_dict,
dup_to_dict, dup_to_raw_dict,
dmp_from_dict, dmp_to_dict,
dmp_swap, dmp_permute,
dmp_nest, dmp_raise,
dup_deflate, dmp_deflate,
dup_multi_deflate, dmp_multi_deflate,
dup_inflate, dmp_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd,
dmp_list_terms, dmp_apply_pairs,
dup_slice, dmp_slice, dmp_slice_in,
dup_random,
)
from sympy.polys.specialpolys import (
f_0, f_1, f_2, f_3, f_4, f_5, f_6
)
from sympy.polys.polyclasses import DMP
from sympy.polys.domains import ZZ, QQ
from sympy.core.singleton import S
from sympy.utilities.pytest import raises
from sympy.utilities import all
# --- leading/trailing coefficient helpers ---

def test_dup_LC():
    assert dup_LC([], ZZ) == 0
    assert dup_LC([2,3,4,5], ZZ) == 2

def test_dup_TC():
    assert dup_TC([], ZZ) == 0
    assert dup_TC([2,3,4,5], ZZ) == 5

def test_dmp_LC():
    assert dmp_LC([[]], ZZ) == []
    assert dmp_LC([[2,3,4],[5]], ZZ) == [2,3,4]
    assert dmp_LC([[[]]], ZZ) == [[]]
    assert dmp_LC([[[2],[3,4]],[[5]]], ZZ) == [[2],[3,4]]

def test_dmp_TC():
    assert dmp_TC([[]], ZZ) == []
    assert dmp_TC([[2,3,4],[5]], ZZ) == [5]
    assert dmp_TC([[[]]], ZZ) == [[]]
    assert dmp_TC([[[2],[3,4]],[[5]]], ZZ) == [[5]]

# the ground variants look through all nesting levels down to the constants
def test_dmp_ground_LC():
    assert dmp_ground_LC([[]], 1, ZZ) == 0
    assert dmp_ground_LC([[2,3,4],[5]], 1, ZZ) == 2
    assert dmp_ground_LC([[[]]], 2, ZZ) == 0
    assert dmp_ground_LC([[[2],[3,4]],[[5]]], 2, ZZ) == 2

def test_dmp_ground_TC():
    assert dmp_ground_TC([[]], 1, ZZ) == 0
    assert dmp_ground_TC([[2,3,4],[5]], 1, ZZ) == 5
    assert dmp_ground_TC([[[]]], 2, ZZ) == 0
    assert dmp_ground_TC([[[2],[3,4]],[[5]]], 2, ZZ) == 5

# leading term reported as ((monomial exponents), coefficient)
def test_dmp_true_LT():
    assert dmp_true_LT([[]], 1, ZZ) == ((0, 0), 0)
    assert dmp_true_LT([[7]], 1, ZZ) == ((0, 0), 7)
    assert dmp_true_LT([[1,0]], 1, ZZ) == ((0, 1), 1)
    assert dmp_true_LT([[1],[]], 1, ZZ) == ((1, 0), 1)
    assert dmp_true_LT([[1,0],[]], 1, ZZ) == ((1, 1), 1)
# --- degree queries: the zero polynomial has degree -1 ---

def test_dup_degree():
    assert dup_degree([]) == -1
    assert dup_degree([1]) == 0
    assert dup_degree([1,0]) == 1
    assert dup_degree([1,0,0,0,1]) == 4

def test_dmp_degree():
    assert dmp_degree([[]], 1) == -1
    assert dmp_degree([[[]]], 2) == -1
    assert dmp_degree([[1]], 1) == 0
    assert dmp_degree([[2],[1]], 1) == 1

# degree with respect to a particular variable index
def test_dmp_degree_in():
    assert dmp_degree_in([[[]]], 0, 2) == -1
    assert dmp_degree_in([[[]]], 1, 2) == -1
    assert dmp_degree_in([[[]]], 2, 2) == -1
    assert dmp_degree_in([[[1]]], 0, 2) == 0
    assert dmp_degree_in([[[1]]], 1, 2) == 0
    assert dmp_degree_in([[[1]]], 2, 2) == 0
    assert dmp_degree_in(f_4, 0, 2) == 9
    assert dmp_degree_in(f_4, 1, 2) == 12
    assert dmp_degree_in(f_4, 2, 2) == 8
    assert dmp_degree_in(f_6, 0, 2) == 4
    assert dmp_degree_in(f_6, 1, 2) == 4
    assert dmp_degree_in(f_6, 2, 2) == 6
    assert dmp_degree_in(f_6, 3, 3) == 3
    raises(IndexError, "dmp_degree_in([[1]], -5, 1)")

# degrees in all variables at once
def test_dmp_degree_list():
    assert dmp_degree_list([[[[ ]]]], 3) == (-1,-1,-1,-1)
    assert dmp_degree_list([[[[1]]]], 3) == ( 0, 0, 0, 0)
    assert dmp_degree_list(f_0, 2) == (2, 2, 2)
    assert dmp_degree_list(f_1, 2) == (3, 3, 3)
    assert dmp_degree_list(f_2, 2) == (5, 3, 3)
    assert dmp_degree_list(f_3, 2) == (5, 4, 7)
    assert dmp_degree_list(f_4, 2) == (9, 12, 8)
    assert dmp_degree_list(f_5, 2) == (3, 3, 3)
    assert dmp_degree_list(f_6, 3) == (4, 4, 6, 3)
# --- normal form helpers: leading-zero stripping, validation, reversal ---

def test_dup_strip():
    assert dup_strip([]) == []
    assert dup_strip([0]) == []
    assert dup_strip([0,0,0]) == []
    assert dup_strip([1]) == [1]
    assert dup_strip([0,1]) == [1]
    assert dup_strip([0,0,0,1]) == [1]
    assert dup_strip([1,2,0]) == [1,2,0]
    assert dup_strip([0,1,2,0]) == [1,2,0]
    assert dup_strip([0,0,0,1,2,0]) == [1,2,0]

def test_dmp_strip():
    assert dmp_strip([0,1,0], 0) == [1,0]
    assert dmp_strip([[]], 1) == [[]]
    assert dmp_strip([[], []], 1) == [[]]
    assert dmp_strip([[], [], []], 1) == [[]]
    assert dmp_strip([[[]]], 2) == [[[]]]
    assert dmp_strip([[[]], [[]]], 2) == [[[]]]
    assert dmp_strip([[[]], [[]], [[]]], 2) == [[[]]]
    assert dmp_strip([[[1]]], 2) == [[[1]]]
    assert dmp_strip([[[]], [[1]]], 2) == [[[1]]]
    assert dmp_strip([[[]], [[1]], [[]]], 2) == [[[1]], [[]]]

# dmp_validate also infers the nesting level (returned as second value)
def test_dmp_validate():
    assert dmp_validate([]) == ([], 0)
    assert dmp_validate([0,0,0,1,0]) == ([1,0], 0)
    assert dmp_validate([[[]]]) == ([[[]]], 2)
    assert dmp_validate([[0],[],[0],[1],[0]]) == ([[1],[]], 1)
    raises(ValueError, 'dmp_validate([[0],0,[0],[1],[0]])')

def test_dup_reverse():
    assert dup_reverse([1,2,0,3]) == [3,0,2,1]
    assert dup_reverse([1,2,3,0]) == [3,2,1]
# --- copying, normalization and domain conversion ---

# copies must be deep enough that mutating the copy leaves the original alone
def test_dup_copy():
    f = [ZZ(1),ZZ(0),ZZ(2)]
    g = dup_copy(f)

    g[0], g[2] = ZZ(7), ZZ(0)

    assert f != g

def test_dmp_copy():
    f = [[ZZ(1)],[ZZ(2),ZZ(0)]]
    g = dmp_copy(f, 1)

    g[0][0], g[1][1] = ZZ(7), ZZ(1)

    assert f != g

def test_dup_normal():
    assert dup_normal([0,0,2,1,0,11,0], ZZ) == \
        [ZZ(2),ZZ(1),ZZ(0),ZZ(11),ZZ(0)]

def test_dmp_normal():
    assert dmp_normal([[0],[],[0,2,1],[0],[11],[]], 1, ZZ) == \
        [[ZZ(2),ZZ(1)],[],[ZZ(11)],[]]

# conversion between ZZ[x] coefficients and plain ZZ coefficients
def test_dup_convert():
    K0, K1 = ZZ['x'], ZZ

    f = [DMP([1], ZZ),DMP([2], ZZ),DMP([], ZZ),DMP([3], ZZ)]

    assert dup_convert(f, K0, K1) == \
        [ZZ(1),ZZ(2),ZZ(0),ZZ(3)]

def test_dmp_convert():
    K0, K1 = ZZ['x'], ZZ

    f = [[DMP([1], ZZ)],[DMP([2], ZZ)],[],[DMP([3], ZZ)]]

    assert dmp_convert(f, 1, K0, K1) == \
        [[ZZ(1)],[ZZ(2)],[],[ZZ(3)]]

def test_dup_from_sympy():
    assert dup_from_sympy([S(1), S(2)], ZZ) == \
        [ZZ(1), ZZ(2)]
    assert dup_from_sympy([S(1)/2, S(3)], QQ) == \
        [QQ(1, 2), QQ(3, 1)]

def test_dmp_from_sympy():
    assert dmp_from_sympy([[S(1), S(2)], [S(0)]], 1, ZZ) == \
        [[ZZ(1), ZZ(2)], []]
    assert dmp_from_sympy([[S(1)/2, S(2)]], 1, QQ) == \
        [[QQ(1, 2), QQ(2, 1)]]
# --- coefficient access by exponent: out of range gives zero, negative raises ---

def test_dup_nth():
    assert dup_nth([1,2,3], 0, ZZ) == 3
    assert dup_nth([1,2,3], 1, ZZ) == 2
    assert dup_nth([1,2,3], 2, ZZ) == 1

    assert dup_nth([1,2,3], 9, ZZ) == 0

    raises(IndexError, 'dup_nth([3,4,5], -1, ZZ)')

def test_dmp_nth():
    assert dmp_nth([[1],[2],[3]], 0, 1, ZZ) == [3]
    assert dmp_nth([[1],[2],[3]], 1, 1, ZZ) == [2]
    assert dmp_nth([[1],[2],[3]], 2, 1, ZZ) == [1]

    assert dmp_nth([[1],[2],[3]], 9, 1, ZZ) == []

    raises(IndexError, 'dmp_nth([[3],[4],[5]], -1, 1, ZZ)')

def test_dmp_ground_nth():
    assert dmp_ground_nth([[1],[2],[3]], (0,0), 1, ZZ) == 3
    assert dmp_ground_nth([[1],[2],[3]], (1,0), 1, ZZ) == 2
    assert dmp_ground_nth([[1],[2],[3]], (2,0), 1, ZZ) == 1

    assert dmp_ground_nth([[1],[2],[3]], (2,1), 1, ZZ) == 0
    assert dmp_ground_nth([[1],[2],[3]], (3,0), 1, ZZ) == 0

    raises(IndexError, 'dmp_ground_nth([[3],[4],[5]], (2,-1), 1, ZZ)')
# --- predicates and constructors for zero/one/ground polynomials ---

def test_dmp_zero_p():
    assert dmp_zero_p([], 0) == True
    assert dmp_zero_p([[]], 1) == True
    assert dmp_zero_p([[[]]], 2) == True
    assert dmp_zero_p([[[1]]], 2) == False

def test_dmp_zero():
    assert dmp_zero(0) == []
    assert dmp_zero(2) == [[[]]]

def test_dmp_one_p():
    assert dmp_one_p([1], 0, ZZ) == True
    assert dmp_one_p([[1]], 1, ZZ) == True
    assert dmp_one_p([[[1]]], 2, ZZ) == True
    assert dmp_one_p([[[12]]], 2, ZZ) == False

def test_dmp_one():
    assert dmp_one(0, ZZ) == [ZZ(1)]
    assert dmp_one(2, ZZ) == [[[ZZ(1)]]]

def test_dmp_ground_p():
    assert dmp_ground_p([], 0, 0) == True
    assert dmp_ground_p([[]], 0, 1) == True
    assert dmp_ground_p([[]], 1, 1) == False
    assert dmp_ground_p([[ZZ(1)]], 1, 1) == True
    assert dmp_ground_p([[[ZZ(2)]]], 2, 2) == True
    assert dmp_ground_p([[[ZZ(2)]]], 3, 2) == False
    assert dmp_ground_p([[[ZZ(3)], []]], 3, 2) == False

    # the second argument may be None, as exercised below
    assert dmp_ground_p([], None, 0) == True
    assert dmp_ground_p([[]], None, 1) == True
    assert dmp_ground_p([ZZ(1)], None, 0) == True
    assert dmp_ground_p([[[ZZ(1)]]], None, 2) == True
    assert dmp_ground_p([[[ZZ(3)], []]], None, 2) == False

def test_dmp_ground():
    assert dmp_ground(ZZ(0), 2) == [[[]]]

    # level -1 yields a plain domain element rather than a polynomial
    assert dmp_ground(ZZ(7),-1) == ZZ(7)
    assert dmp_ground(ZZ(7), 0) == [ZZ(7)]
    assert dmp_ground(ZZ(7), 2) == [[[ZZ(7)]]]

def test_dmp_zeros():
    assert dmp_zeros(4, 0, ZZ) == [[], [], [], []]

    assert dmp_zeros(0, 2, ZZ) == []
    assert dmp_zeros(1, 2, ZZ) == [[[[]]]]
    assert dmp_zeros(2, 2, ZZ) == [[[[]]], [[[]]]]
    assert dmp_zeros(3, 2, ZZ) == [[[[]]], [[[]]], [[[]]]]

    assert dmp_zeros(3, -1, ZZ) == [0, 0, 0]

def test_dmp_grounds():
    assert dmp_grounds(ZZ(7), 0, 2) == []

    assert dmp_grounds(ZZ(7), 1, 2) == [[[[7]]]]
    assert dmp_grounds(ZZ(7), 2, 2) == [[[[7]]], [[[7]]]]
    assert dmp_grounds(ZZ(7), 3, 2) == [[[[7]]], [[[7]]], [[[7]]]]

    assert dmp_grounds(ZZ(7), 3, -1) == [7, 7, 7]

# sign predicates; the zero polynomial is neither negative nor positive
def test_dmp_negative_p():
    assert dmp_negative_p([[[]]], 2, ZZ) == False
    assert dmp_negative_p([[[1], [2]]], 2, ZZ) == False
    assert dmp_negative_p([[[-1], [2]]], 2, ZZ) == True

def test_dmp_positive_p():
    assert dmp_positive_p([[[]]], 2, ZZ) == False
    assert dmp_positive_p([[[1], [2]]], 2, ZZ) == True
    assert dmp_positive_p([[[-1], [2]]], 2, ZZ) == False
def test_dup_from_to_dict():
    """Round-trip assertions between dup lists and (raw) dict representations."""
    assert dup_from_raw_dict({}, ZZ) == []
    assert dup_from_dict({}, ZZ) == []
    assert dup_to_raw_dict([]) == {}
    assert dup_to_dict([]) == {}
    # With zero=True an explicit zero entry is produced for the empty input.
    assert dup_to_raw_dict([], ZZ, zero=True) == {0: ZZ(0)}
    assert dup_to_dict([], ZZ, zero=True) == {(0,): ZZ(0)}
    f = [3,0,0,2,0,0,0,0,8]
    g = {8: 3, 5: 2, 0: 8}
    h = {(8,): 3, (5,): 2, (0,): 8}
    assert dup_from_raw_dict(g, ZZ) == f
    assert dup_from_dict(h, ZZ) == f
    assert dup_to_raw_dict(f) == g
    assert dup_to_dict(f) == h
    # The same round trips over the ring K = ZZ['x','y'].
    K = ZZ['x','y']
    f = [K([[3]]),K([[]]),K([[2]]),K([[]]),K([[]]),K([[8]])]
    g = {5: K([[3]]), 3: K([[2]]), 0: K([[8]])}
    h = {(5,): K([[3]]), (3,): K([[2]]), (0,): K([[8]])}
    assert dup_from_raw_dict(g, K) == f
    assert dup_from_dict(h, K) == f
    assert dup_to_raw_dict(f) == g
    assert dup_to_dict(f) == h

def test_dmp_from_to_dict():
    """Round-trip assertions between dmp lists and dict representations."""
    assert dmp_from_dict({}, 1, ZZ) == [[]]
    assert dmp_to_dict([[]], 1) == {}
    assert dmp_to_dict([], 0, ZZ, zero=True) == {(0,): ZZ(0)}
    assert dmp_to_dict([[]], 1, ZZ, zero=True) == {(0,0): ZZ(0)}
    f = [[3],[],[],[2],[],[],[],[],[8]]
    g = {(8,0): 3, (5,0): 2, (0,0): 8}
    assert dmp_from_dict(g, 1, ZZ) == f
    assert dmp_to_dict(f, 1) == g
def test_dmp_swap():
    """Assertions exercising dmp_swap, including invalid indices."""
    f = dmp_normal([[1,0,0],[],[1,0],[],[1]], 1, ZZ)
    g = dmp_normal([[1,0,0,0,0],[1,0,0],[1]], 1, ZZ)
    assert dmp_swap(f, 1, 1, 1, ZZ) == f
    assert dmp_swap(f, 0, 1, 1, ZZ) == g
    assert dmp_swap(g, 0, 1, 1, ZZ) == f
    raises(IndexError, "dmp_swap(f, -1, -7, 1, ZZ)")

def test_dmp_permute():
    """Assertions exercising dmp_permute with identity and swap permutations."""
    f = dmp_normal([[1,0,0],[],[1,0],[],[1]], 1, ZZ)
    g = dmp_normal([[1,0,0,0,0],[1,0,0],[1]], 1, ZZ)
    assert dmp_permute(f, [0, 1], 1, ZZ) == f
    assert dmp_permute(g, [0, 1], 1, ZZ) == g
    assert dmp_permute(f, [1, 0], 1, ZZ) == g
    assert dmp_permute(g, [1, 0], 1, ZZ) == f

def test_dmp_nest():
    """Assertions exercising dmp_nest."""
    assert dmp_nest(ZZ(1), 2, ZZ) == [[[1]]]
    assert dmp_nest([[1]], 0, ZZ) == [[1]]
    assert dmp_nest([[1]], 1, ZZ) == [[[1]]]
    assert dmp_nest([[1]], 2, ZZ) == [[[[1]]]]

def test_dmp_raise():
    """Assertions exercising dmp_raise."""
    assert dmp_raise([], 2, 0, ZZ) == [[[]]]
    assert dmp_raise([[1]], 0, 1, ZZ) == [[1]]
    assert dmp_raise([[1,2,3], [], [2,3]], 2, 1, ZZ) == \
        [[[[1]],[[2]],[[3]]], [[[]]], [[[2]],[[3]]]]
def test_dup_deflate():
    """Assertions exercising dup_deflate on several exponent patterns."""
    assert dup_deflate([], ZZ) == (1, [])
    assert dup_deflate([2], ZZ) == (1, [2])
    assert dup_deflate([1,2,3], ZZ) == (1, [1,2,3])
    assert dup_deflate([1,0,2,0,3], ZZ) == (2, [1,2,3])
    # Inputs built from exponent dicts via dup_from_raw_dict.
    assert dup_deflate(dup_from_raw_dict({7:1,1:1}, ZZ), ZZ) == \
        (1, [1, 0, 0, 0, 0, 0, 1, 0])
    assert dup_deflate(dup_from_raw_dict({7:1,0:1}, ZZ), ZZ) == \
        (7, [1, 1])
    assert dup_deflate(dup_from_raw_dict({7:1,3:1}, ZZ), ZZ) == \
        (1, [1, 0, 0, 0, 1, 0, 0, 0])
    assert dup_deflate(dup_from_raw_dict({7:1,4:1}, ZZ), ZZ) == \
        (1, [1, 0, 0, 1, 0, 0, 0, 0])
    assert dup_deflate(dup_from_raw_dict({8:1,4:1}, ZZ), ZZ) == \
        (4, [1, 1, 0])
    assert dup_deflate(dup_from_raw_dict({8:1}, ZZ), ZZ) == \
        (8, [1, 0])
    assert dup_deflate(dup_from_raw_dict({7:1}, ZZ), ZZ) == \
        (7, [1, 0])
    assert dup_deflate(dup_from_raw_dict({1:1}, ZZ), ZZ) == \
        (1, [1, 0])
def test_dmp_deflate():
    """Assertions exercising dmp_deflate."""
    assert dmp_deflate([[]], 1, ZZ) == ((1, 1), [[]])
    assert dmp_deflate([[2]], 1, ZZ) == ((1, 1), [[2]])
    f = [[1, 0, 0], [], [1, 0], [], [1]]
    assert dmp_deflate(f, 1, ZZ) == ((2, 1), [[1, 0, 0], [1, 0], [1]])

def test_dup_multi_deflate():
    """Assertions exercising dup_multi_deflate on tuples of polynomials."""
    assert dup_multi_deflate(([2],), ZZ) == (1, ([2],))
    assert dup_multi_deflate(([], []), ZZ) == (1, ([], []))
    assert dup_multi_deflate(([1,2,3],), ZZ) == (1, ([1,2,3],))
    assert dup_multi_deflate(([1,0,2,0,3],), ZZ) == (2, ([1,2,3],))
    assert dup_multi_deflate(([1,0,2,0,3], [2,0,0]), ZZ) == \
        (2, ([1,2,3], [2,0]))
    assert dup_multi_deflate(([1,0,2,0,3], [2,1,0]), ZZ) == \
        (1, ([1,0,2,0,3], [2,1,0]))

def test_dmp_multi_deflate():
    """Assertions exercising dmp_multi_deflate on tuples of polynomials."""
    assert dmp_multi_deflate(([[]],), 1, ZZ) == \
        ((1, 1), ([[]],))
    assert dmp_multi_deflate(([[]], [[]]), 1, ZZ) == \
        ((1, 1), ([[]], [[]]))
    assert dmp_multi_deflate(([[1]], [[]]), 1, ZZ) == \
        ((1, 1), ([[1]], [[]]))
    assert dmp_multi_deflate(([[1]], [[2]]), 1, ZZ) == \
        ((1, 1), ([[1]], [[2]]))
    assert dmp_multi_deflate(([[1]], [[2,0]]), 1, ZZ) == \
        ((1, 1), ([[1]], [[2, 0]]))
    assert dmp_multi_deflate(([[2,0]], [[2,0]]), 1, ZZ) == \
        ((1, 1), ([[2, 0]], [[2, 0]]))
    assert dmp_multi_deflate(([[2]], [[2,0,0]]), 1, ZZ) == ((1, 2), ([[2]], [[2, 0]]))
    assert dmp_multi_deflate(([[2,0,0]], [[2,0,0]]), 1, ZZ) == ((1, 2), ([[2, 0]], [[2, 0]]))
    assert dmp_multi_deflate(([2,0,0], [1,0,4,0,1]), 0, ZZ) == \
        ((2,), ([2, 0], [1, 4, 1]))
    f = [[1, 0, 0], [], [1, 0], [], [1]]
    g = [[1, 0, 1, 0], [], [1]]
    assert dmp_multi_deflate((f,), 1, ZZ) == \
        ((2, 1), ([[1, 0, 0], [1, 0], [1]],))
    assert dmp_multi_deflate((f, g), 1, ZZ) == \
        ((2, 1), ([[1, 0, 0], [1, 0], [1]],
                  [[1, 0, 1, 0], [1]]))
def test_dup_inflate():
    """Assertions exercising dup_inflate, including an invalid multiplier."""
    assert dup_inflate([], 17, ZZ) == []
    assert dup_inflate([1,2,3], 1, ZZ) == [1,2,3]
    assert dup_inflate([1,2,3], 2, ZZ) == [1,0,2,0,3]
    assert dup_inflate([1,2,3], 3, ZZ) == [1,0,0,2,0,0,3]
    assert dup_inflate([1,2,3], 4, ZZ) == [1,0,0,0,2,0,0,0,3]
    raises(IndexError, 'dup_inflate([1,2,3], 0, ZZ)')

def test_dmp_inflate():
    """Assertions exercising dmp_inflate, including an invalid multiplier."""
    assert dmp_inflate([1], (3,), 0, ZZ) == [1]
    assert dmp_inflate([[]], (3, 7), 1, ZZ) == [[]]
    assert dmp_inflate([[2]], (1, 2), 1, ZZ) == [[2]]
    assert dmp_inflate([[2,0]], (1, 1), 1, ZZ) == [[2,0]]
    assert dmp_inflate([[2,0]], (1, 2), 1, ZZ) == [[2,0,0]]
    assert dmp_inflate([[2,0]], (1, 3), 1, ZZ) == [[2,0,0,0]]
    assert dmp_inflate([[1, 0, 0], [1], [1, 0]], (2, 1), 1, ZZ) == \
        [[1, 0, 0], [], [1], [], [1, 0]]
    raises(IndexError, "dmp_inflate([[]], (-3, 7), 1, ZZ)")

def test_dmp_exclude():
    """Assertions exercising dmp_exclude."""
    assert dmp_exclude([[[]]], 2, ZZ) == ([], [[[]]], 2)
    assert dmp_exclude([[[7]]], 2, ZZ) == ([], [[[7]]], 2)
    assert dmp_exclude([1,2,3], 0, ZZ) == ([], [1,2,3], 0)
    assert dmp_exclude([[1],[2,3]], 1, ZZ) == ([], [[1],[2,3]], 1)
    assert dmp_exclude([[1,2,3]], 1, ZZ) == ([0], [1,2,3], 0)
    assert dmp_exclude([[1],[2],[3]], 1, ZZ) == ([1], [1,2,3], 0)
    assert dmp_exclude([[[1,2,3]]], 2, ZZ) == ([0,1], [1,2,3], 0)
    assert dmp_exclude([[[1]],[[2]],[[3]]], 2, ZZ) == ([1,2], [1,2,3], 0)

def test_dmp_include():
    """Assertions exercising dmp_include."""
    assert dmp_include([1,2,3], [], 0, ZZ) == [1,2,3]
    assert dmp_include([1,2,3], [0], 0, ZZ) == [[1,2,3]]
    assert dmp_include([1,2,3], [1], 0, ZZ) == [[1],[2],[3]]
    assert dmp_include([1,2,3], [0,1], 0, ZZ) == [[[1,2,3]]]
    assert dmp_include([1,2,3], [1,2], 0, ZZ) == [[[1]],[[2]],[[3]]]
def test_dmp_inject():
    """Assertions exercising dmp_inject over the ring K = ZZ['x','y']."""
    K = ZZ['x','y']
    assert dmp_inject([], 0, K) == ([[[]]], 2)
    assert dmp_inject([[]], 1, K) == ([[[[]]]], 3)
    assert dmp_inject([K([[1]])], 0, K) == ([[[1]]], 2)
    assert dmp_inject([[K([[1]])]], 1, K) == ([[[[1]]]], 3)
    assert dmp_inject([K([[1]]),K([[2],[3,4]])], 0, K) == ([[[1]],[[2],[3,4]]], 2)
    f = [K([[3],[7,0],[5,0,0]]),K([[2],[]]),K([[]]),K([[1,0,0],[11]])]
    g = [[[3],[7,0],[5,0,0]],[[2],[]],[[]],[[1,0,0],[11]]]
    assert dmp_inject(f, 0, K) == (g, 2)

def test_dmp_eject():
    """Assertions exercising dmp_eject over the ring K = ZZ['x','y']."""
    K = ZZ['x','y']
    assert dmp_eject([[[]]], 2, K) == []
    assert dmp_eject([[[[]]]], 3, K) == [[]]
    assert dmp_eject([[[1]]], 2, K) == [K([[1]])]
    assert dmp_eject([[[[1]]]], 3, K) == [[K([[1]])]]
    assert dmp_eject([[[1]],[[2],[3,4]]], 2, K) == [K([[1]]),K([[2],[3,4]])]
    f = [K([[3],[7,0],[5,0,0]]),K([[2],[]]),K([[]]),K([[1,0,0],[11]])]
    g = [[[3],[7,0],[5,0,0]],[[2],[]],[[]],[[1,0,0],[11]]]
    assert dmp_eject(g, 2, K) == f

def test_dup_terms_gcd():
    """Assertions exercising dup_terms_gcd."""
    assert dup_terms_gcd([], ZZ) == (0, [])
    assert dup_terms_gcd([1,0,1], ZZ) == (0, [1,0,1])
    assert dup_terms_gcd([1,0,1,0], ZZ) == (1, [1,0,1])

def test_dmp_terms_gcd():
    """Assertions exercising dmp_terms_gcd."""
    assert dmp_terms_gcd([[]], 1, ZZ) == ((0,0), [[]])
    assert dmp_terms_gcd([1,0,1,0], 0, ZZ) == ((1,), [1,0,1])
    assert dmp_terms_gcd([[1],[],[1],[]], 1, ZZ) == ((1,0), [[1],[],[1]])
    assert dmp_terms_gcd([[1,0],[],[1]], 1, ZZ) == ((0,0), [[1,0],[],[1]])
    assert dmp_terms_gcd([[1,0],[1,0,0],[],[]], 1, ZZ) == ((2,1), [[1],[1,0]])
def test_dmp_list_terms():
    """Assertions exercising dmp_list_terms, including lex/grlex orderings."""
    assert dmp_list_terms([[[]]], 2, ZZ) == [((0,0,0), 0)]
    assert dmp_list_terms([[[1]]], 2, ZZ) == [((0,0,0), 1)]
    assert dmp_list_terms([1,2,4,3,5], 0, ZZ) == \
        [((4,), 1), ((3,), 2), ((2,), 4), ((1,), 3), ((0,), 5)]
    assert dmp_list_terms([[1],[2,4],[3,5,0]], 1, ZZ) == \
        [((2, 0), 1), ((1, 1), 2), ((1, 0), 4), ((0, 2), 3), ((0, 1), 5)]
    # With these degrees lex and grlex agree ...
    f = [[2, 0, 0, 0], [1, 0, 0], []]
    assert dmp_list_terms(f, 1, ZZ, order='lex') == [((2, 3), 2), ((1, 2), 1)]
    assert dmp_list_terms(f, 1, ZZ, order='grlex') == [((2, 3), 2), ((1, 2), 1)]
    # ... while here the two orderings differ.
    f = [[2, 0, 0, 0], [1, 0, 0, 0, 0, 0], []]
    assert dmp_list_terms(f, 1, ZZ, order='lex') == [((2, 3), 2), ((1, 5), 1)]
    assert dmp_list_terms(f, 1, ZZ, order='grlex') == [((1, 5), 1), ((2, 3), 2)]

def test_dmp_apply_pairs():
    """Assertions exercising dmp_apply_pairs with a multiplication callback."""
    h = lambda a, b: a*b
    assert dmp_apply_pairs([1,2,3], [4,5,6], h, [], 0, ZZ) == [4,10,18]
    assert dmp_apply_pairs([2,3], [4,5,6], h, [], 0, ZZ) == [10,18]
    assert dmp_apply_pairs([1,2,3], [5,6], h, [], 0, ZZ) == [10,18]
    assert dmp_apply_pairs([[1,2],[3]], [[4,5],[6]], h, [], 1, ZZ) == [[4,10],[18]]
    assert dmp_apply_pairs([[1,2],[3]], [[4],[5,6]], h, [], 1, ZZ) == [[8],[18]]
    assert dmp_apply_pairs([[1],[2,3]], [[4,5],[6]], h, [], 1, ZZ) == [[5],[18]]

def test_dup_slice():
    """Assertions exercising dup_slice over several index windows."""
    f = [1, 2, 3, 4]
    assert dup_slice(f, 0, 0, ZZ) == []
    assert dup_slice(f, 0, 1, ZZ) == [4]
    assert dup_slice(f, 0, 2, ZZ) == [3,4]
    assert dup_slice(f, 0, 3, ZZ) == [2,3,4]
    assert dup_slice(f, 0, 4, ZZ) == [1,2,3,4]
    assert dup_slice(f, 0, 4, ZZ) == f
    assert dup_slice(f, 0, 9, ZZ) == f
    assert dup_slice(f, 1, 0, ZZ) == []
    assert dup_slice(f, 1, 1, ZZ) == []
    assert dup_slice(f, 1, 2, ZZ) == [3,0]
    assert dup_slice(f, 1, 3, ZZ) == [2,3,0]
    assert dup_slice(f, 1, 4, ZZ) == [1,2,3,0]

def test_dup_random():
    """Check degree and coefficient bounds of dup_random output."""
    f = dup_random(0, -10, 10, ZZ)
    assert dup_degree(f) == 0
    assert all([ -10 <= c <= 10 for c in f ])
    f = dup_random(1, -20, 20, ZZ)
    assert dup_degree(f) == 1
    assert all([ -20 <= c <= 20 for c in f ])
    f = dup_random(2, -30, 30, ZZ)
    assert dup_degree(f) == 2
    assert all([ -30 <= c <= 30 for c in f ])
    f = dup_random(3, -40, 40, ZZ)
    assert dup_degree(f) == 3
    assert all([ -40 <= c <= 40 for c in f ])
| bsd-3-clause |
zyga/textland | demo6.py | 1 | 2579 | #!/usr/bin/env python3
# This file is part of textland.
#
# Copyright 2014 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Textland is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# Textland is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Textland. If not, see <http://www.gnu.org/licenses/>.
from textland import DrawingContext
from textland import EVENT_KEYBOARD
from textland import EVENT_RESIZE
from textland import Event
from textland import IApplication
from textland import Size
from textland import TextImage
from textland import get_display
class DemoApp(IApplication):
    """Textland demo that paints a 16x16 foreground/background color table."""

    def __init__(self):
        # Start with an empty image; the first RESIZE event allocates a real one.
        self.image = TextImage(Size(0, 0))

    def consume_event(self, event: Event):
        """Handle one event, repaint and return the current image.

        Raises StopIteration when 'q' is pressed, ending the display loop.
        """
        if event.kind == EVENT_RESIZE:
            self.image = TextImage(event.data)  # data is the new size
        elif event.kind == EVENT_KEYBOARD and event.data.key == 'q':
            raise StopIteration
        self.repaint(event)
        return self.image

    def repaint(self, event: Event) -> None:
        # The color table needs at least 65x18 cells; below that, show a hint.
        ctx = DrawingContext(self.image)
        if self.image.size.width < 65 or self.image.size.height < 18:
            self._paint_resize_msg(ctx)
        else:
            self._paint_color_table(ctx)

    def _paint_color_table(self, ctx: DrawingContext) -> None:
        """Paint every (fg, bg) color pair as a 'F+B' hex cell inside a border."""
        CELL_WIDTH = 4
        NUM_COLORS = 16
        # The border does not depend on (fg, bg): draw it once with default
        # attributes instead of redrawing it NUM_COLORS ** 2 times as before.
        ctx.attributes.reset()
        ctx.border(
            0, self.image.size.width - (NUM_COLORS * CELL_WIDTH) - 1,
            0, self.image.size.height - NUM_COLORS - 2)
        for fg in range(NUM_COLORS):
            for bg in range(NUM_COLORS):
                ctx.move_to(1 + fg * CELL_WIDTH, 1 + bg)
                ctx.attributes.fg = fg
                ctx.attributes.bg = bg
                ctx.print("{:X}+{:X}".format(fg, bg))

    def _paint_resize_msg(self, ctx: DrawingContext) -> None:
        """Center a message asking the user to make the window bigger."""
        text = "Please enlarge this window"
        ctx.move_to(
            (self.image.size.width - len(text)) // 2,
            self.image.size.height // 2)
        ctx.print(text)
def main():
    """Create a display and run the demo application inside it."""
    get_display().run(DemoApp())

if __name__ == "__main__":
    main()
| gpl-3.0 |
mancoast/CPythonPyc_test | fail/312_test_macpath.py | 2 | 6054 | import macpath
from test import support
import unittest
class MacPathTestCase(unittest.TestCase):
    """Tests for the macpath module (classic Mac OS ':'-separated paths).

    Uses assertEqual/assertTrue/assertFalse throughout: the deprecated
    assertEquals alias emitted DeprecationWarning and was removed in
    Python 3.12, and assertEqual gives clearer failure messages than
    assertTrue(x == y).
    """

    def test_abspath(self):
        self.assertEqual(macpath.abspath("xx:yy"), "xx:yy")

    def test_isabs(self):
        isabs = macpath.isabs
        self.assertTrue(isabs("xx:yy"))
        self.assertTrue(isabs("xx:yy:"))
        self.assertTrue(isabs("xx:"))
        self.assertFalse(isabs("foo"))
        self.assertFalse(isabs(":foo"))
        self.assertFalse(isabs(":foo:bar"))
        self.assertFalse(isabs(":foo:bar:"))
        # Same checks with bytes arguments.
        self.assertTrue(isabs(b"xx:yy"))
        self.assertTrue(isabs(b"xx:yy:"))
        self.assertTrue(isabs(b"xx:"))
        self.assertFalse(isabs(b"foo"))
        self.assertFalse(isabs(b":foo"))
        self.assertFalse(isabs(b":foo:bar"))
        self.assertFalse(isabs(b":foo:bar:"))

    def test_commonprefix(self):
        commonprefix = macpath.commonprefix
        self.assertEqual(commonprefix(["home:swenson:spam", "home:swen:spam"]),
                         "home:swen")
        self.assertEqual(commonprefix([":home:swen:spam", ":home:swen:eggs"]),
                         ":home:swen:")
        self.assertEqual(commonprefix([":home:swen:spam", ":home:swen:spam"]),
                         ":home:swen:spam")
        self.assertEqual(commonprefix([b"home:swenson:spam", b"home:swen:spam"]),
                         b"home:swen")
        self.assertEqual(commonprefix([b":home:swen:spam", b":home:swen:eggs"]),
                         b":home:swen:")
        self.assertEqual(commonprefix([b":home:swen:spam", b":home:swen:spam"]),
                         b":home:swen:spam")

    def test_split(self):
        split = macpath.split
        self.assertEqual(split("foo:bar"),
                         ('foo:', 'bar'))
        self.assertEqual(split("conky:mountpoint:foo:bar"),
                         ('conky:mountpoint:foo', 'bar'))
        self.assertEqual(split(":"), ('', ''))
        self.assertEqual(split(":conky:mountpoint:"),
                         (':conky:mountpoint', ''))
        self.assertEqual(split(b"foo:bar"),
                         (b'foo:', b'bar'))
        self.assertEqual(split(b"conky:mountpoint:foo:bar"),
                         (b'conky:mountpoint:foo', b'bar'))
        self.assertEqual(split(b":"), (b'', b''))
        self.assertEqual(split(b":conky:mountpoint:"),
                         (b':conky:mountpoint', b''))

    def test_join(self):
        join = macpath.join
        self.assertEqual(join('a', 'b'), ':a:b')
        self.assertEqual(join('', 'a:b'), 'a:b')
        self.assertEqual(join('a:b', 'c'), 'a:b:c')
        self.assertEqual(join('a:b', ':c'), 'a:b:c')
        self.assertEqual(join('a', ':b', ':c'), ':a:b:c')
        self.assertEqual(join(b'a', b'b'), b':a:b')
        self.assertEqual(join(b'', b'a:b'), b'a:b')
        self.assertEqual(join(b'a:b', b'c'), b'a:b:c')
        self.assertEqual(join(b'a:b', b':c'), b'a:b:c')
        self.assertEqual(join(b'a', b':b', b':c'), b':a:b:c')

    def test_splitdrive(self):
        splitdrive = macpath.splitdrive
        self.assertEqual(splitdrive("foo:bar"), ('', 'foo:bar'))
        self.assertEqual(splitdrive(":foo:bar"), ('', ':foo:bar'))
        self.assertEqual(splitdrive(b"foo:bar"), (b'', b'foo:bar'))
        self.assertEqual(splitdrive(b":foo:bar"), (b'', b':foo:bar'))

    def test_splitext(self):
        splitext = macpath.splitext
        self.assertEqual(splitext(":foo.ext"), (':foo', '.ext'))
        self.assertEqual(splitext("foo:foo.ext"), ('foo:foo', '.ext'))
        self.assertEqual(splitext(".ext"), ('.ext', ''))
        self.assertEqual(splitext("foo.ext:foo"), ('foo.ext:foo', ''))
        self.assertEqual(splitext(":foo.ext:"), (':foo.ext:', ''))
        self.assertEqual(splitext(""), ('', ''))
        self.assertEqual(splitext("foo.bar.ext"), ('foo.bar', '.ext'))
        self.assertEqual(splitext(b":foo.ext"), (b':foo', b'.ext'))
        self.assertEqual(splitext(b"foo:foo.ext"), (b'foo:foo', b'.ext'))
        self.assertEqual(splitext(b".ext"), (b'.ext', b''))
        self.assertEqual(splitext(b"foo.ext:foo"), (b'foo.ext:foo', b''))
        self.assertEqual(splitext(b":foo.ext:"), (b':foo.ext:', b''))
        self.assertEqual(splitext(b""), (b'', b''))
        self.assertEqual(splitext(b"foo.bar.ext"), (b'foo.bar', b'.ext'))

    def test_ismount(self):
        ismount = macpath.ismount
        self.assertEqual(ismount("a:"), True)
        self.assertEqual(ismount("a:b"), False)
        self.assertEqual(ismount("a:b:"), True)
        self.assertEqual(ismount(""), False)
        self.assertEqual(ismount(":"), False)
        self.assertEqual(ismount(b"a:"), True)
        self.assertEqual(ismount(b"a:b"), False)
        self.assertEqual(ismount(b"a:b:"), True)
        self.assertEqual(ismount(b""), False)
        self.assertEqual(ismount(b":"), False)

    def test_normpath(self):
        normpath = macpath.normpath
        self.assertEqual(normpath("a:b"), "a:b")
        self.assertEqual(normpath("a"), ":a")
        self.assertEqual(normpath("a:b::c"), "a:c")
        self.assertEqual(normpath("a:b:c:::d"), "a:d")
        self.assertRaises(macpath.norm_error, normpath, "a::b")
        self.assertRaises(macpath.norm_error, normpath, "a:b:::c")
        self.assertEqual(normpath(":"), ":")
        self.assertEqual(normpath("a:"), "a:")
        self.assertEqual(normpath("a:b:"), "a:b")
        self.assertEqual(normpath(b"a:b"), b"a:b")
        self.assertEqual(normpath(b"a"), b":a")
        self.assertEqual(normpath(b"a:b::c"), b"a:c")
        self.assertEqual(normpath(b"a:b:c:::d"), b"a:d")
        self.assertRaises(macpath.norm_error, normpath, b"a::b")
        self.assertRaises(macpath.norm_error, normpath, b"a:b:::c")
        self.assertEqual(normpath(b":"), b":")
        self.assertEqual(normpath(b"a:"), b"a:")
        self.assertEqual(normpath(b"a:b:"), b"a:b")
def test_main():
    # CPython test-suite entry point: run all MacPathTestCase tests.
    support.run_unittest(MacPathTestCase)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
EmadHelmi/chipsec_gui | chipsec/utilcmd/smbus_cmd.py | 3 | 3071 | #!/usr/local/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
__version__ = '1.0'
import time
from chipsec.command import BaseCommand
from chipsec.logger import print_buffer
from chipsec.hal.smbus import *
class SMBusCommand(BaseCommand):
    """
    >>> chipsec_util smbus read <device_addr> <start_offset> [size]
    >>> chipsec_util smbus write <device_addr> <offset> <byte_val>
    Examples:
    >>> chipsec_util smbus read 0xA0 0x0 0x100
    """

    # NOTE: Python 2 module (print statements); argv layout appears to be
    # [util, 'smbus', op, <args...>] -- op and arguments start at argv[2].
    def requires_driver(self):
        """Return whether the kernel driver is needed for this invocation."""
        # No driver required when printing the util documentation
        if len(self.argv) < 3:
            return False
        return True

    def run(self):
        """Execute the 'smbus read'/'smbus write' sub-command from self.argv."""
        if len(self.argv) < 3:
            print SMBusCommand.__doc__
            return
        try:
            _smbus = SMBus( self.cs )
        except BaseException, msg:
            # SMBus HAL construction failed; report the reason and bail out.
            print msg
            return
        op = self.argv[2]
        t = time.time()
        if not _smbus.is_SMBus_supported():
            self.logger.log( "[CHIPSEC] SMBus controller is not supported" )
            return
        _smbus.display_SMBus_info()
        if ( 'read' == op ):
            # Numeric arguments are parsed as hexadecimal.
            dev_addr = int(self.argv[3],16)
            start_off = int(self.argv[4],16)
            if len(self.argv) > 5:
                # Optional size argument: read a whole range and dump it.
                size = int(self.argv[5],16)
                buf = _smbus.read_range( dev_addr, start_off, size )
                self.logger.log( "[CHIPSEC] SMBus read: device 0x%X offset 0x%X size 0x%X" % (dev_addr, start_off, size) )
                print_buffer( buf )
            else:
                # No size given: read a single byte.
                val = _smbus.read_byte( dev_addr, start_off )
                self.logger.log( "[CHIPSEC] SMBus read: device 0x%X offset 0x%X = 0x%X" % (dev_addr, start_off, val) )
        elif ( 'write' == op ):
            dev_addr = int(self.argv[3],16)
            off = int(self.argv[4],16)
            val = int(self.argv[5],16)
            self.logger.log( "[CHIPSEC] SMBus write: device 0x%X offset 0x%X = 0x%X" % (dev_addr, off, val) )
            _smbus.write_byte( dev_addr, off, val )
        else:
            self.logger.error( "unknown command-line option '%.32s'" % op )
            print SMBusCommand.__doc__
            return
        self.logger.log( "[CHIPSEC] (smbus) time elapsed %.3f" % (time.time()-t) )

# Registry entry used by chipsec_util to dispatch the 'smbus' command.
commands = { 'smbus': SMBusCommand }
| gpl-2.0 |
hothHowler/pymc3 | pymc3/tests/test_ndarray_backend.py | 13 | 2679 | import unittest
import numpy as np
import numpy.testing as npt
from pymc3.tests import backend_fixtures as bf
from pymc3.backends import base, ndarray
class TestNDArray0dSampling(bf.SamplingTestCase):
    """Run the shared sampling tests against ndarray.NDArray with shape ()."""
    backend = ndarray.NDArray
    name = None
    shape = ()

class TestNDArray1dSampling(bf.SamplingTestCase):
    """Run the shared sampling tests against ndarray.NDArray with shape 2."""
    backend = ndarray.NDArray
    name = None
    shape = 2

class TestNDArray2dSampling(bf.SamplingTestCase):
    """Run the shared sampling tests against ndarray.NDArray with shape (2, 3)."""
    backend = ndarray.NDArray
    name = None
    shape = (2, 3)

class TestNDArray0dSelection(bf.SelectionTestCase):
    """Run the shared selection tests against ndarray.NDArray with shape ()."""
    backend = ndarray.NDArray
    name = None
    shape = ()

class TestNDArray1dSelection(bf.SelectionTestCase):
    """Run the shared selection tests against ndarray.NDArray with shape 2."""
    backend = ndarray.NDArray
    name = None
    shape = 2

class TestNDArray2dSelection(bf.SelectionTestCase):
    """Run the shared selection tests against ndarray.NDArray with shape (2, 3)."""
    backend = ndarray.NDArray
    name = None
    shape = (2, 3)
class TestMultiTrace(bf.ModelBackendSetupTestCase):
    """MultiTrace construction/merge must raise ValueError for duplicate straces."""
    name = None
    backend = ndarray.NDArray
    shape = ()

    def setUp(self):
        # Run the parent fixture twice to obtain two separate straces
        # (each run rebinds self.strace).
        super(TestMultiTrace, self).setUp()
        self.strace0 = self.strace
        super(TestMultiTrace, self).setUp()
        self.strace1 = self.strace

    def test_multitrace_nonunique(self):
        self.assertRaises(ValueError,
                          base.MultiTrace, [self.strace0, self.strace1])

    def test_merge_traces_nonunique(self):
        mtrace0 = base.MultiTrace([self.strace0])
        mtrace1 = base.MultiTrace([self.strace1])
        self.assertRaises(ValueError,
                          base.merge_traces, [mtrace0, mtrace1])
class TestSqueezeCat(unittest.TestCase):
    """Exercise base._squeeze_cat for each combine/squeeze flag pairing."""

    def setUp(self):
        self.x = np.arange(10)
        self.y = np.arange(10, 20)

    def test_combine_false_squeeze_false(self):
        # Neither combined nor squeezed: both arrays come back as-is.
        npt.assert_equal(base._squeeze_cat([self.x, self.y], False, False),
                         [self.x, self.y])

    def test_combine_true_squeeze_false(self):
        # Combined: one concatenated array, still wrapped in a list.
        npt.assert_equal(base._squeeze_cat([self.x, self.y], True, False),
                         [np.concatenate([self.x, self.y])])

    def test_combine_false_squeeze_true_more_than_one_item(self):
        # Squeezing has no effect on multi-item inputs.
        npt.assert_equal(base._squeeze_cat([self.x, self.y], False, True),
                         [self.x, self.y])

    def test_combine_false_squeeze_true_one_item(self):
        # A single-item input is squeezed down to the bare array.
        npt.assert_equal(base._squeeze_cat([self.x], False, True), self.x)

    def test_combine_true_squeeze_true(self):
        # Combined and squeezed: the concatenated array itself.
        npt.assert_equal(base._squeeze_cat([self.x, self.y], True, True),
                         np.concatenate([self.x, self.y]))
| apache-2.0 |
GRArmstrong/invenio-inspire-ops | modules/bibrecord/lib/bibrecord.py | 1 | 70305 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibRecord - XML MARC processing library for Invenio.
For API, see create_record(), record_get_field_instances() and friends
in the source code of this file in the section entitled INTERFACE.
Note: Does not access the database, the input is MARCXML only."""
### IMPORT INTERESTING MODULES AND XML PARSERS
import re
import sys
from cStringIO import StringIO
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.bibrecord_config import CFG_MARC21_DTD, \
CFG_BIBRECORD_WARNING_MSGS, CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL, \
CFG_BIBRECORD_DEFAULT_CORRECT, CFG_BIBRECORD_PARSERS_AVAILABLE, \
InvenioBibRecordParserError, InvenioBibRecordFieldError
from invenio.config import CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
from invenio.textutils import encode_for_xml
# Some values used for the RXP parsing.
TAG, ATTRS, CHILDREN = 0, 1, 2
# Find out about the best usable parser:
AVAILABLE_PARSERS = []
# Do we remove singletons (empty tags)?
# NOTE: this is currently set to True as there are some external workflow
# exploiting singletons, e.g. bibupload -c used to delete fields, and
# bibdocfile --fix-marc called on a record where the latest document
# has been deleted.
CFG_BIBRECORD_KEEP_SINGLETONS = True
try:
import pyRXP
if 'pyrxp' in CFG_BIBRECORD_PARSERS_AVAILABLE:
AVAILABLE_PARSERS.append('pyrxp')
except ImportError:
pass
try:
from lxml import etree
if 'lxml' in CFG_BIBRECORD_PARSERS_AVAILABLE:
AVAILABLE_PARSERS.append('lxml')
except ImportError:
pass
try:
import Ft.Xml.Domlette
if '4suite' in CFG_BIBRECORD_PARSERS_AVAILABLE:
AVAILABLE_PARSERS.append('4suite')
except ImportError:
pass
except Exception, err:
from warnings import warn
warn("Error when importing 4suite: %s" % err)
pass
try:
import xml.dom.minidom
import xml.parsers.expat
if 'minidom' in CFG_BIBRECORD_PARSERS_AVAILABLE:
AVAILABLE_PARSERS.append('minidom')
except ImportError:
pass
### INTERFACE / VISIBLE FUNCTIONS
def create_field(subfields=None, ind1=' ', ind2=' ', controlfield_value='',
    global_position=-1):
    """
    Build and return a field tuple from the given pieces.

    The field is validated with _check_field_validity() before being
    returned; the global position is arbitrarily set to -1 by default."""
    washed_ind1, washed_ind2 = _wash_indicators(ind1, ind2)
    field = (subfields if subfields is not None else [],
             washed_ind1, washed_ind2, controlfield_value, global_position)
    _check_field_validity(field)
    return field
def create_records(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
    correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='',
    keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Split a MARCXML string into <record> chunks and parse each one.

    Returns a list with one create_record() result tuple per record
    found; see that function's docstring for the tuple structure."""
    # DOTALL so that '.' also matches the newlines inside a record.
    record_pattern = re.compile('<record.*?>.*?</record>', re.DOTALL)
    records = []
    for record_xml in record_pattern.findall(marcxml):
        records.append(create_record(record_xml, verbose=verbose,
                                     correct=correct, parser=parser,
                                     keep_singletons=keep_singletons))
    return records
def create_record(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='',
sort_fields_by_indicators=False,
keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
"""Creates a record object from the marcxml description.
Uses the best parser available in CFG_BIBRECORD_PARSERS_AVAILABLE or
the parser specified.
The returned object is a tuple (record, status_code, list_of_errors),
where status_code is 0 when there are errors, 1 when no errors.
The return record structure is as follows:
Record := {tag : [Field]}
Field := (Subfields, ind1, ind2, value)
Subfields := [(code, value)]
For example:
______
|record|
------
__________________________|_______________________________________
|record['001'] |record['909'] |record['520'] |
| | | |
[list of fields] [list of fields] [list of fields] ...
| ______|______________ |
|[0] |[0] |[1] | |[0]
___|_____ _____|___ ___|_____ ... ____|____
|Field 001| |Field 909| |Field 909| |Field 520|
--------- --------- --------- ---------
| _______________|_________________ | |
... |[0] |[1] |[2] | ... ...
| | | |
[list of subfields] 'C' '4'
___|__________________________________________
| | |
('a', 'value') ('b', 'value for subfield b') ('a', 'value for another a')
@param marcxml: an XML string representation of the record to create
@param verbose: the level of verbosity: 0 (silent), 1-2 (warnings),
3(strict:stop when errors)
@param correct: 1 to enable correction of marcxml syntax. Else 0.
@return: a tuple (record, status_code, list_of_errors), where status
code is 0 where there are errors, 1 when no errors"""
# Select the appropriate parser.
parser = _select_parser(parser)
try:
if parser == 'pyrxp':
rec = _create_record_rxp(marcxml, verbose, correct,
keep_singletons=keep_singletons)
elif parser == 'lxml':
rec = _create_record_lxml(marcxml, verbose, correct,
keep_singletons=keep_singletons)
elif parser == '4suite':
rec = _create_record_4suite(marcxml,
keep_singletons=keep_singletons)
elif parser == 'minidom':
rec = _create_record_minidom(marcxml,
keep_singletons=keep_singletons)
except InvenioBibRecordParserError, ex1:
return (None, 0, str(ex1))
# _create_record = {
# 'pyrxp': _create_record_rxp,
# 'lxml': _create_record_lxml,
# '4suite': _create_record_4suite,
# 'minidom': _create_record_minidom,
# }
# try:
# rec = _create_record[parser](marcxml, verbose)
# except InvenioBibRecordParserError, ex1:
# return (None, 0, str(ex1))
if sort_fields_by_indicators:
_record_sort_by_indicators(rec)
errs = []
if correct:
# Correct the structure of the record.
errs = _correct_record(rec)
return (rec, int(not errs), errs)
def filter_field_instances(field_instances, filter_subcode, filter_value, filter_mode = 'e'):
    """ Return the instances from field_instances that contain a subfield
    filter_subcode whose value matches filter_value.

    The input is the output of record_get_field_instances().

    filter_mode selects how subfield values are matched:
      'e' - exact match
      's' - substring match
      'r' - regular expression match

    Example:
    record_filter_field(record_get_field_instances(rec, '999', '%', '%'),
                        'y', '2001')

    @param field_instances: output from record_get_field_instances
    @param filter_subcode: name of the subfield
    @type filter_subcode: string
    @param filter_value: value of the subfield
    @type filter_value: string
    @param filter_mode: 'e', 's' or 'r'
    """
    matched = []
    if filter_mode == 'e':
        # Exact match: the (code, value) pair must appear verbatim.
        wanted = (filter_subcode, filter_value)
        for instance in field_instances:
            if wanted in instance[0]:
                matched.append(instance)
    elif filter_mode == 's':
        # Substring match: one matching subfield is enough per instance.
        for instance in field_instances:
            for code, value in instance[0]:
                if code == filter_subcode and value.find(filter_value) > -1:
                    matched.append(instance)
                    break
    elif filter_mode == 'r':
        # Regular-expression match, anchored at the start of the value.
        compiled = re.compile(filter_value)
        for instance in field_instances:
            for code, value in instance[0]:
                if code == filter_subcode and \
                   compiled.match(value) is not None:
                    matched.append(instance)
                    break
    return matched
def records_identical(rec1, rec2, skip_005=True, ignore_field_order=False, ignore_subfield_order=False, ignore_duplicate_subfields=False, ignore_duplicate_controlfields=False):
    """
    Return True if rec1 is identical to rec2.

    By default a difference in the 005 tag (the timestamp) is ignored;
    the remaining flags progressively relax the comparison (field order,
    subfield order, duplicated subfields or controlfields).
    """
    keys1 = set(rec1.keys())
    keys2 = set(rec2.keys())
    if skip_005:
        keys1.discard("005")
        keys2.discard("005")
    if keys1 != keys2:
        return False
    if ignore_field_order:
        ## Sort by indicators and then by everything else.
        sort_key = lambda field: (field[1], field[2], field[3], field[0])
    else:
        ## Sort by indicators, then global position, then everything else.
        sort_key = lambda field: (field[1], field[2], field[4], field[3], field[0])
    for tag in keys1:
        fields1 = rec1[tag]
        fields2 = rec2[tag]
        if ignore_duplicate_controlfields and tag.startswith('00'):
            ## Controlfields are compared as sets of their values only.
            if set(field[3] for field in fields1) != \
               set(field[3] for field in fields2):
                return False
            continue
        if len(fields1) != len(fields2):
            ## They already differ in length...
            return False
        for field1, field2 in zip(sorted(fields1, key=sort_key),
                                  sorted(fields2, key=sort_key)):
            if ignore_duplicate_subfields:
                if field1[1:4] != field2[1:4] or \
                   set(field1[0]) != set(field2[0]):
                    return False
            elif ignore_subfield_order:
                if field1[1:4] != field2[1:4] or \
                   sorted(field1[0]) != sorted(field2[0]):
                    return False
            elif field1[:4] != field2[:4]:
                return False
    return True
def record_get_field_instances(rec, tag="", ind1=" ", ind2=" "):
    """Returns the list of field instances for the specified tag and
    indicators of the record (rec).

    Returns empty list if not found.
    If tag is empty string, returns all fields.
    Parameters (tag, ind1, ind2) can contain wildcard %.

    @param rec: a record structure as returned by create_record()
    @param tag: a 3 characters long string
    @param ind1: a 1 character long string
    @param ind2: a 1 character long string
    @return: a list of field tuples (Subfields, ind1, ind2, value,
        field_position_global) where subfields is list of (code, value)"""
    if not rec:
        return []
    if not tag:
        return rec.items()
    ind1, ind2 = _wash_indicators(ind1, ind2)
    if '%' in tag:
        # Wildcard in the tag: gather the fields of every matching tag.
        candidates = []
        for record_tag in rec:
            if _tag_matches_pattern(record_tag, tag):
                candidates.extend(rec[record_tag])
    else:
        # Completely defined tag: direct dict lookup.
        candidates = rec.get(tag, [])
    out = []
    for field in candidates:
        if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
            out.append(field)
    return out
def record_add_field(rec, tag, ind1=' ', ind2=' ', controlfield_value='',
                     subfields=None, field_position_global=None, field_position_local=None):
    """
    Adds a new field into the record.
    If field_position_global or field_position_local is specified then
    this method will insert the new field at the desired position.
    Otherwise a global field position will be computed in order to
    insert the field at the best position (first we try to keep the
    order of the tags and then we insert the field at the end of the
    fields with the same tag).
    If both field_position_global and field_position_local are present,
    then field_position_local takes precedence.
    @param rec: the record data structure
    @param tag: the tag of the field to be added
    @param ind1: the first indicator
    @param ind2: the second indicator
    @param controlfield_value: the value of the controlfield
    @param subfields: the subfields (a list of tuples (code, value))
    @param field_position_global: the global field position (record wise)
    @param field_position_local: the local field position (tag wise)
    @return: the global field position of the newly inserted field or -1 if the
        operation failed
    """
    error = validate_record_field_positions_global(rec)
    if error:
        # FIXME one should write a message here
        pass
    # Clean the parameters.
    if subfields is None:
        subfields = []
    ind1, ind2 = _wash_indicators(ind1, ind2)
    # A controlfield cannot carry indicators or subfields.
    if controlfield_value and (ind1 != ' ' or ind2 != ' ' or subfields):
        return -1
    # Detect field number to be used for insertion:
    # Dictionaries for uniqueness.
    tag_field_positions_global = {}.fromkeys([field[4]
        for field in rec.get(tag, [])])
    all_field_positions_global = {}.fromkeys([field[4]
        for fields in rec.values()
        for field in fields])
    # Case 1: no position given -- compute the global position automatically.
    if field_position_global is None and field_position_local is None:
        # Let's determine the global field position of the new field.
        if tag in rec:
            try:
                field_position_global = max([field[4] for field in rec[tag]]) \
                    + 1
            except IndexError:
                # NOTE(review): max() on an empty list raises ValueError, not
                # IndexError, so this fallback looks unreachable when rec[tag]
                # exists -- confirm whether rec[tag] can ever be empty here.
                if tag_field_positions_global:
                    field_position_global = max(tag_field_positions_global) + 1
                elif all_field_positions_global:
                    field_position_global = max(all_field_positions_global) + 1
                else:
                    field_position_global = 1
        else:
            if tag in ('FMT', 'FFT', 'BDR', 'BDM'):
                # Add the new tag to the end of the record.
                if tag_field_positions_global:
                    field_position_global = max(tag_field_positions_global) + 1
                elif all_field_positions_global:
                    field_position_global = max(all_field_positions_global) + 1
                else:
                    field_position_global = 1
            else:
                # Insert the tag in an ordered way by selecting the
                # right global field position.
                immediate_lower_tag = '000'
                for rec_tag in rec:
                    if (tag not in ('FMT', 'FFT', 'BDR', 'BDM') and
                            immediate_lower_tag < rec_tag < tag):
                        immediate_lower_tag = rec_tag
                if immediate_lower_tag == '000':
                    field_position_global = 1
                else:
                    field_position_global = rec[immediate_lower_tag][-1][4] + 1
        field_position_local = len(rec.get(tag, []))
        _shift_field_positions_global(rec, field_position_global, 1)
    # Case 2: a local position was given (it takes precedence over global).
    elif field_position_local is not None:
        if tag in rec:
            if field_position_local >= len(rec[tag]):
                # Past the end: append after the tag's last field.
                field_position_global = rec[tag][-1][4] + 1
            else:
                # Insert before the field currently at that local position.
                field_position_global = rec[tag][field_position_local][4]
                _shift_field_positions_global(rec, field_position_global, 1)
        else:
            if all_field_positions_global:
                field_position_global = max(all_field_positions_global) + 1
            else:
                # Empty record.
                field_position_global = 1
    # Case 3: only a global position was given.
    elif field_position_global is not None:
        # If the user chose an existing global field position, shift all the
        # global field positions greater than the input global field position.
        if tag not in rec:
            if all_field_positions_global:
                field_position_global = max(all_field_positions_global) + 1
            else:
                field_position_global = 1
            field_position_local = 0
        elif field_position_global < min(tag_field_positions_global):
            # Clamp to the tag's first position and shift the rest up.
            field_position_global = min(tag_field_positions_global)
            _shift_field_positions_global(rec, min(tag_field_positions_global),
                1)
            field_position_local = 0
        elif field_position_global > max(tag_field_positions_global):
            # Clamp to just after the tag's last position.
            field_position_global = max(tag_field_positions_global) + 1
            _shift_field_positions_global(rec,
                max(tag_field_positions_global) + 1, 1)
            field_position_local = len(rec.get(tag, []))
        else:
            if field_position_global in tag_field_positions_global:
                _shift_field_positions_global(rec, field_position_global, 1)
                field_position_local = 0
                # Find the local slot of the field that was just shifted.
                for position, field in enumerate(rec[tag]):
                    if field[4] == field_position_global + 1:
                        field_position_local = position
    # Create the new field.
    newfield = (subfields, ind1, ind2, str(controlfield_value),
        field_position_global)
    rec.setdefault(tag, []).insert(field_position_local, newfield)
    # Return new field number:
    return field_position_global
def record_has_field(rec, tag):
    """
    Checks if the tag exists in the record.
    @param rec: the record data structure
    @param tag: the 3 characters long field tag to look for
    @return: a boolean
    """
    return tag in rec
def record_delete_field(rec, tag, ind1=' ', ind2=' ',
        field_position_global=None, field_position_local=None):
    """
    If global field position is specified, deletes the field with the
    corresponding global field position.
    If field_position_local is specified, deletes the field with the
    corresponding local field position and tag.
    Else deletes all the fields matching tag and optionally ind1 and
    ind2.
    If both field_position_global and field_position_local are present,
    then field_position_local takes precedence.
    @param rec: the record data structure
    @param tag: the tag of the field to be deleted
    @param ind1: the first indicator of the field to be deleted
    @param ind2: the second indicator of the field to be deleted
    @param field_position_global: the global field position (record wise)
    @param field_position_local: the local field position (tag wise)
    @return: the list of deleted fields (note: False when 'tag' is not in
        the record, and [] when an invalid local position is given)
    """
    error = validate_record_field_positions_global(rec)
    if error:
        # FIXME one should write a message here.
        pass
    if tag not in rec:
        return False
    ind1, ind2 = _wash_indicators(ind1, ind2)
    deleted = []
    newfields = []
    if field_position_global is None and field_position_local is None:
        # Remove all fields with tag 'tag'.
        for field in rec[tag]:
            if field[1] != ind1 or field[2] != ind2:
                newfields.append(field)
            else:
                deleted.append(field)
        rec[tag] = newfields
    elif field_position_global is not None:
        # Remove the field with 'field_position_global'.
        # NOTE(review): the condition below mixes 'and'/'or' without
        # parentheses -- a field is kept when (both indicators differ) OR
        # (the global position differs); confirm this precedence is intended.
        for field in rec[tag]:
            if (field[1] != ind1 and field[2] != ind2 or
                    field[4] != field_position_global):
                newfields.append(field)
            else:
                deleted.append(field)
        rec[tag] = newfields
    elif field_position_local is not None:
        # Remove the field with 'field_position_local'.
        try:
            del rec[tag][field_position_local]
        except IndexError:
            return []
    if not rec[tag]:
        # Tag is now empty, remove it.
        del rec[tag]
    return deleted
def record_delete_fields(rec, tag, field_positions_local=None):
    """
    Delete all (or selected) fields with MARC tag 'tag' from record 'rec'.
    @param rec: a record structure.
    @type rec: tuple
    @param tag: three letter field.
    @type tag: string
    @param field_positions_local: if set, the list of local positions
        (within the fields of the given tag) that should be deleted.
        If not set, every field with the given tag is deleted.
    @type field_positions_local: sequence
    @return: the list of deleted fields.
    @rtype: list
    @note: the record is modified in place.
    """
    if tag not in rec:
        return []
    kept, removed = [], []
    for position, field in enumerate(rec[tag]):
        delete_this = (field_positions_local is None or
                       position in field_positions_local)
        (removed if delete_this else kept).append(field)
    if kept:
        rec[tag] = kept
    else:
        # No field left under this tag: drop the tag entirely.
        del rec[tag]
    return removed
def record_add_fields(rec, tag, fields, field_position_local=None,
        field_position_global=None):
    """
    Adds the fields into the record at the required position. The
    position is specified by the tag and the field_position_local in
    the list of fields.
    @param rec: a record structure
    @param tag: the tag of the fields to be added
    @param fields: the list of field tuples to be added
    @param field_position_local: the field_position_local to which the
        field will be inserted. If not specified, appends the fields to
        the tag.
    @param field_position_global: the global position at which to insert
    @return: the field_position_local that was passed in (None when the
        fields were simply appended)
    """
    if field_position_local is None and field_position_global is None:
        # Append mode: add fields one by one, keeping their given order.
        for field in fields:
            record_add_field(rec, tag, ind1=field[1],
                ind2=field[2], subfields=field[0],
                controlfield_value=field[3])
    else:
        # Insert mode: reverse first so that inserting each field at the
        # same position ends up preserving the original order.
        # NOTE(review): list.reverse() mutates the caller's list in place --
        # confirm callers do not rely on 'fields' keeping its order.
        fields.reverse()
        for field in fields:
            record_add_field(rec, tag, ind1=field[1], ind2=field[2],
                subfields=field[0], controlfield_value=field[3],
                field_position_local=field_position_local,
                field_position_global=field_position_global)
    return field_position_local
def record_move_fields(rec, tag, field_positions_local,
        field_position_local=None):
    """
    Moves some fields to the position specified by
    'field_position_local'.
    Implemented as delete-then-re-add: the selected fields are removed
    with record_delete_fields() and re-inserted with record_add_fields().
    @param rec: a record structure as returned by create_record()
    @param tag: the tag of the fields to be moved
    @param field_positions_local: the positions of the
        fields to move
    @param field_position_local: insert the field before that
        field_position_local. If unspecified, appends the fields
    @return: the field_position_local if the operation was successful
    """
    fields = record_delete_fields(rec, tag,
        field_positions_local=field_positions_local)
    return record_add_fields(rec, tag, fields,
        field_position_local=field_position_local)
def record_delete_subfield(rec, tag, subfield_code, ind1=' ', ind2=' '):
    """Deletes all subfields with subfield_code in the record (in place),
    considering only the fields of 'tag' whose indicators match."""
    ind1, ind2 = _wash_indicators(ind1, ind2)
    for field in rec.get(tag, []):
        if field[1] != ind1 or field[2] != ind2:
            continue
        remaining = [subfield for subfield in field[0]
                     if subfield[0] != subfield_code]
        # Rewrite the subfield list in place so the field tuple is untouched.
        field[0][:] = remaining
def record_get_field(rec, tag, field_position_global=None,
        field_position_local=None):
    """
    Returns the matching field tuple. Exactly one of
    'field_position_global' or 'field_position_local' must be given.
    @param rec: a record structure as returned by create_record()
    @param tag: a 3 characters long string
    @param field_position_global: the global field position (record wise)
    @param field_position_local: the local field position (tag wise)
    @return: a field tuple (subfields, ind1, ind2, value,
        field_position_global)
    @raise InvenioBibRecordFieldError: if zero or two positions are given,
        or if no field matches.
    """
    if field_position_global is None and field_position_local is None:
        raise InvenioBibRecordFieldError("A field position is required to "
            "complete this operation.")
    elif field_position_global is not None and field_position_local is not None:
        raise InvenioBibRecordFieldError("Only one field position is required "
            "to complete this operation.")
    elif field_position_global is not None:
        # BUG FIX: this branch previously tested the truthiness of
        # field_position_global, so position 0 silently fell through to the
        # local lookup (indexing rec[tag] with None). 'is not None' routes
        # every supplied global position here.
        if tag not in rec:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        for field in rec[tag]:
            if field[4] == field_position_global:
                return field
        raise InvenioBibRecordFieldError("No field has the tag '%s' and the "
            "global field position '%d'." % (tag, field_position_global))
    else:
        try:
            return rec[tag][field_position_local]
        except KeyError:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        except IndexError:
            raise InvenioBibRecordFieldError("No field has the tag '%s' and "
                "the local field position '%d'." % (tag, field_position_local))
def record_replace_field(rec, tag, new_field, field_position_global=None,
        field_position_local=None):
    """
    Replaces a field with a new field, in place. Exactly one of
    'field_position_global' or 'field_position_local' must be given.
    @param rec: a record structure as returned by create_record()
    @param tag: the tag of the field to replace
    @param new_field: the replacement field tuple
    @raise InvenioBibRecordFieldError: if zero or two positions are given,
        or if no field matches.
    """
    if field_position_global is None and field_position_local is None:
        raise InvenioBibRecordFieldError("A field position is required to "
            "complete this operation.")
    elif field_position_global is not None and field_position_local is not None:
        raise InvenioBibRecordFieldError("Only one field position is required "
            "to complete this operation.")
    elif field_position_global is not None:
        # BUG FIX: use 'is not None' instead of truthiness so that a global
        # position of 0 is not silently routed to the local-position branch.
        if tag not in rec:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        replaced = False
        # Every field carrying this global position is replaced.
        for position, field in enumerate(rec[tag]):
            if field[4] == field_position_global:
                rec[tag][position] = new_field
                replaced = True
        if not replaced:
            raise InvenioBibRecordFieldError("No field has the tag '%s' and "
                "the global field position '%d'." %
                (tag, field_position_global))
    else:
        try:
            rec[tag][field_position_local] = new_field
        except KeyError:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        except IndexError:
            raise InvenioBibRecordFieldError("No field has the tag '%s' and "
                "the local field position '%d'." % (tag, field_position_local))
def record_get_subfields(rec, tag, field_position_global=None,
        field_position_local=None):
    """
    Returns the subfield of the matching field. One has to enter either a
    global field position or a local field position.
    @param rec: a record structure as returned by create_record()
    @param tag: a 3 characters long string
    @param field_position_global: the global field position (record wise)
    @param field_position_local: the local field position (tag wise)
    @return: a list of subfield tuples (subfield code, value).
    @rtype: list
    @raise InvenioBibRecordFieldError: propagated from record_get_field()
        when the field cannot be found.
    """
    # Field lookup (and error raising) is delegated to record_get_field;
    # element 0 of the field tuple is its subfield list.
    field = record_get_field(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    return field[0]
def record_delete_subfield_from(rec, tag, subfield_position,
        field_position_global=None, field_position_local=None):
    """Delete subfield from position specified by tag, field number and
    subfield position.
    If the deleted subfield was the last one of its field, the field is
    removed as well; if the field was the last one of the tag, the tag is
    removed from the record.
    @raise InvenioBibRecordFieldError: if the subfield does not exist.
    """
    subfields = record_get_subfields(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    try:
        del subfields[subfield_position]
    except IndexError:
        # Local import: only needed to render the record for the error text.
        from invenio.xmlmarc2textmarc import create_marc_record
        recordMarc = create_marc_record(rec, 0, {"text-marc": 1, "aleph-marc": 0})
        raise InvenioBibRecordFieldError("The record : %(recordCode)s does not contain the subfield "
            "'%(subfieldIndex)s' inside the field (local: '%(fieldIndexLocal)s, global: '%(fieldIndexGlobal)s' ) of tag '%(tag)s'." % \
            {"subfieldIndex" : subfield_position, \
            "fieldIndexLocal" : str(field_position_local), \
            "fieldIndexGlobal" : str(field_position_global), \
            "tag" : tag, \
            "recordCode" : recordMarc})
    if not subfields:
        # The field has no subfields left: drop the field itself.
        if field_position_global is not None:
            # NOTE(review): deletes from rec[tag] while enumerating it --
            # safe only if at most one field carries this global position;
            # confirm.
            for position, field in enumerate(rec[tag]):
                if field[4] == field_position_global:
                    del rec[tag][position]
        else:
            del rec[tag][field_position_local]
        if not rec[tag]:
            # The tag has no fields left: drop the tag.
            del rec[tag]
def record_add_subfield_into(rec, tag, subfield_code, value,
        subfield_position=None, field_position_global=None,
        field_position_local=None):
    """Add subfield into position specified by tag, field number and
    optionally by subfield position.
    When 'subfield_position' is None the subfield is appended at the end
    of the field; otherwise it is inserted before that position
    (list.insert semantics, so out-of-range positions are clamped).
    @raise InvenioBibRecordFieldError: if the field does not exist.
    """
    subfields = record_get_subfields(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    if subfield_position is None:
        subfields.append((subfield_code, value))
    else:
        subfields.insert(subfield_position, (subfield_code, value))
def record_modify_controlfield(rec, tag, controlfield_value,
        field_position_global=None, field_position_local=None):
    """Modify controlfield at position specified by tag and field number.
    Subfields, indicators and the global position of the existing field
    are preserved; only the controlfield value (element 3) is replaced.
    @raise InvenioBibRecordFieldError: if the field does not exist.
    """
    field = record_get_field(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    new_field = (field[0], field[1], field[2], controlfield_value, field[4])
    record_replace_field(rec, tag, new_field,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
def record_modify_subfield(rec, tag, subfield_code, value, subfield_position,
        field_position_global=None, field_position_local=None):
    """Modify subfield at position specified by tag, field number and
    subfield position.
    Replaces the (code, value) pair at 'subfield_position' in place.
    @raise InvenioBibRecordFieldError: if the field or the subfield
        position does not exist.
    """
    subfields = record_get_subfields(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    try:
        subfields[subfield_position] = (subfield_code, value)
    except IndexError:
        raise InvenioBibRecordFieldError("There is no subfield with position "
            "'%d'." % subfield_position)
def record_move_subfield(rec, tag, subfield_position, new_subfield_position,
        field_position_global=None, field_position_local=None):
    """Move subfield at position specified by tag, field number and
    subfield position to new subfield position.
    Note: only an invalid 'subfield_position' raises; list.insert clamps
    an out-of-range 'new_subfield_position' to the nearest end.
    @raise InvenioBibRecordFieldError: if the field or the source subfield
        position does not exist.
    """
    subfields = record_get_subfields(rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    try:
        subfield = subfields.pop(subfield_position)
        subfields.insert(new_subfield_position, subfield)
    except IndexError:
        raise InvenioBibRecordFieldError("There is no subfield with position "
            "'%d'." % subfield_position)
def record_get_field_value(rec, tag, ind1=" ", ind2=" ", code=""):
    """Returns first (string) value that matches specified field
    (tag, ind1, ind2, code) of the record (rec).
    Returns empty string if not found.
    Parameters (tag, ind1, ind2, code) can contain wildcard %.
    Difference between wildcard % and empty '':
    - Empty char specifies that we are not interested in a field which
      has one of the indicator(s)/subfield specified.
    - Wildcard specifies that we are interested in getting the value
      of the field whatever the indicator(s)/subfield is.
    For e.g. consider the following record in MARC:
        100C5 $$a val1
        555AB $$a val2
        555AB  val3
        555    $$a val4
        555A   val5
        >> record_get_field_value(record, '555', 'A', '', '')
        >> "val5"
        >> record_get_field_value(record, '555', 'A', '%', '')
        >> "val3"
        >> record_get_field_value(record, '555', 'A', '%', '%')
        >> "val2"
        >> record_get_field_value(record, '555', 'A', 'B', '')
        >> "val3"
        >> record_get_field_value(record, '555', '', 'B', 'a')
        >> ""
        >> record_get_field_value(record, '555', '', '', 'a')
        >> "val4"
        >> record_get_field_value(record, '555', '', '', '')
        >> ""
        >> record_get_field_value(record, '%%%', '%', '%', '%')
        >> "val1"
    @param rec: a record structure as returned by create_record()
    @param tag: a 3 characters long string
    @param ind1: a 1 character long string
    @param ind2: a 1 character long string
    @param code: a 1 character long string
    @return: string value (empty if nothing found)"""
    # Note: the code is quite redundant for speed reasons (avoid calling
    # functions or doing tests inside loops)
    ind1, ind2 = _wash_indicators(ind1, ind2)
    if '%' in tag:
        # Wild card in tag. Must find all corresponding fields
        if code == '':
            # Code not specified.
            for field_tag, fields in rec.items():
                if _tag_matches_pattern(field_tag, tag):
                    for field in fields:
                        if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                            # Return matching field value if not empty
                            # (field[3] is the controlfield value).
                            if field[3]:
                                return field[3]
        elif code == '%':
            # Code is wildcard. Take first subfield of first matching field
            for field_tag, fields in rec.items():
                if _tag_matches_pattern(field_tag, tag):
                    for field in fields:
                        if (ind1 in ('%', field[1]) and ind2 in ('%', field[2])
                                and field[0]):
                            # field[0][0] is the first (code, value) pair.
                            return field[0][0][1]
        else:
            # Code is specified. Take corresponding one
            for field_tag, fields in rec.items():
                if _tag_matches_pattern(field_tag, tag):
                    for field in fields:
                        if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                            for subfield in field[0]:
                                if subfield[0] == code:
                                    return subfield[1]
    else:
        # Tag is completely specified. Use tag as dict key
        if tag in rec:
            if code == '':
                # Code not specified.
                for field in rec[tag]:
                    if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                        # Return matching field value if not empty
                        # or return "" empty if not exist.
                        if field[3]:
                            return field[3]
            elif code == '%':
                # Code is wildcard. Take first subfield of first matching field
                for field in rec[tag]:
                    if (ind1 in ('%', field[1]) and ind2 in ('%', field[2]) and
                            field[0]):
                        return field[0][0][1]
            else:
                # Code is specified. Take corresponding one
                for field in rec[tag]:
                    if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                        for subfield in field[0]:
                            if subfield[0] == code:
                                return subfield[1]
    # Nothing was found
    return ""
def record_get_field_values(rec, tag, ind1=" ", ind2=" ", code="",
        filter_subfield_code="",
        filter_subfield_value="",
        filter_subfield_mode="e"):
    """Returns the list of (string) values for the specified field
    (tag, ind1, ind2, code) of the record (rec).
    List can be filtered. Use filter_subfield_code
    and filter_subfield_value to search
    only in fields that have these values inside them as a subfield.
    filter_subfield_mode can have 3 different values:
    'e' for exact search
    's' for substring search
    'r' for regexp search
    Returns empty list if nothing was found.
    Parameters (tag, ind1, ind2, code) can contain wildcard %.
    @param rec: a record structure as returned by create_record()
    @param tag: a 3 characters long string
    @param ind1: a 1 character long string
    @param ind2: a 1 character long string
    @param code: a 1 character long string
    @return: a list of strings"""
    tmp = []
    ind1, ind2 = _wash_indicators(ind1, ind2)
    if filter_subfield_code and filter_subfield_mode == "r":
        # Compile once; used for every candidate field below.
        reg_exp = re.compile(filter_subfield_value)
    tags = []
    if '%' in tag:
        # Wild card in tag. Must find all corresponding tags and fields
        tags = [k for k in rec if _tag_matches_pattern(k, tag)]
    elif rec and tag in rec:
        tags = [tag]
    # NOTE: the loops below reuse 'tag' as loop variable, shadowing the
    # parameter; the parameter is not needed after this point.
    if code == '':
        # Code not specified. Consider field value (without subfields)
        for tag in tags:
            for field in rec[tag]:
                if (ind1 in ('%', field[1]) and ind2 in ('%', field[2]) and
                        field[3]):
                    tmp.append(field[3])
    elif code == '%':
        # Code is wildcard. Consider all subfields
        for tag in tags:
            for field in rec[tag]:
                if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                    if filter_subfield_code:
                        if filter_subfield_mode == "e":
                            # Exact: the (code, value) pair must be present.
                            subfield_to_match = (filter_subfield_code, filter_subfield_value)
                            if subfield_to_match in field[0]:
                                for subfield in field[0]:
                                    tmp.append(subfield[1])
                        elif filter_subfield_mode == "s":
                            # Substring match on the filter subfield's value.
                            if (dict(field[0]).get(filter_subfield_code, '')).find(filter_subfield_value) > -1:
                                for subfield in field[0]:
                                    tmp.append(subfield[1])
                        elif filter_subfield_mode == "r":
                            if reg_exp.match(dict(field[0]).get(filter_subfield_code, '')):
                                for subfield in field[0]:
                                    tmp.append(subfield[1])
                    else:
                        for subfield in field[0]:
                            tmp.append(subfield[1])
    else:
        # Code is specified. Consider all corresponding subfields
        for tag in tags:
            for field in rec[tag]:
                if ind1 in ('%', field[1]) and ind2 in ('%', field[2]):
                    if filter_subfield_code:
                        if filter_subfield_mode == "e":
                            subfield_to_match = (filter_subfield_code, filter_subfield_value)
                            if subfield_to_match in field[0]:
                                for subfield in field[0]:
                                    if subfield[0] == code:
                                        tmp.append(subfield[1])
                        elif filter_subfield_mode == "s":
                            if (dict(field[0]).get(filter_subfield_code, '')).find(filter_subfield_value) > -1:
                                for subfield in field[0]:
                                    if subfield[0] == code:
                                        tmp.append(subfield[1])
                        elif filter_subfield_mode == "r":
                            if reg_exp.match(dict(field[0]).get(filter_subfield_code, '')):
                                for subfield in field[0]:
                                    if subfield[0] == code:
                                        tmp.append(subfield[1])
                    else:
                        for subfield in field[0]:
                            if subfield[0] == code:
                                tmp.append(subfield[1])
    # If tmp was not set, nothing was found
    return tmp
def record_xml_output(rec, tags=None, order_fn=None):
    """Generates the XML for record 'rec' and returns it as a string
    @rec: record
    @tags: list of tags to be printed
    @order_fn: optional ordering function handed to record_order_fields()
    """
    if tags is None:
        tags = []
    if isinstance(tags, str):
        tags = [tags]
    if tags and '001' not in tags:
        # Add the missing controlfield.
        # NOTE(review): this appends to the caller-supplied list in place --
        # confirm callers do not rely on 'tags' staying unchanged.
        tags.append('001')
    marcxml = ['<record>']
    # Add the tag 'tag' to each field in rec[tag]
    fields = []
    if rec is not None:
        for tag in rec:
            if not tags or tag in tags:
                for field in rec[tag]:
                    fields.append((tag, field))
        if order_fn is None:
            record_order_fields(fields)
        else:
            record_order_fields(fields, order_fn)
    for field in fields:
        marcxml.append(field_xml_output(field[1], field[0]))
    marcxml.append('</record>')
    return '\n'.join(marcxml)
def field_get_subfield_instances(field):
    """Returns the list of (code, value) subfield pairs of 'field'."""
    subfields = field[0]
    return subfields
def field_get_subfield_values(field_instance, code):
    """Return the values of every subfield of 'field_instance' whose
    subfield code equals 'code'."""
    values = []
    for subfield_code, subfield_value in field_instance[0]:
        if subfield_code == code:
            values.append(subfield_value)
    return values
def field_get_subfield_codes(field_instance):
    """Return the list of subfield codes present in 'field_instance'
    (duplicates preserved, in order)."""
    return [subfield[0] for subfield in field_instance[0]]
def field_add_subfield(field, code, value):
    """Appends the subfield (code, value) to 'field', in place."""
    subfields = field[0]
    subfields.append((code, value))
def record_order_fields(rec, fun="_order_by_ord"):
    """Orders field inside record 'rec' according to a function
    named by 'fun', in place.
    NOTE(review): eval() of the comparator name -- only ever call this
    with trusted, hard-coded strings. Passing a callable (as
    record_xml_output's order_fn parameter suggests) would make eval()
    fail; confirm intended usage. The cmp-style list.sort(comparator)
    call is Python 2 only.
    """
    rec.sort(eval(fun))
def field_xml_output(field, tag):
    """Generates the MARCXML serialization of 'field' under 'tag' and
    returns it as a string."""
    lines = []
    if field[3]:
        # A non-empty value (element 3) marks a controlfield.
        lines.append(' <controlfield tag="%s">%s</controlfield>' %
            (tag, encode_for_xml(field[3])))
    else:
        lines.append(' <datafield tag="%s" ind1="%s" ind2="%s">' %
            (tag, field[1], field[2]))
        for subfield in field[0]:
            lines.append(_subfield_xml_output(subfield))
        lines.append(' </datafield>')
    return '\n'.join(lines)
def record_extract_oai_id(record):
    """Returns the OAI ID of the record, or '' when no value in the
    configured OAI-ID tag matches the expected 'oai...' shape."""
    # The configured tag string packs tag/ind1/ind2/subfield-code.
    oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
    tag = oaiid_tag[0:3]
    ind1 = oaiid_tag[3]
    ind2 = oaiid_tag[4]
    subfield = oaiid_tag[5]
    values = record_get_field_values(record, tag, ind1, ind2, subfield)
    oai_id_regex = re.compile("oai[a-zA-Z0-9/.:]+")
    for value in values:
        value = value.strip()
        if oai_id_regex.match(value):
            return value
    return ""
def record_extract_dois(record):
    """Returns the DOI(s) of the record.
    DOIs are looked up in MARC 024 fields with ind1='7', ind2='_' whose
    $2 (source) subfield equals 'doi' (case-insensitively); the
    identifiers themselves are taken from the $a subfields.
    @return: a list of DOI strings (possibly empty)
    """
    record_dois = []
    tag = "024"
    ind1 = "7"
    ind2 = "_"
    subfield_source_code = "2"
    subfield_value_code = "a"
    identifiers_fields = record_get_field_instances(record, tag, ind1, ind2)
    for identifer_field in identifiers_fields:
        if 'doi' in [val.lower() for val in field_get_subfield_values(identifer_field, subfield_source_code)]:
            record_dois.extend(field_get_subfield_values(identifer_field, subfield_value_code))
    return record_dois
def print_rec(rec, format=1, tags=None):
    """
    Serializes a record.
    format = 1 -- XML
    format = 2 -- HTML (not implemented)
    @param tags: list of tags to be printed
    @return: the serialization, or '' for any unsupported format
    """
    if tags is None:
        tags = []
    if format != 1:
        # Only the XML format is implemented.
        return ''
    return record_xml_output(rec, tags)
def print_recs(listofrec, format=1, tags=None):
    """
    Prints a list of records.
    @param format: 1 XML, 2 HTML (not implemented)
    @param tags: list of tags to be printed
    @return: the concatenation of the serialized records, each preceded
        by a newline; '' if 'listofrec' is not a list
    """
    if tags is None:
        tags = []
    # isinstance replaces the brittle type-name comparison and also
    # accepts list subclasses.
    if not isinstance(listofrec, list):
        return ""
    # join instead of repeated "%s\n%s" formatting (which is quadratic).
    return "".join("\n%s" % print_rec(rec, format, tags)
                   for rec in listofrec)
def concat(alist):
    """Concatenates a list of lists into one flat list."""
    return [item for sublist in alist for item in sublist]
def record_find_field(rec, tag, field, strict=False):
    """
    Returns the global and local positions of the first occurrence
    of the field in a record.
    @param rec: A record dictionary structure
    @type rec: dictionary
    @param tag: The tag of the field to search for
    @type tag: string
    @param field: A field tuple as returned by create_field()
    @type field: tuple
    @param strict: A boolean describing the search method. If strict
        is False, then the order of the subfields doesn't
        matter. Default search method is strict.
    @type strict: boolean
    @return: A tuple of (global_position, local_position) or a
        tuple (None, None) if the field is not present.
    @rtype: tuple
    @raise InvenioBibRecordFieldError: If the provided field is invalid.
    """
    # Validate up front; the exception simply propagates to the caller
    # (the previous try/except here merely re-raised it).
    _check_field_validity(field)
    for local_position, candidate in enumerate(rec.get(tag, [])):
        if _compare_fields(field, candidate, strict):
            # Element 4 of a field tuple is its global position.
            return (candidate[4], local_position)
    return (None, None)
def record_strip_empty_volatile_subfields(rec):
    """
    Removes unchanged volatile subfields from the record, in place.
    A subfield is considered volatile when its value still begins with
    the "VOLATILE:" marker.
    """
    for fields in rec.values():
        for field in fields:
            # Rewrite the subfield list in place, dropping volatile entries.
            field[0][:] = [subfield for subfield in field[0]
                           if not subfield[1].startswith("VOLATILE:")]
def record_strip_empty_fields(rec, tag=None):
    """
    Removes empty subfields and fields from the record. If 'tag' is not None, only
    a specific tag of the record will be stripped, otherwise the whole record.
    Subfield values are also stripped of surrounding whitespace.
    @param rec: A record dictionary structure
    @type rec: dictionary
    @param tag: The tag of the field to strip empty fields from
    @type tag: string
    """
    # Check whole record
    if tag is None:
        # Snapshot of the keys (a list on Python 2), then recurse per tag.
        tags = rec.keys()
        for tag in tags:
            record_strip_empty_fields(rec, tag)
    # Check specific tag of the record
    elif tag in rec:
        # in case of a controlfield
        if tag[:2] == '00':
            # Drop the tag when there is no field or its value is empty.
            if len(rec[tag]) == 0 or not rec[tag][0][3]:
                del rec[tag]
        #in case of a normal field
        else:
            fields = []
            for field in rec[tag]:
                subfields = []
                for subfield in field[0]:
                    # check if the subfield has been given a value
                    if subfield[1]:
                        subfield = (subfield[0], subfield[1].strip()) # Always strip values
                        subfields.append(subfield)
                if len(subfields) > 0:
                    # Rebuild the field with only the non-empty subfields.
                    new_field = create_field(subfields, field[1], field[2],
                        field[3])
                    fields.append(new_field)
            if len(fields) > 0:
                rec[tag] = fields
            else:
                del rec[tag]
def record_strip_controlfields(rec):
    """
    Removes all non-empty controlfields from the record, in place.
    @param rec: A record dictionary structure
    @type rec: dictionary
    """
    # Snapshot the keys first: entries are deleted while scanning, which is
    # unsafe on a live key view (RuntimeError on Python 3).
    for tag in list(rec.keys()):
        if tag[:2] == '00' and rec[tag][0][3]:
            del rec[tag]
def record_order_subfields(rec, tag=None):
    """ Orders subfields of a record alphabetically by subfield code,
    in place. If 'tag' is not None, only that tag of the record is
    reordered, otherwise the whole record.
    @param rec: bibrecord
    @type rec: bibrec
    @param tag: tag whose subfields will be ordered
    @type tag: string
    """
    if rec is None:
        return rec
    if tag is None:
        # Snapshot of the keys, then recurse per tag.
        for rec_tag in list(rec.keys()):
            record_order_subfields(rec, rec_tag)
    elif tag in rec:
        # enumerate() replaces the Python-2-only xrange() index loop.
        for position, field in enumerate(rec[tag]):
            # Order subfields alphabetically by subfield code.
            ordered_subfields = sorted(field[0],
                key=lambda subfield: subfield[0])
            rec[tag][position] = (ordered_subfields, field[1], field[2],
                field[3], field[4])
def record_empty(rec):
    """Return True when the record holds no fields besides the 001 and
    005 controlfields."""
    # Plain iteration over the dict replaces the Python-2-only iterkeys().
    for key in rec:
        if key not in ('001', '005'):
            return False
    return True
### IMPLEMENTATION / INVISIBLE FUNCTIONS
def _compare_fields(field1, field2, strict=True):
"""
Compares 2 fields. If strict is True, then the order of the
subfield will be taken care of, if not then the order of the
subfields doesn't matter.
@return: True if the field are equivalent, False otherwise.
"""
if strict:
# Return a simple equal test on the field minus the position.
return field1[:4] == field2[:4]
else:
if field1[1:4] != field2[1:4]:
# Different indicators or controlfield value.
return False
else:
# Compare subfields in a loose way.
return set(field1[0]) == set(field2[0])
def _check_field_validity(field):
"""
Checks if a field is well-formed.
@param field: A field tuple as returned by create_field()
@type field: tuple
@raise InvenioBibRecordFieldError: If the field is invalid.
"""
if type(field) not in (list, tuple):
raise InvenioBibRecordFieldError("Field of type '%s' should be either "
"a list or a tuple." % type(field))
if len(field) != 5:
raise InvenioBibRecordFieldError("Field of length '%d' should have 5 "
"elements." % len(field))
if type(field[0]) not in (list, tuple):
raise InvenioBibRecordFieldError("Subfields of type '%s' should be "
"either a list or a tuple." % type(field[0]))
if type(field[1]) is not str:
raise InvenioBibRecordFieldError("Indicator 1 of type '%s' should be "
"a string." % type(field[1]))
if type(field[2]) is not str:
raise InvenioBibRecordFieldError("Indicator 2 of type '%s' should be "
"a string." % type(field[2]))
if type(field[3]) is not str:
raise InvenioBibRecordFieldError("Controlfield value of type '%s' "
"should be a string." % type(field[3]))
if type(field[4]) is not int:
raise InvenioBibRecordFieldError("Global position of type '%s' should "
"be an int." % type(field[4]))
for subfield in field[0]:
if (type(subfield) not in (list, tuple) or
len(subfield) != 2 or
type(subfield[0]) is not str or
type(subfield[1]) is not str):
raise InvenioBibRecordFieldError("Subfields are malformed. "
"Should a list of tuples of 2 strings.")
def _shift_field_positions_global(record, start, delta=1):
"""Shifts all global field positions with global field positions
higher or equal to 'start' from the value 'delta'."""
if not delta:
return
for tag, fields in record.items():
newfields = []
for field in fields:
if field[4] < start:
newfields.append(field)
else:
# Increment the global field position by delta.
newfields.append(tuple(list(field[:4]) + [field[4] + delta]))
record[tag] = newfields
def _tag_matches_pattern(tag, pattern):
"""Returns true if MARC 'tag' matches a 'pattern'.
'pattern' is plain text, with % as wildcard
Both parameters must be 3 characters long strings.
For e.g.
>> _tag_matches_pattern("909", "909") -> True
>> _tag_matches_pattern("909", "9%9") -> True
>> _tag_matches_pattern("909", "9%8") -> False
@param tag: a 3 characters long string
@param pattern: a 3 characters long string
@return: False or True"""
for char1, char2 in zip(tag, pattern):
if char2 not in ('%', char1):
return False
return True
def validate_record_field_positions_global(record):
    """
    Checks if the global field positions in the record are valid ie no
    duplicate global field positions and local field positions in the
    list of fields are ascending.
    @param record: the record data structure
    @return: the first error found as a string or None if no error was found
    """
    # Set of every global position seen so far, across all tags.
    all_field_positions = set()
    for tag, fields in record.items():
        previous_field_position_global = -1
        for field in fields:
            if field[4] < previous_field_position_global:
                return "Non ascending global field positions in tag '%s'." % tag
            previous_field_position_global = field[4]
            if field[4] in all_field_positions:
                return ("Duplicate global field position '%d' in tag '%s'" %
                    (field[4], tag))
            # BUGFIX: record the position. Previously nothing was ever
            # added to the collection, so the duplicate check above could
            # never trigger.
            all_field_positions.add(field[4])
def _record_sort_by_indicators(record):
    """Sorts the fields inside the record by indicators.

    The record is modified in place, one tag at a time.
    """
    for tag in list(record):
        record[tag] = _fields_sort_by_indicators(record[tag])
def _fields_sort_by_indicators(fields):
"""Sorts a set of fields by their indicators. Returns a sorted list
with correct global field positions."""
field_dict = {}
field_positions_global = []
for field in fields:
field_dict.setdefault(field[1:3], []).append(field)
field_positions_global.append(field[4])
indicators = field_dict.keys()
indicators.sort()
field_list = []
for indicator in indicators:
for field in field_dict[indicator]:
field_list.append(field[:4] + (field_positions_global.pop(0),))
return field_list
def _select_parser(parser=None):
    """Selects the more relevant parser based on the parsers available
    and on the parser desired by the user."""
    if not AVAILABLE_PARSERS:
        # No parser is available. This is bad.
        return None
    if parser is not None and parser in AVAILABLE_PARSERS:
        # The requested parser is installed: honour the request.
        return parser
    # Unknown or unspecified parser: fall back to the best available one.
    return AVAILABLE_PARSERS[0]
def _create_record_lxml(marcxml,
                        verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
                        correct=CFG_BIBRECORD_DEFAULT_CORRECT,
                        keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record object using the LXML parser.
    The returned record maps tag -> list of field tuples
    (subfields, ind1, ind2, controlfield_value, global_position).
    If correct == 1, then perform DTD validation
    If correct == 0, then do not perform DTD validation
    If verbose == 0, the parser will not give warnings.
    If 1 <= verbose <= 3, the parser will not give errors, but will warn
    the user about possible mistakes (implement me!)
    If verbose > 3 then the parser will be strict and will stop in case of
    well-formedness errors or DTD errors."""
    # recover=True lets lxml repair mildly broken XML instead of raising.
    parser = etree.XMLParser(dtd_validation = correct,
                             recover = verbose <= 3)
    if correct:
        # Wrap the input in a <collection> element and attach the MARC21
        # DTD so that dtd_validation above has something to validate against.
        marcxml = ('<?xml version="1.0" encoding="UTF-8"?>\n'
                   '<!DOCTYPE collection SYSTEM "file://%s">\n'
                   '<collection>\n%s\n</collection>' \
                   % (CFG_MARC21_DTD, marcxml))
    try:
        tree = etree.parse(StringIO(marcxml), parser)
        # parser errors are located in parser.error_log
        # if 1 <= verbose <=3 then show them to the user?
        # if verbose == 0 then continue
        # if verbose >3 then an exception will be thrown
    except Exception, e:
        raise InvenioBibRecordParserError(str(e))
    record = {}
    field_position_global = 0
    controlfield_iterator = tree.iter(tag='controlfield')
    for controlfield in controlfield_iterator:
        # '!' marks a missing attribute; repaired later by _correct_record().
        tag = controlfield.attrib.get('tag', '!').encode("UTF-8")
        # Controlfields carry no indicators and no subfields.
        ind1 = ' '
        ind2 = ' '
        text = controlfield.text
        if text is None:
            text = ''
        else:
            text = text.encode("UTF-8")
        subfields = []
        # Empty controlfields are kept only when keep_singletons is set.
        if text or keep_singletons:
            field_position_global += 1
            record.setdefault(tag, []).append((subfields, ind1, ind2, text, field_position_global))
    datafield_iterator = tree.iter(tag='datafield')
    for datafield in datafield_iterator:
        tag = datafield.attrib.get('tag', '!').encode("UTF-8")
        ind1 = datafield.attrib.get('ind1', '!').encode("UTF-8")
        ind2 = datafield.attrib.get('ind2', '!').encode("UTF-8")
        #ind1, ind2 = _wash_indicators(ind1, ind2)
        # Inlined washing: empty or '_' indicators become a blank space.
        if ind1 in ('', '_'): ind1 = ' '
        if ind2 in ('', '_'): ind2 = ' '
        subfields = []
        subfield_iterator = datafield.iter(tag='subfield')
        for subfield in subfield_iterator:
            code = subfield.attrib.get('code', '!').encode("UTF-8")
            text = subfield.text
            if text is None:
                text = ''
            else:
                text = text.encode("UTF-8")
            if text or keep_singletons:
                subfields.append((code, text))
        if subfields or keep_singletons:
            # Datafields have no controlfield value of their own.
            text = ''
            field_position_global += 1
            record.setdefault(tag, []).append((subfields, ind1, ind2, text, field_position_global))
    return record
def _create_record_rxp(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
                       correct=CFG_BIBRECORD_DEFAULT_CORRECT,
                       keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record object using the RXP parser.
    The returned record maps tag -> list of field tuples
    (subfields, ind1, ind2, controlfield_value, global_position).
    If verbose>3 then the parser will be strict and will stop in case of
    well-formedness errors or DTD errors.
    If verbose=0, the parser will not give warnings.
    If 0 < verbose <= 3, the parser will not give errors, but will warn
    the user about possible mistakes
    correct != 0 -> We will try to correct errors such as missing
    attributes
    correct = 0 -> there will not be any attempt to correct errors"""
    if correct:
        # Note that with pyRXP < 1.13 a memory leak has been found
        # involving DTD parsing. So enable correction only if you have
        # pyRXP 1.13 or greater.
        marcxml = ('<?xml version="1.0" encoding="UTF-8"?>\n'
                   '<!DOCTYPE collection SYSTEM "file://%s">\n'
                   '<collection>\n%s\n</collection>' % (CFG_MARC21_DTD, marcxml))
    # Create the pyRXP parser.
    pyrxp_parser = pyRXP.Parser(ErrorOnValidityErrors=0, ProcessDTD=1,
        ErrorOnUnquotedAttributeValues=0, srcName='string input')
    if verbose > 3:
        # Strict mode: make validity problems fatal.
        pyrxp_parser.ErrorOnValidityErrors = 1
        pyrxp_parser.ErrorOnUnquotedAttributeValues = 1
    try:
        root = pyrxp_parser.parse(marcxml)
    except pyRXP.error, ex1:
        raise InvenioBibRecordParserError(str(ex1))
    # If record is enclosed in a collection tag, extract it.
    if root[TAG] == 'collection':
        children = _get_children_by_tag_name_rxp(root, 'record')
        if not children:
            return {}
        root = children[0]
    record = {}
    # This is needed because of the record_xml_output function, where we
    # need to know the order of the fields.
    field_position_global = 1
    # Consider the control fields.
    for controlfield in _get_children_by_tag_name_rxp(root, 'controlfield'):
        if controlfield[CHILDREN]:
            value = ''.join([n for n in controlfield[CHILDREN]])
            # Construct the field tuple.
            field = ([], ' ', ' ', value, field_position_global)
            record.setdefault(controlfield[ATTRS]['tag'], []).append(field)
            field_position_global += 1
        elif keep_singletons:
            # Empty controlfields are kept only on request.
            field = ([], ' ', ' ', '', field_position_global)
            record.setdefault(controlfield[ATTRS]['tag'], []).append(field)
            field_position_global += 1
    # Consider the data fields.
    for datafield in _get_children_by_tag_name_rxp(root, 'datafield'):
        subfields = []
        for subfield in _get_children_by_tag_name_rxp(datafield, 'subfield'):
            if subfield[CHILDREN]:
                value = _get_children_as_string_rxp(subfield[CHILDREN])
                # '!' marks a missing attribute; repaired by _correct_record().
                subfields.append((subfield[ATTRS].get('code', '!'), value))
            elif keep_singletons:
                subfields.append((subfield[ATTRS].get('code', '!'), ''))
        if subfields or keep_singletons:
            # Create the field.
            tag = datafield[ATTRS].get('tag', '!')
            ind1 = datafield[ATTRS].get('ind1', '!')
            ind2 = datafield[ATTRS].get('ind2', '!')
            ind1, ind2 = _wash_indicators(ind1, ind2)
            # Construct the field tuple.
            field = (subfields, ind1, ind2, '', field_position_global)
            record.setdefault(tag, []).append(field)
            field_position_global += 1
    return record
def _create_record_from_document(document,
                                 keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record from the document (of type
    xml.dom.minidom.Document or Ft.Xml.Domlette.Document).
    The returned record maps tag -> list of field tuples
    (subfields, ind1, ind2, controlfield_value, global_position)."""
    # Find the first element child: either <collection> or <record>.
    root = None
    for node in document.childNodes:
        if node.nodeType == node.ELEMENT_NODE:
            root = node
            break
    if root is None:
        return {}
    if root.tagName == 'collection':
        # Unwrap the collection and work on its first record only.
        children = _get_children_by_tag_name(root, 'record')
        if not children:
            return {}
        root = children[0]
    field_position_global = 1
    record = {}
    for controlfield in _get_children_by_tag_name(root, "controlfield"):
        tag = controlfield.getAttributeNS(None, "tag").encode('utf-8')
        text_nodes = controlfield.childNodes
        value = ''.join([n.data for n in text_nodes]).encode("utf-8")
        # Empty controlfields are kept only when keep_singletons is set.
        if value or keep_singletons:
            field = ([], " ", " ", value, field_position_global)
            record.setdefault(tag, []).append(field)
            field_position_global += 1
    for datafield in _get_children_by_tag_name(root, "datafield"):
        subfields = []
        for subfield in _get_children_by_tag_name(datafield, "subfield"):
            value = _get_children_as_string(subfield.childNodes).encode("utf-8")
            if value or keep_singletons:
                code = subfield.getAttributeNS(None, 'code').encode("utf-8")
                # '!' marks a missing code; repaired by _correct_record().
                subfields.append((code or '!', value))
        if subfields or keep_singletons:
            tag = datafield.getAttributeNS(None, "tag").encode("utf-8") or '!'
            ind1 = datafield.getAttributeNS(None, "ind1").encode("utf-8")
            ind2 = datafield.getAttributeNS(None, "ind2").encode("utf-8")
            ind1, ind2 = _wash_indicators(ind1, ind2)
            field = (subfields, ind1, ind2, "", field_position_global)
            record.setdefault(tag, []).append(field)
            field_position_global += 1
    return record
def _create_record_minidom(marcxml,
                           keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record using minidom.
    Thin wrapper: parse, then delegate to _create_record_from_document()."""
    try:
        dom = xml.dom.minidom.parseString(marcxml)
    # ExpatError covers all well-formedness errors raised by minidom.
    except xml.parsers.expat.ExpatError, ex1:
        raise InvenioBibRecordParserError(str(ex1))
    return _create_record_from_document(dom, keep_singletons=keep_singletons)
def _create_record_4suite(marcxml,
                          keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Creates a record using the 4suite parser.
    Thin wrapper: parse, then delegate to _create_record_from_document()."""
    try:
        # "urn:dummy" is only a base URI; no resource is actually fetched.
        dom = Ft.Xml.Domlette.NonvalidatingReader.parseString(marcxml,
                                                              "urn:dummy")
    except Ft.Xml.ReaderException, ex1:
        raise InvenioBibRecordParserError(ex1.message)
    return _create_record_from_document(dom, keep_singletons=keep_singletons)
def _concat(alist):
"""Concats a list of lists"""
return [element for single_list in alist for element in single_list]
def _subfield_xml_output(subfield):
    """Generates the XML for a subfield object and return it as a string"""
    code = subfield[0]
    # The value must be escaped before being embedded in the XML.
    value = encode_for_xml(subfield[1])
    return ' <subfield code="%s">%s</subfield>' % (code, value)
def _order_by_ord(field1, field2):
    """Function used to order the fields according to their ord value"""
    # Python 2 cmp-style comparator. Arguments are presumably (tag, field)
    # pairs -- field[1][4] is the field's global position; verify against
    # the sort() call sites.
    return cmp(field1[1][4], field2[1][4])
def _order_by_tags(field1, field2):
    """Function used to order the fields according to the tags"""
    # Python 2 cmp-style comparator; element 0 is presumably the MARC tag
    # of a (tag, field) pair -- verify against the sort() call sites.
    return cmp(field1[0], field2[0])
def _get_children_by_tag_name(node, name):
"""Retrieves all children from node 'node' with name 'name' and
returns them as a list."""
try:
return [child for child in node.childNodes if child.nodeName == name]
except TypeError:
return []
def _get_children_by_tag_name_rxp(node, name):
    """Retrieves all children from 'children' with tag name 'tag' and
    returns them as a list.
    children is a list returned by the RXP parser"""
    try:
        matching = [kid for kid in node[CHILDREN] if kid[TAG] == name]
    except TypeError:
        # The node carries no iterable child list: behave as "no children".
        matching = []
    return matching
def _get_children_as_string(node):
"""
Iterates through all the children of a node and returns one string
containing the values from all the text-nodes recursively.
"""
out = []
if node:
for child in node:
if child.nodeType == child.TEXT_NODE:
out.append(child.data)
else:
out.append(_get_children_as_string(child.childNodes))
return ''.join(out)
def _get_children_as_string_rxp(node):
"""
RXP version of _get_children_as_string():
Iterates through all the children of a node and returns one string
containing the values from all the text-nodes recursively.
"""
out = []
if node:
for child in node:
if type(child) is str:
out.append(child)
else:
out.append(_get_children_as_string_rxp(child[CHILDREN]))
return ''.join(out)
def _wash_indicators(*indicators):
"""
Washes the values of the indicators. An empty string or an
underscore is replaced by a blank space.
@param indicators: a series of indicators to be washed
@return: a list of washed indicators
"""
return [indicator in ('', '_') and ' ' or indicator
for indicator in indicators]
def _correct_record(record):
"""
Checks and corrects the structure of the record.
@param record: the record data structure
@return: a list of errors found
"""
errors = []
for tag in record.keys():
upper_bound = '999'
n = len(tag)
if n > 3:
i = n - 3
while i > 0:
upper_bound = '%s%s' % ('0', upper_bound)
i -= 1
# Missing tag. Replace it with dummy tag '000'.
if tag == '!':
errors.append((1, '(field number(s): ' +
str([f[4] for f in record[tag]]) + ')'))
record['000'] = record.pop(tag)
tag = '000'
elif not ('001' <= tag <= upper_bound or tag in ('FMT', 'FFT', 'BDR', 'BDM')):
errors.append(2)
record['000'] = record.pop(tag)
tag = '000'
fields = []
for field in record[tag]:
# Datafield without any subfield.
if field[0] == [] and field[3] == '':
errors.append((8, '(field number: ' + str(field[4]) + ')'))
subfields = []
for subfield in field[0]:
if subfield[0] == '!':
errors.append((3, '(field number: ' + str(field[4]) + ')'))
newsub = ('', subfield[1])
else:
newsub = subfield
subfields.append(newsub)
if field[1] == '!':
errors.append((4, '(field number: ' + str(field[4]) + ')'))
ind1 = " "
else:
ind1 = field[1]
if field[2] == '!':
errors.append((5, '(field number: ' + str(field[4]) + ')'))
ind2 = " "
else:
ind2 = field[2]
fields.append((subfields, ind1, ind2, field[3], field[4]))
record[tag] = fields
return errors
def _warning(code):
    """It returns a warning message of code 'code'.
    If code = (cd, str) it returns the warning message of code 'cd'
    and appends str at the end"""
    if isinstance(code, str):
        # Already a ready-made message: pass it through unchanged.
        return code
    message = ''
    if isinstance(code, tuple):
        # NOTE(review): the guard inspects code[0], so the tuple is only
        # unpacked when its FIRST element is a string -- confirm this is
        # intended for integer warning codes described in the docstring.
        if isinstance(code[0], str):
            message = code[1]
            code = code[0]
    # Unknown codes map to an empty prefix.
    return CFG_BIBRECORD_WARNING_MSGS.get(code, '') + message
def _warnings(alist):
    """Applies the function _warning() to every element of *alist*."""
    messages = []
    for entry in alist:
        messages.append(_warning(entry))
    return messages
def _compare_lists(list1, list2, custom_cmp):
"""Compares twolists using given comparing function
@param list1: first list to compare
@param list2: second list to compare
@param custom_cmp: a function taking two arguments (element of
list 1, element of list 2) and
@return: True or False depending if the values are the same"""
if len(list1) != len(list2):
return False
for element1, element2 in zip(list1, list2):
if not custom_cmp(element1, element2):
return False
return True
| gpl-2.0 |
hack4sec/hbs-cli | tests/unit/test_ResultParseThread.py | 1 | 8944 | # -*- coding: utf-8 -*-
"""
This is part of HashBruteStation software
Docs EN: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station_en
Docs RU: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Class of unit tests for WorkerThread
"""
import sys
import time
import pytest
sys.path.append('../../')
from libs.common import file_put_contents
from classes.ResultParseThread import ResultParseThread
from classes.HbsException import HbsException
from CommonUnit import CommonUnit
class Test_ResultParseThread(CommonUnit):
    """ Class of unit tests for ResultParseThread """
    # Thread under test; created fresh in setup().
    thrd = None

    def setup(self):
        """ Setup tests """
        self._clean_db()
        self._add_work_task()
        self.thrd = ResultParseThread()
        self.thrd.current_work_task_id = 1
        self.thrd.catch_exceptions = False

    def teardown(self):
        """ Teardown tests """
        if isinstance(self.thrd, ResultParseThread):
            self.thrd.available = False
            time.sleep(1)
        del self.thrd
        self._clean_db()

    def test_update_status(self):
        """ update_status() must only touch the current work task """
        self._add_work_task(id=2)
        assert self.db.fetch_one("SELECT status FROM task_works WHERE id=1") == 'wait'
        assert self.db.fetch_one("SELECT status FROM task_works WHERE id=2") == 'wait'
        self.thrd.update_status('done')
        assert self.db.fetch_one("SELECT status FROM task_works WHERE id=1") == 'done'
        assert self.db.fetch_one("SELECT status FROM task_works WHERE id=2") == 'wait'

    def test_get_work_task_data(self):
        """ get_work_task_data() must return the row of the current task """
        data1 = self.thrd.get_work_task_data()
        test_data1 = {'id': 1, 'task_id': 1, 'hashlist_id': 1, 'status': 'wait'}
        for field in test_data1:
            assert data1[field] == test_data1[field]
        self._add_work_task(id=2, hashlist_id=3, task_id=4, status='outparsing')
        # BUGFIX: point the thread at task 2 (was 1) and compare against
        # test_data2 (was test_data1); previously the second half of this
        # test silently re-checked task 1 and test_data2 was never used.
        self.thrd.current_work_task_id = 2
        data2 = self.thrd.get_work_task_data()
        test_data2 = {'id': 2, 'task_id': 4, 'hashlist_id': 3, 'status': 'outparsing'}
        for field in test_data2:
            assert data2[field] == test_data2[field]

    def test_update_work_task_field(self):
        """ Testing update_work_task_field() method """
        self.thrd.update_work_task_field('status', 'done')
        self.thrd.update_work_task_field('hashlist_id', '2')
        assert self.db.fetch_one("SELECT status FROM task_works WHERE id=1") == 'done'
        assert self.db.fetch_one("SELECT hashlist_id FROM task_works WHERE id=1") == 2

    def test_update_all_hashlists_counts(self):
        """ Test of update_all_hashlists_counts_by_alg_id() """
        # Three hashlists of the same algorithm with a known mix of
        # cracked/uncracked hashes.
        self._add_hashlist()
        self._add_hash(hash='a')
        self._add_hash(hash='b', cracked=1, password='1')
        self._add_hashlist(id=2)
        self._add_hash(hashlist_id=2, hash='a')
        self._add_hash(hashlist_id=2, hash='b')
        self._add_hash(hashlist_id=2, hash='c', cracked=1, password='1')
        self._add_hashlist(id=3)
        self._add_hash(hashlist_id=3, hash='a')
        self._add_hash(hashlist_id=3, hash='b')
        self._add_hash(hashlist_id=3, hash='c')
        self._add_hash(hashlist_id=3, hash='d')
        self._add_hash(hashlist_id=3, hash='e', cracked=1, password='2')
        self._add_hash(hashlist_id=3, hash='f', cracked=1, password='3')
        self.thrd.update_all_hashlists_counts_by_alg_id(3)
        assert self.db.fetch_one("SELECT uncracked FROM hashlists WHERE id=1") == 1
        assert self.db.fetch_one("SELECT cracked FROM hashlists WHERE id=1") == 1
        assert self.db.fetch_one("SELECT uncracked FROM hashlists WHERE id=2") == 2
        assert self.db.fetch_one("SELECT cracked FROM hashlists WHERE id=2") == 1
        assert self.db.fetch_one("SELECT uncracked FROM hashlists WHERE id=3") == 4
        assert self.db.fetch_one("SELECT cracked FROM hashlists WHERE id=3") == 2

    def test_get_current_work_task(self):
        """ Test of get_current_work_task_id() """
        assert self.thrd.get_current_work_task_id() == 1
        self.thrd.current_work_task_id = 2
        assert self.thrd.get_current_work_task_id() == 2
        # An unset current task must raise.
        with pytest.raises(HbsException) as ex:
            self.thrd.current_work_task_id = None
            self.thrd.get_current_work_task_id()
        assert "Current task for work not set" in str(ex)

    def test_get_waiting_task_for_work(self):
        """ Test of get_waiting_task_for_work() """
        self._add_work_task(id=2, status='waitoutparse')
        assert self.thrd.get_waiting_task_for_work() == 2
        assert self.thrd.current_work_task_id == 2
        self.db.update("task_works", {'status': 'waitoutparse'}, "id = 1")
        assert self.thrd.get_waiting_task_for_work() == 1
        assert self.thrd.current_work_task_id == 1
        # With no task waiting, the current task id must be cleared.
        self.db.q("UPDATE task_works SET status = 'wait'")
        self.thrd.get_waiting_task_for_work()
        with pytest.raises(HbsException) as ex:
            self.thrd.get_current_work_task_id()
        assert "Current task for work not set" in str(ex)
        assert self.thrd.current_work_task_id is None

    def test_get_hashlist_data(self):
        """ Test of get_hashlist_data() """
        self._add_hashlist()
        assert self.db.fetch_row("SELECT * FROM hashlists WHERE id = 1") == self.thrd.get_hashlist_data(1)
        assert self.thrd.get_hashlist_data(33) is None

    # Parametrized fixtures for test_parse_outfile_and_fill_found_hashes:
    # (have_salt, hashlists, hashes, outfile_content).
    test_data = [
        (
            0,
            [
                {'id': 2, "name": "test2", 'alg_id': 3},
                {'id': 3, "name": "test3", 'alg_id': 3},
                {'id': 4, "name": "test4", 'alg_id': 4},
            ],
            [
                {'id': 1, 'hashlist_id': 2, 'hash': 'a', 'salt': '', 'summ': '0cc175b9c0f1b6a831c399e269772661'},
                {'id': 2, 'hashlist_id': 3, 'hash': 'a', 'salt': '', 'summ': '0cc175b9c0f1b6a831c399e269772661'},
                {'id': 3, 'hashlist_id': 4, 'hash': 'a', 'salt': '', 'summ': '0cc175b9c0f1b6a831c399e269772661'},
            ],
            "a:70617373"
        ),
        (
            1,
            [
                {'id': 2, "name": "test2", 'alg_id': 3},
                {'id': 3, "name": "test3", 'alg_id': 3},
                {'id': 4, "name": "test4", 'alg_id': 4},
            ],
            [
                {'id': 1, 'hashlist_id': 2, 'hash': 'a', 'salt': 'b', 'summ': 'd8160c9b3dc20d4e931aeb4f45262155'},
                {'id': 2, 'hashlist_id': 3, 'hash': 'a', 'salt': 'b', 'summ': 'd8160c9b3dc20d4e931aeb4f45262155'},
                {'id': 3, 'hashlist_id': 4, 'hash': 'a', 'salt': 'b', 'summ': 'd8160c9b3dc20d4e931aeb4f45262155'},
            ],
            "a:b:70617373"
        ),
    ]

    @pytest.mark.parametrize("have_salt,hashlists,hashes,outfile_content", test_data)
    def test_parse_outfile_and_fill_found_hashes(self, have_salt, hashlists, hashes, outfile_content):
        """ Test of parse_outfile_and_fill_found_hashes() """
        for hashlist in hashlists:
            self._add_hashlist(id=hashlist['id'], name=hashlist['name'],
                               alg_id=hashlist['alg_id'], have_salts=have_salt)
        for _hash in hashes:
            self._add_hash(id=_hash['id'], hashlist_id=_hash['hashlist_id'],
                           hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ'])
        file_put_contents("/tmp/test.txt", outfile_content)
        # Nothing cracked for alg 3 before parsing.
        assert [] == self.db.fetch_all("SELECT h.id, h.password, h.cracked FROM hashes h, hashlists hl "
                                       "WHERE hl.id = h.hashlist_id AND hl.alg_id = 3 AND LENGTH(h.password) "
                                       "AND h.cracked")
        self.thrd.parse_outfile_and_fill_found_hashes({'out_file': '/tmp/test.txt'}, {'alg_id': 3})
        # Both alg-3 hashes are now cracked; the alg-4 hash must be untouched.
        test_data = [
            {'id': 1, 'password': 'pass', 'cracked': 1},
            {'id': 2, 'password': 'pass', 'cracked': 1}
        ]
        assert test_data == self.db.fetch_all(
            "SELECT h.id, h.password, h.cracked FROM hashes h, hashlists hl WHERE hl.id = h.hashlist_id "
            "AND hl.alg_id = 3 AND LENGTH(h.password) AND h.cracked")
        assert [{'id': 3, 'password': '', 'cracked': 0}] == self.db.fetch_all(
            "SELECT h.id, h.password, h.cracked FROM hashes h, hashlists hl WHERE hl.id = h.hashlist_id "
            "AND hl.alg_id = 4")

    def test_update_task_uncracked_count(self):
        """ Test of update_task_uncracked_count() """
        self.db.update("task_works", {"uncracked_after": 100}, "id=1")
        self._add_hash(password='p', hash='a', salt='b', cracked=1)
        self._add_hash(hash='c', salt='d', cracked=0)
        self.thrd.update_task_uncracked_count(1, 1)
        assert self.db.fetch_one("SELECT uncracked_after FROM task_works WHERE id=1") == 1
| mit |
TargetCoin/foocoin | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
	# JSON-RPC request id. NOTE: 'self.OBJID += 1' below creates a
	# per-instance copy on first use, so ids are per-connection.
	OBJID = 1
	def __init__(self, host, port, username, password):
		# Pre-compute the HTTP Basic auth header from the RPC credentials.
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		# Persistent HTTP connection; 30 second timeout.
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def rpc(self, method, params=None):
		# Send one JSON-RPC 1.1 request; returns the 'result' value,
		# the 'error' object on RPC-level failure, or None on transport
		# or decode failure.
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })
		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None
		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None
		return resp_obj['result']
	def getblockcount(self):
		# Convenience wrapper for the 'getblockcount' call.
		return self.rpc('getblockcount')
	def getwork(self, data=None):
		# 'getwork' without data fetches work; with data submits a solution.
		return self.rpc('getwork', data)
def uint32(x):
	# Truncate x to an unsigned 32-bit value (Python 2 long literal).
	return x & 0xffffffffL
def bytereverse(x):
	"""Byte-swap a 32-bit value: 0xAABBCCDD -> 0xDDCCBBAA."""
	high_half = (x << 24) | ((x << 8) & 0x00ff0000)
	low_half = ((x >> 8) & 0x0000ff00) | (x >> 24)
	return uint32(high_half | low_half)
def bufreverse(in_buf):
	"""Return in_buf with every 32-bit word byte-swapped.
	Assumes a Python 2 byte string whose length is a multiple of 4."""
	out_words = []
	for i in range(0, len(in_buf), 4):
		# '@I' unpacks 4 bytes as a native unsigned 32-bit integer.
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)
def wordreverse(in_buf):
	"""Reverse the order of the 4-byte words in in_buf (length % 4 == 0)."""
	words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
	return ''.join(reversed(words))
class Miner:
	def __init__(self, id):
		# Process index, used only for log output.
		self.id = id
		# Upper bound of nonces scanned per getwork; retuned in iterate().
		self.max_nonce = MAX_NONCE
	def work(self, datastr, targetstr):
		# Scan nonces for one unit of work. Returns (hashes_done,
		# nonce_bin) where nonce_bin is the packed winning nonce or
		# None when no solution was found in this scan.
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)
		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]
		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1] # byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)
		# pre-hash first 76b of block header
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)
		for nonce in xrange(self.max_nonce):
			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)
			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()
			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()
			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue
			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)
			hash_str = hash.encode('hex')
			l = long(hash_str, 16)
			# proof-of-work test: hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
		return (nonce + 1, None)
	def submit_work(self, rpc, original_data, nonce_bin):
		# Splice the winning nonce back into the original getwork data
		# (hex chars 152-160) and send it upstream.
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result
	def iterate(self, rpc):
		# One getwork/scan/submit cycle; sleeps on RPC failure.
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return
		time_start = time.time()
		(hashes_done, nonce_bin) = self.work(work['data'],
			work['target'])
		time_end = time.time()
		time_diff = time_end - time_start
		# Retune max_nonce so one scan lasts about settings['scantime']
		# seconds, capped just below 2^32.
		self.max_nonce = long(
			(hashes_done * settings['scantime']) / time_diff)
		if self.max_nonce > 0xfffffffaL:
			self.max_nonce = 0xfffffffaL
		if settings['hashmeter']:
			print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
			      self.id, hashes_done,
			      (hashes_done / 1000.0) / time_diff)
		if nonce_bin is not None:
			self.submit_work(rpc, work['data'], nonce_bin)
	def loop(self):
		# Mine forever against the configured upstream RPC server.
		rpc = BitcoinRPC(settings['host'], settings['port'],
				 settings['rpcuser'], settings['rpcpass'])
		if rpc is None:
			return
		while True:
			self.iterate(rpc)
def miner_thread(id):
	"""Entry point for one mining process: run a Miner loop forever."""
	Miner(id).loop()
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)
	# Parse the config file: '#' comments are skipped, 'key = value'
	# lines populate the global settings dict.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue
		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()
	# Fill in defaults for every optional setting.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 8332
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)
	# All values were read as strings; coerce the numeric ones.
	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])
	# Spawn one mining process per configured thread, staggered by 1s.
	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads
	print settings['threads'], "mining threads started"
	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
jamesliu/mxnet | example/reinforcement-learning/ddpg/utils.py | 52 | 3074 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
# random seed for reproduction
SEED = 12345
def define_qfunc(obs, act):
    """Build the critic symbol Q(obs, act) -> scalar value.

    The observation passes through two 32-unit ReLU layers, the action
    is concatenated in, and one more 32-unit ReLU layer feeds a single
    linear output unit.
    """
    hidden = mx.symbol.FullyConnected(data=obs, name="qfunc_fc1", num_hidden=32)
    hidden = mx.symbol.Activation(data=hidden, name="qfunc_relu1", act_type="relu")
    hidden = mx.symbol.FullyConnected(data=hidden, name="qfunc_fc2", num_hidden=32)
    hidden = mx.symbol.Activation(data=hidden, name="qfunc_relu2", act_type="relu")
    # NOTE: the misspelled layer name "qunfc_concat" is kept byte-for-byte
    # so that existing parameter checkpoints keep loading.
    hidden = mx.symbol.Concat(hidden, act, name="qunfc_concat")
    hidden = mx.symbol.FullyConnected(data=hidden, name="qfunc_fc3", num_hidden=32)
    hidden = mx.symbol.Activation(data=hidden, name="qfunc_relu3", act_type="relu")
    qval = mx.symbol.FullyConnected(data=hidden, name="qfunc_qval", num_hidden=1)
    return qval
def define_policy(obs, action_dim):
    """Build the actor symbol mapping obs -> tanh-squashed action vector.

    Two 32-unit ReLU layers feed a linear layer of action_dim units whose
    output is squashed into (-1, 1) by tanh.
    """
    hidden = mx.symbol.FullyConnected(data=obs, name="policy_fc1", num_hidden=32)
    hidden = mx.symbol.Activation(data=hidden, name="policy_relu1", act_type="relu")
    hidden = mx.symbol.FullyConnected(data=hidden, name="policy_fc2", num_hidden=32)
    hidden = mx.symbol.Activation(data=hidden, name="policy_relu2", act_type="relu")
    hidden = mx.symbol.FullyConnected(data=hidden, name='policy_fc3', num_hidden=action_dim)
    action = mx.symbol.Activation(data=hidden, name="act", act_type="tanh")
    return action
def discount_return(x, discount):
    """Return sum_t discount**t * x[t], the discounted return of sequence x."""
    weights = discount ** np.arange(len(x))
    return np.sum(x * weights)
def rollout(env, agent, max_path_length=np.inf):
    """Run one episode of *agent* in *env* and return the list of rewards.

    The episode stops when the env signals done or after max_path_length
    steps. env/agent follow a gym-like protocol: env.reset() -> obs,
    env.step(a) -> (obs, reward, done, info), agent.get_action(obs) -> a.
    """
    reward = []
    o = env.reset()
    # agent.reset()
    path_length = 0
    while path_length < max_path_length:
        # Add a batch dimension: assumes o is a flat numpy array -- the
        # agent expects shape (1, obs_dim). TODO confirm for all envs.
        o = o.reshape((1, -1))
        a = agent.get_action(o)
        next_o, r, d, _ = env.step(a)
        reward.append(r)
        path_length += 1
        if d:
            break
        o = next_o
    return reward
def sample_rewards(env, policy, eval_samples, max_path_length=np.inf):
    """Collect the reward lists of *eval_samples* rollouts of *policy*."""
    return [rollout(env, policy, max_path_length)
            for _ in range(eval_samples)]
| apache-2.0 |
adrienemery/auv-control-pi | auv_control_pi/tests/test_simulator.py | 1 | 1676 | import pytest
from pygc import great_circle
from ..simulator import Navitgator, GPS, Motor, AHRS
from ..components.navigation import Point, distance_to_point
@pytest.fixture
def sim():
    # Fresh simulated navigator ("Navitgator" [sic] is the class name in
    # the package) starting at a fixed point, ticking once per second.
    starting_point = Point(50, 120)
    return Navitgator(gps=GPS(),
                      current_location=starting_point,
                      update_period=1)
def test_simulator_move_to_waypoint(sim):
    # A waypoint due South of the start (same longitude, lower latitude)
    # must give a compass heading of 180 degrees.
    waypoint = Point(49, 120)
    sim.move_to_waypoint(waypoint)
    assert sim._compass.heading == 180
def test_simulator_update(sim):
    """One update at 10 m/s advances the boat 10 m along the commanded
    heading; for a 100 m leg, arrival (within 20 m) occurs on the 8th update."""
    bearing = 140.0
    leg_length = 100
    # Generate a waypoint 100 meters away on the chosen bearing.
    dest = great_circle(distance=leg_length,
                        azimuth=bearing,
                        latitude=sim._current_location.lat,
                        longitude=sim._current_location.lng)
    waypoint = Point(dest['latitude'], dest['longitude'])
    sim.move_to_waypoint(waypoint)
    sim.speed = 10
    origin = sim._current_location

    # With an update period of 1 s and a speed of 10 m/s, a single update
    # cycle should move the boat 10 meters from its previous position.
    sim._update()
    moved = distance_to_point(origin, sim._current_location)
    assert sim.speed == pytest.approx(moved)
    assert bearing == pytest.approx(sim._compass.heading)
    assert sim.arrived is False

    # Arrival triggers within 20 meters of the waypoint.  Having already
    # covered 10 meters, six more updates still fall short and the
    # seventh (8th overall) arrives.
    for _ in range(6):
        sim._update()
        assert sim.arrived is False
    sim._update()
    assert sim.arrived is True
| mit |
mantidproject/mantid | Framework/PythonInterface/plugins/algorithms/VisionReduction.py | 3 | 8882 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=no-init,invalid-name
#from mantid.api import AlgorithmFactory
#from mantid.simpleapi import PythonAlgorithm, WorkspaceProperty
# from mantid.kernel import Direction
from mantid.api import *
from mantid.kernel import *
from mantid.simpleapi import *
import csv
import os
from string import ascii_letters, digits # pylint: disable=deprecated-module
######################################################################
# Remove artifacts such as prompt pulse
######################################################################
def RemoveArtifact(WS, Xmin, Xmax, Xa, Delta):
    """Remove an artifact (e.g. the prompt pulse) from workspace ``WS``.

    The window [Xa, Xa + Delta] is cut out and refilled with the average of
    the two neighbouring windows of the same width, shifted into place,
    then the pieces are stitched back together in place of ``WS``.
    """
    # Everything on either side of the artifact window.
    CropWorkspace(InputWorkspace=WS, OutputWorkspace='__aux0',
                  XMin=str(Xmin), XMax=str(Xa))
    CropWorkspace(InputWorkspace=WS, OutputWorkspace='__aux3',
                  XMin=str(Xa + Delta), XMax=str(Xmax))
    # The windows immediately before and after the artifact.
    CropWorkspace(InputWorkspace=WS, OutputWorkspace='__aux1',
                  XMin=str(Xa - Delta), XMax=str(Xa))
    CropWorkspace(InputWorkspace=WS, OutputWorkspace='__aux2',
                  XMin=str(Xa + Delta), XMax=str(Xa + 2 * Delta))
    # Shift the neighbouring windows onto the artifact window and halve
    # each so their sum is the average of the two.
    ScaleX(InputWorkspace='__aux1', OutputWorkspace='__aux1',
           Factor=str(Delta), Operation='Add')
    ScaleX(InputWorkspace='__aux2', OutputWorkspace='__aux2',
           Factor=str(-Delta), Operation='Add')
    Scale(InputWorkspace='__aux1', OutputWorkspace='__aux1',
          Factor='0.5', Operation='Multiply')
    Scale(InputWorkspace='__aux2', OutputWorkspace='__aux2',
          Factor='0.5', Operation='Multiply')
    # Reassemble: left part + averaged patch + right part.
    Plus(LHSWorkspace='__aux0', RHSWorkspace='__aux1', OutputWorkspace=WS)
    Plus(LHSWorkspace=WS, RHSWorkspace='__aux2', OutputWorkspace=WS)
    Plus(LHSWorkspace=WS, RHSWorkspace='__aux3', OutputWorkspace=WS)
class VisionReduction(PythonAlgorithm):
    """Reduce the inelastic detector banks of the VISION instrument.

    Loads an event NeXus run, masks everything except the selected pixel
    columns of the forward/backward banks, normalises by proton charge and
    the monitor spectrum, converts to energy transfer and groups the result
    into Backward / Forward / Average spectra.
    """

    # Per-pixel calibration table (bank, pixel, Ef, Df columns; see PyExec).
    __CalFile='/SNS/VIS/shared/autoreduce/VIS_CalTab-03-03-2014.csv'
    # Pre-reduced monitor spectrum in wavelength, used as the beam normalisation.
    __MonFile = '/SNS/VIS/shared/autoreduce/VIS_5447-5450_MonitorL-corrected-hist.nxs'

    # Pixels to be reduced
    # NOTE(review): these are class-level lists mutated through ``self`` in
    # PyExec; a second PyExec on the same instance would append duplicates.
    ListPX = []
    ListPXF = []
    ListPXB = []

    # Binning parameters
    #binT='10,1,33333'
    binL='0.281,0.0002,8.199'      # wavelength binning (start, step, stop)
    binE='-2,0.005,5,-0.001,1000'  # energy-transfer binning (variable width)

    def FormatFilename(self,s):
        """Strip characters unsafe for a filename and replace spaces with '_'."""
        valid_chars = "-_.() %s%s" % (ascii_letters, digits)
        outfilename = ''.join(c for c in s if c in valid_chars)
        outfilename = outfilename.replace(' ','_')
        return outfilename

    def category(self):
        """Mantid algorithm category."""
        return "Workflow\\Inelastic;Utility\\Development"

    def name(self):
        """Mantid algorithm name."""
        return "VisionReduction"

    def summary(self):
        """One-line summary shown in the Mantid GUI."""
        return "This algorithm reduces the inelastic detectors on VISION. ** Under Development **"

    def PyInit(self):
        """Declare input file and output workspace properties."""
        self.declareProperty(FileProperty("Filename", "", action=FileAction.Load, extensions=[".nxs.h5"]))
        self.declareProperty(WorkspaceProperty("OutputWorkspace", "", direction=Direction.Output))

    #pylint: disable=too-many-locals
    def PyExec(self):
        NexusFile = self.getProperty("Filename").value
        FileName = NexusFile.split(os.sep)[-1]
        #IPTS = NexusFile.split(os.sep)[-3]
        # NOTE(review): str.strip('VIS_') strips any of the characters
        # V, I, S, _ from both ends, not the literal prefix; it works for
        # the expected "VIS_<digits>.nxs.h5" names only.
        RunNumber = int(FileName.strip('VIS_').replace('.nxs.h5',''))
        #*********************************************************************
        #*********************************************************************
        #*********************************************************************
        # Banks to be reduced
        BanksForward=[2,3,4,5,6]
        BanksBackward=[8,9,10,11,12,13,14]
        Banks=BanksForward+BanksBackward
        #*********************************************************************
        #*********************************************************************
        # Pixel columns within one bank (each bank is 8 tubes x 128 pixels);
        # only the central portions of tubes 3-6 are kept.
        PXs=list(range(2*128+48,2*128+80))+ \
            list(range(3*128+32,3*128+96))+ \
            list(range(4*128+32,4*128+96))+ \
            list(range(5*128+48,5*128+80))
        # Expand the per-bank pixel list into absolute detector IDs
        # (1024 pixels per bank).
        for i in BanksForward:
            offset=(i-1)*1024
            self.ListPX=self.ListPX+[j+offset for j in PXs]
            self.ListPXF=self.ListPXF+[j+offset for j in PXs]
        for i in BanksBackward:
            offset=(i-1)*1024
            self.ListPX=self.ListPX+[j+offset for j in PXs]
            self.ListPXB=self.ListPXB+[j+offset for j in PXs]
        # Create a list of pixels to mask
        # Inelastic Pixels = 0-14335
        allPixels = set(range(14336))
        toKeep = set(self.ListPX)
        mask = allPixels.difference(toKeep)
        MaskPX = list(mask)
        # Read calibration table
        # CalTab[pixel] = [Ef, Df]; rows are (bank, pixel, Ef, Df).
        # Ef/Df semantics are presumably final energy and a flight-path
        # factor used below -- TODO confirm against the calibration file.
        CalTab = [[0 for _ in range(2)] for _ in range(1024*14)]
        tab = list(csv.reader(open(self.__CalFile,'r')))
        for i in range(0,len(tab)):
            for j in [0,1]:
                tab[i][j]=int(tab[i][j])
            for j in [2,3]:
                tab[i][j]=float(tab[i][j])
            j=(tab[i][0]-1)*1024+tab[i][1]
            CalTab[j][0]=tab[i][2]
            CalTab[j][1]=tab[i][3]
        logger.information('Loading inelastic banks from {}'.format(NexusFile))
        bank_list = ["bank%d" % i for i in range(1, 15)]
        bank_property = ",".join(bank_list)
        LoadEventNexus(Filename=NexusFile, BankName=bank_property, OutputWorkspace='__IED_T', LoadMonitors='0')
        LoadInstrument(Workspace='__IED_T',Filename='/SNS/VIS/shared/autoreduce/VISION_Definition_no_efixed.xml',RewriteSpectraMap=True)
        MaskDetectors(Workspace='__IED_T', DetectorList=MaskPX)
        logger.information('Title: {}'.format(mtd['__IED_T'].getTitle()))
        logger.information('Proton charge: {}'.format(mtd['__IED_T'].getRun().getProtonCharge()))
        # Sanity checks; note these only log an error and fall through
        # (the sys.exit calls are commented out).
        if "Temperature" in mtd['__IED_T'].getTitle():
            logger.error('Error: Non-equilibrium runs will not be reduced')
            # sys.exit()
        if mtd['__IED_T'].getRun().getProtonCharge() < 5.0:
            logger.error('Error: Proton charge is too low')
            # sys.exit()
        NormaliseByCurrent(InputWorkspace='__IED_T',OutputWorkspace='__IED_T')
        # Remove the prompt-pulse artifact around TOF 16660 us.
        RemoveArtifact('__IED_T',10,33333,16660,240)
        LoadNexusProcessed(Filename=self.__MonFile,OutputWorkspace='__DBM_L',LoadHistory=False)
        # Effective EFixed for the TOF->wavelength conversion; the 0.7317/Df
        # factor looks like a flight-path correction -- TODO confirm.
        # NOTE(review): loop index ``i`` is unused in both loops below.
        for i,Pixel in enumerate(self.ListPX):
            Ef=CalTab[Pixel][0]
            Df=CalTab[Pixel][1]
            Efe=(0.7317/Df)**2*Ef
            mtd['__IED_T'].setEFixed(Pixel, Efe)
        ConvertUnits(InputWorkspace='__IED_T',OutputWorkspace='__IED_L',EMode='Indirect',Target='Wavelength')
        Rebin(InputWorkspace='__IED_L',OutputWorkspace='__IED_L',Params=self.binL,PreserveEvents='0')
        InterpolatingRebin(InputWorkspace='__DBM_L',OutputWorkspace='__DBM_L',Params=self.binL)
        #RebinToWorkspace(WorkspaceToRebin='__DBM_L',WorkspaceToMatch='__IED_L',OutputWorkspace='__DBM_L')
        # Normalise by the monitor (beam) spectrum.
        Divide(LHSWorkspace='__IED_L',RHSWorkspace='__DBM_L',OutputWorkspace='__IED_L')
        # Restore the calibrated final energy before converting to DeltaE.
        for i,Pixel in enumerate(self.ListPX):
            Ef=CalTab[Pixel][0]
            mtd['__IED_L'].setEFixed(Pixel, Ef)
        ConvertUnits(InputWorkspace='__IED_L',OutputWorkspace='__IED_E',EMode='Indirect',Target='DeltaE')
        Rebin(InputWorkspace='__IED_E',OutputWorkspace='__IED_E',Params=self.binE,PreserveEvents='0',IgnoreBinErrors=True)
        CorrectKiKf(InputWorkspace='__IED_E',OutputWorkspace='__IED_E',EMode='Indirect')
        # Group into backward, forward and all-bank averages, scaled by the
        # respective number of banks.
        GroupDetectors(InputWorkspace='__IED_E',OutputWorkspace='__IED_E_Forward',DetectorList=self.ListPXF)
        GroupDetectors(InputWorkspace='__IED_E',OutputWorkspace='__IED_E_Backward',DetectorList=self.ListPXB)
        GroupDetectors(InputWorkspace='__IED_E',OutputWorkspace='__IED_E_Average',DetectorList=self.ListPX)
        Scale(InputWorkspace='__IED_E_Forward',OutputWorkspace='__IED_E_Forward',Factor=str(1.0/len(BanksForward)),Operation='Multiply')
        Scale(InputWorkspace='__IED_E_Backward',OutputWorkspace='__IED_E_Backward',Factor=str(1.0/len(BanksBackward)),Operation='Multiply')
        Scale(InputWorkspace='__IED_E_Average',OutputWorkspace='__IED_E_Average',Factor=str(1.0/len(Banks)),Operation='Multiply')
        # Final workspace layout: spectrum 0 = backward, 1 = forward, 2 = average.
        AppendSpectra(InputWorkspace1='__IED_E_Backward',InputWorkspace2='__IED_E_Forward',OutputWorkspace='__IED_reduced')
        AppendSpectra(InputWorkspace1='__IED_reduced',InputWorkspace2='__IED_E_Average',OutputWorkspace='__IED_reduced')
        # Build the output name "<run>_<sanitised title>".
        Title = mtd['__IED_reduced'].getTitle()
        Note = Title.split('>')[0]
        Note = self.FormatFilename(Note)
        INS = str(RunNumber)+'_'+Note
        ws = Scale(InputWorkspace='__IED_reduced',OutputWorkspace=INS,Factor='500',Operation='Multiply')
        mtd[INS].setYUnitLabel('Normalized intensity')
        RemoveLogs(INS)
        RemoveWorkspaceHistory(INS)
        self.setProperty("OutputWorkspace", ws)
        DeleteWorkspace(INS)
# Register
# Make the algorithm discoverable by Mantid's Python/workbench interfaces.
AlgorithmFactory.subscribe(VisionReduction)
| gpl-3.0 |
listamilton/supermilton.repository | script.module.youtube.dl/lib/youtube_dl/extractor/videomore.py | 15 | 8815 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_age_limit,
parse_iso8601,
xpath_text,
)
class VideomoreIE(InfoExtractor):
    """Extractor for individual videomore.ru tracks.

    Accepts either an internal ``videomore:<id>`` reference or a site URL
    carrying a numeric track id.  Metadata comes from the site's per-track
    XML (stream URL) and JSON (title, series, season, ...) endpoints.
    """

    IE_NAME = 'videomore'
    _VALID_URL = r'videomore:(?P<sid>\d+)$|https?://videomore\.ru/(?:(?:embed|[^/]+/[^/]+)/|[^/]+\?.*\btrack_id=)(?P<id>\d+)(?:[/?#&]|\.(?:xml|json)|$)'
    _TESTS = [{
        'url': 'http://videomore.ru/kino_v_detalayah/5_sezon/367617',
        'md5': '70875fbf57a1cd004709920381587185',
        'info_dict': {
            'id': '367617',
            'ext': 'flv',
            'title': 'В гостях Алексей Чумаков и Юлия Ковальчук',
            'description': 'В гостях – лучшие романтические комедии года, «Выживший» Иньярриту и «Стив Джобс» Дэнни Бойла.',
            'series': 'Кино в деталях',
            'episode': 'В гостях Алексей Чумаков и Юлия Ковальчук',
            'episode_number': None,
            'season': 'Сезон 2015',
            'season_number': 5,
            'thumbnail': 're:^https?://.*\.jpg',
            'duration': 2910,
            'age_limit': 16,
            'view_count': int,
        },
    }, {
        'url': 'http://videomore.ru/embed/259974',
        'info_dict': {
            'id': '259974',
            'ext': 'flv',
            'title': '80 серия',
            'description': '«Медведей» ждет решающий матч. Макеев выясняет отношения со Стрельцовым. Парни узнают подробности прошлого Макеева.',
            'series': 'Молодежка',
            'episode': '80 серия',
            'episode_number': 40,
            'season': '2 сезон',
            'season_number': 2,
            'thumbnail': 're:^https?://.*\.jpg',
            'duration': 2809,
            'age_limit': 16,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://videomore.ru/molodezhka/sezon_promo/341073',
        'info_dict': {
            'id': '341073',
            'ext': 'flv',
            'title': 'Команда проиграла из-за Бакина?',
            'description': 'Молодежка 3 сезон скоро',
            'series': 'Молодежка',
            'episode': 'Команда проиграла из-за Бакина?',
            'episode_number': None,
            'season': 'Промо',
            'season_number': 99,
            'thumbnail': 're:^https?://.*\.jpg',
            'duration': 29,
            'age_limit': 16,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://videomore.ru/elki_3?track_id=364623',
        'only_matching': True,
    }, {
        'url': 'http://videomore.ru/embed/364623',
        'only_matching': True,
    }, {
        'url': 'http://videomore.ru/video/tracks/364623.xml',
        'only_matching': True,
    }, {
        'url': 'http://videomore.ru/video/tracks/364623.json',
        'only_matching': True,
    }, {
        'url': 'http://videomore.ru/video/tracks/158031/quotes/33248',
        'only_matching': True,
    }, {
        'url': 'videomore:367617',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_url(webpage):
        """Return the player config URL embedded in *webpage*, if present."""
        mobj = re.search(
            r'<object[^>]+data=(["\'])https?://videomore.ru/player\.swf\?.*config=(?P<url>https?://videomore\.ru/(?:[^/]+/)+\d+\.xml).*\1',
            webpage)
        if mobj:
            return mobj.group('url')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # 'sid' matches the videomore:<id> form, 'id' the site URLs.
        video_id = mobj.group('sid') or mobj.group('id')

        # The XML endpoint carries the HDS stream URL.
        video = self._download_xml(
            'http://videomore.ru/video/tracks/%s.xml' % video_id,
            video_id, 'Downloading video XML')

        video_url = xpath_text(video, './/video_url', 'video url', fatal=True)
        formats = self._extract_f4m_formats(video_url, video_id, f4m_id='hds')
        self._sort_formats(formats)

        # The JSON endpoint carries all display metadata.
        data = self._download_json(
            'http://videomore.ru/video/tracks/%s.json' % video_id,
            video_id, 'Downloading video JSON')

        title = data.get('title') or data['project_title']
        description = data.get('description') or data.get('description_raw')
        timestamp = parse_iso8601(data.get('published_at'))
        duration = int_or_none(data.get('duration'))
        view_count = int_or_none(data.get('views'))
        age_limit = parse_age_limit(data.get('min_age'))
        thumbnails = [{
            'url': thumbnail,
        } for thumbnail in data.get('big_thumbnail_urls', [])]

        series = data.get('project_title')
        episode = data.get('title')
        # ``or None`` normalises empty/zero values before the int conversion.
        episode_number = int_or_none(data.get('episode_of_season') or None)
        season = data.get('season_title')
        season_number = int_or_none(data.get('season_pos') or None)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'series': series,
            'episode': episode,
            'episode_number': episode_number,
            'season': season,
            'season_number': season_number,
            'thumbnails': thumbnails,
            'timestamp': timestamp,
            'duration': duration,
            'view_count': view_count,
            'age_limit': age_limit,
            'formats': formats,
        }
class VideomoreVideoIE(InfoExtractor):
    """Resolve a videomore.ru page URL to its numeric track and hand it to
    VideomoreIE."""

    IE_NAME = 'videomore:video'
    _VALID_URL = r'https?://videomore\.ru/(?:(?:[^/]+/){2})?(?P<id>[^/?#&]+)[/?#&]*$'
    _TESTS = [{
        # single video with og:video:iframe
        'url': 'http://videomore.ru/elki_3',
        'info_dict': {
            'id': '364623',
            'ext': 'flv',
            'title': 'Ёлки 3',
            'description': '',
            'thumbnail': 're:^https?://.*\.jpg',
            'duration': 5579,
            'age_limit': 6,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # season single series with og:video:iframe
        'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya',
        'only_matching': True,
    }, {
        'url': 'http://videomore.ru/sejchas_v_seti/serii_221-240/226_vypusk',
        'only_matching': True,
    }, {
        # single video without og:video:iframe
        'url': 'http://videomore.ru/marin_i_ego_druzya',
        'info_dict': {
            'id': '359073',
            'ext': 'flv',
            'title': '1 серия. Здравствуй, Аквавилль!',
            'description': 'md5:c6003179538b5d353e7bcd5b1372b2d7',
            'thumbnail': 're:^https?://.*\.jpg',
            'duration': 754,
            'age_limit': 6,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }]

    @classmethod
    def suitable(cls, url):
        # Defer to the track extractor whenever it claims the URL.
        if VideomoreIE.suitable(url):
            return False
        return super(VideomoreVideoIE, cls).suitable(url)

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # Prefer the og:video:iframe URL; fall back to scraping the track
        # id out of the player config / page scripts.
        iframe_url = self._og_search_property(
            'video:iframe', webpage, 'video url', default=None)
        if iframe_url:
            video_url = iframe_url
        else:
            video_id = self._search_regex(
                (r'config\s*:\s*["\']https?://videomore\.ru/video/tracks/(\d+)\.xml',
                 r'track-id=["\'](\d+)',
                 r'xcnt_product_id\s*=\s*(\d+)'), webpage, 'video id')
            video_url = 'videomore:%s' % video_id

        return self.url_result(video_url, VideomoreIE.ie_key())
class VideomoreSeasonIE(InfoExtractor):
    """Turn a videomore.ru season page into a playlist of its episodes."""

    IE_NAME = 'videomore:season'
    _VALID_URL = r'https?://videomore\.ru/(?!embed)(?P<id>[^/]+/[^/?#&]+)[/?#&]*$'
    _TESTS = [{
        'url': 'http://videomore.ru/molodezhka/sezon_promo',
        'info_dict': {
            'id': 'molodezhka/sezon_promo',
            'title': 'Молодежка Промо',
        },
        'playlist_mincount': 12,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        title = self._og_search_title(webpage)

        # Episode links carry the widget-item-desc class on the anchor.
        episode_urls = re.findall(
            r'<a[^>]+href="((?:https?:)?//videomore\.ru/%s/[^/]+)"[^>]+class="widget-item-desc"'
            % display_id, webpage)
        entries = [self.url_result(episode_url) for episode_url in episode_urls]

        return self.playlist_result(entries, display_id, title)
| gpl-2.0 |
factorlibre/stock-logistics-warehouse | stock_inventory_revaluation/migrations/8.0.1.1.0/pre-migration.py | 3 | 2647 | # -*- coding: utf-8 -*-
# © 2016 Eficent Business and IT Consulting Services S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import logging
# Migration display name (OpenUpgrade convention).  The original code
# assigned __name__ twice; the first value ("Back up the old account
# move...") was dead except that the logger created between the two
# assignments picked it up as its name.  Keep the value that actually
# survived and name the logger after it.
__name__ = "Upgrade to 8.0.1.1.0"

_logger = logging.getLogger(__name__)
def copy_account_move_id(cr):
    """Back up ``account_move_id`` into a new ``old_account_move_id`` column.

    Idempotent: the backup column is only created and filled when it does
    not exist yet, so re-running the migration is safe.
    """
    cr.execute("""SELECT column_name
                  FROM information_schema.columns
                  WHERE table_name='stock_inventory_revaluation' AND
                  column_name='old_account_move_id'""")
    if cr.fetchone():
        return
    cr.execute(
        """
        ALTER TABLE stock_inventory_revaluation
        ADD COLUMN old_account_move_id
        integer;
        COMMENT ON COLUMN stock_inventory_revaluation.old_account_move_id
        IS 'Old
        Journal Entry';
        """)
    cr.execute(
        """
        UPDATE stock_inventory_revaluation as sir
        SET old_account_move_id = account_move_id
        """)
def set_revaluation_in_account_move(cr):
    """Link each journal entry to its inventory revaluation via a new
    ``stock_inventory_revaluation_id`` column on ``account_move``.

    Idempotent: skipped entirely when the column already exists.
    """
    cr.execute("""SELECT column_name
                  FROM information_schema.columns
                  WHERE table_name='account_move' AND
                  column_name='stock_inventory_revaluation_id'""")
    if cr.fetchone():
        return
    cr.execute(
        """
        ALTER TABLE account_move ADD COLUMN stock_inventory_revaluation_id
        integer;
        COMMENT ON COLUMN account_move.stock_inventory_revaluation_id IS
        'Stock Inventory Revaluation';
        """)
    cr.execute(
        """
        UPDATE account_move as am
        SET stock_inventory_revaluation_id = sir.id
        FROM stock_inventory_revaluation as sir
        WHERE old_account_move_id = am.id
        """)
def set_revaluation_in_account_move_line(cr):
    """Link each journal item to its inventory revaluation via a new
    ``stock_inventory_revaluation_id`` column on ``account_move_line``.

    Idempotent: skipped entirely when the column already exists.
    """
    cr.execute("""SELECT column_name
                  FROM information_schema.columns
                  WHERE table_name='account_move_line' AND
                  column_name='stock_inventory_revaluation_id'""")
    if cr.fetchone():
        return
    cr.execute(
        """
        ALTER TABLE account_move_line
        ADD COLUMN stock_inventory_revaluation_id
        integer;
        COMMENT ON COLUMN account_move_line.stock_inventory_revaluation_id
        IS 'Stock Inventory Revaluation';
        """)
    cr.execute(
        """
        UPDATE account_move_line as aml
        SET stock_inventory_revaluation_id = sir.id
        FROM stock_inventory_revaluation as sir
        WHERE old_account_move_id = aml.move_id
        """)
def migrate(cr, version):
    """Migration entry point: back up the old move link, then relink the
    journal entries and items.  Skipped on fresh installs (no version)."""
    if not version:
        return
    for step in (copy_account_move_id,
                 set_revaluation_in_account_move,
                 set_revaluation_in_account_move_line):
        step(cr)
| agpl-3.0 |
alfa-jor/addon | plugin.video.alfa/servers/thevid.py | 2 | 1242 | # -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger, config
def test_video_exists(page_url):
    """Check whether the video at ``page_url`` is still available.

    Returns a ``(exists, message)`` tuple; ``message`` is empty when the
    video exists.
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    gone_markers = ("Video not found...", "Video removed due to copyright")
    if any(marker in data for marker in gone_markers):
        return False, config.get_localized_string(70292) % "Thevid"
    if "Video removed for inactivity..." in data:
        return False, "[Thevid] El video ha sido removido por inactividad"
    return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
    """Resolve the direct mp4 stream URLs for a Thevid page.

    Returns a list of ``[label, url]`` pairs in the format the server
    connectors expect.  The credential parameters are unused but kept for
    interface compatibility with the other server modules.

    Fixes: routine/debug messages were logged with ``logger.error`` (the
    whole unpacked script was dumped at error level); the dead initial
    ``videos = []`` assignment is removed.
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    # The stream URLs are hidden inside a packed (p.a.c.k.e.r) script.
    packed = scrapertools.find_single_match(data, r"</script>\s*<script>\s*(eval.*?)\s*</script>")
    unpacked = jsunpack.unpack(packed)
    videos = scrapertools.find_multiple_matches(unpacked, 'vldAb="([^"]+)')
    video_urls = []
    for video in videos:
        # Valid entries are protocol-relative ("//host/path..."); anything
        # else captured from the unpacked script is noise.
        if not video.startswith("//"):
            continue
        video_urls.append(["mp4 [Thevid]", "https:" + video])
    logger.info("Url: %s" % videos)
    return video_urls
| gpl-3.0 |
superchilli/webapp | venv/lib/python2.7/site-packages/html5lib/sanitizer.py | 805 | 16428 | from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
class HTMLSanitizerMixin(object):
    """ sanitization of XHTML+MathML+SVG and of inline style attributes.

    Whitelist-based filter for an html5lib token stream: elements,
    attributes, URI protocols and CSS properties not on the ``allowed_*``
    lists are escaped or stripped.  Subclasses may override the
    ``allowed_*`` class attributes to tighten or relax the policy.
    """

    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
                           'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
                           'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
                           'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
                           'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
                           'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
                           'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
                           'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
                           'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
                           'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
                           'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
                           'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
                           'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']

    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
                       'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
                       'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
                       'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
                       'munderover', 'none']

    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
                    'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
                    'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
                    'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
                    'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
                    'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
                             'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
                             'background', 'balance', 'bgcolor', 'bgproperties', 'border',
                             'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
                             'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
                             'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
                             'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
                             'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
                             'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
                             'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
                             'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
                             'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
                             'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
                             'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
                             'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
                             'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
                             'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
                             'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
                             'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
                             'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
                             'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
                             'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
                             'width', 'wrap', 'xml:lang']

    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
                         'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
                         'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
                         'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
                         'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
                         'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
                         'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
                         'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
                         'xlink:type', 'xmlns', 'xmlns:xlink']

    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
                      'arabic-form', 'ascent', 'attributeName', 'attributeType',
                      'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
                      'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
                      'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
                      'fill-opacity', 'fill-rule', 'font-family', 'font-size',
                      'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
                      'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
                      'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
                      'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
                      'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
                      'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
                      'opacity', 'orient', 'origin', 'overline-position',
                      'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
                      'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
                      'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
                      'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
                      'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
                      'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
                      'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
                      'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
                      'transform', 'type', 'u1', 'u2', 'underline-position',
                      'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
                      'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
                      'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
                      'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
                      'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
                      'y1', 'y2', 'zoomAndPan']

    # Attributes whose values are URIs and must pass the protocol whitelist.
    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
                       'xlink:href', 'xml:base']

    # SVG attributes that may contain url(...) references; those get stripped.
    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
                               'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
                               'mask', 'stroke']

    # SVG elements whose local (fragment) xlink:href values are dropped.
    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
                            'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
                            'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
                            'set', 'use']

    acceptable_css_properties = ['azimuth', 'background-color',
                                 'border-bottom-color', 'border-collapse', 'border-color',
                                 'border-left-color', 'border-right-color', 'border-top-color', 'clear',
                                 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
                                 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
                                 'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
                                 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
                                 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
                                 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
                                 'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
                                 'white-space', 'width']

    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
                               'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
                               'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
                               'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
                               'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
                               'transparent', 'underline', 'white', 'yellow']

    acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
                                 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
                                 'stroke-opacity']

    acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
                            'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
                            'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
                            'ssh', 'sftp', 'rtsp', 'afs']

    # subclasses may define their own versions of these constants
    allowed_elements = acceptable_elements + mathml_elements + svg_elements
    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
    allowed_css_properties = acceptable_css_properties
    allowed_css_keywords = acceptable_css_keywords
    allowed_svg_properties = acceptable_svg_properties
    allowed_protocols = acceptable_protocols

    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
    # attributes are parsed, and a restricted set, # specified by
    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
    # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
    # in ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script> do_nasty_stuff()&lt;/script>
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Sanitize a single token.

        Whitelisted tags go through ``allowed_token`` (attribute filtering),
        other tags through ``disallowed_token`` (escaped to text).  Comments
        are dropped (returns None); all other token types pass unchanged.
        """

        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in list(tokenTypes.keys()):
            token_type = tokenTypes[token_type]

        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            if token["name"] in self.allowed_elements:
                return self.allowed_token(token, token_type)
            else:
                return self.disallowed_token(token, token_type)
        elif token_type == tokenTypes["Comment"]:
            pass
        else:
            return token

    def allowed_token(self, token, token_type):
        """Filter the attributes of a whitelisted tag token in place.

        Drops non-whitelisted attributes, URI attributes with disallowed
        protocols, local SVG href references, and sanitizes inline style.
        """
        if "data" in token:
            # [::-1] keeps the *first* occurrence of a duplicated attribute.
            attrs = dict([(name, val) for name, val in
                          token["data"][::-1]
                          if name in self.allowed_attributes])
            for attr in self.attr_val_is_uri:
                if attr not in attrs:
                    continue
                # Strip control/whitespace characters that browsers ignore
                # before comparing the URI scheme against the whitelist.
                val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                       unescape(attrs[attr])).lower()
                # remove replacement characters from unescaped characters
                val_unescaped = val_unescaped.replace("\ufffd", "")
                if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
                    (val_unescaped.split(':')[0] not in
                     self.allowed_protocols)):
                    del attrs[attr]
            for attr in self.svg_attr_val_allows_ref:
                if attr in attrs:
                    attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                         ' ',
                                         unescape(attrs[attr]))
            if (token["name"] in self.svg_allow_local_href and
                'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                    attrs['xlink:href'])):
                del attrs['xlink:href']
            if 'style' in attrs:
                attrs['style'] = self.sanitize_css(attrs['style'])
            token["data"] = [[name, val] for name, val in list(attrs.items())]
        return token

    def disallowed_token(self, token, token_type):
        """Convert a non-whitelisted tag token into an escaped text token."""
        if token_type == tokenTypes["EndTag"]:
            token["data"] = "</%s>" % token["name"]
        elif token["data"]:
            attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
            token["data"] = "<%s%s>" % (token["name"], attrs)
        else:
            token["data"] = "<%s>" % token["name"]
        if token.get("selfClosing"):
            token["data"] = token["data"][:-1] + "/>"

        if token["type"] in list(tokenTypes.keys()):
            token["type"] = "Characters"
        else:
            token["type"] = tokenTypes["Characters"]

        del token["name"]
        return token

    def sanitize_css(self, style):
        """Return *style* with only whitelisted CSS properties/keywords kept.

        url(...) values are removed outright, and any style that fails the
        character "gauntlet" checks is rejected wholesale (empty string).
        """
        # disallow urls
        style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)

        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ''

        clean = []
        for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
            if not value:
                continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                                'padding']:
                # Shorthand properties: every keyword must be acceptable or
                # look like a colour/length literal, else drop the property.
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                            not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')

        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that sanitizes the token stream as it is produced."""

    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False, parser=None):
        # Change case matching defaults as we only output lowercase html anyway
        # This solution doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName, parser=parser)

    def __iter__(self):
        for raw_token in HTMLTokenizer.__iter__(self):
            clean_token = self.sanitize_token(raw_token)
            # sanitize_token returns None for dropped tokens (e.g. comments).
            if clean_token:
                yield clean_token
| mit |
ardi69/pyload-0.4.10 | lib/Python/Lib/Crypto/Random/__init__.py | 126 | 1669 | # -*- coding: utf-8 -*-
#
# Random/__init__.py : PyCrypto random number generation
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
# Legacy CVS/SVN revision keyword, kept for backwards compatibility.
__revision__ = "$Id$"
# Names exported by ``from Crypto.Random import *``.  Note that ``atfork``
# and ``get_random_bytes`` (defined below) are deliberately not listed --
# presumably to keep the star-import surface minimal; verify before changing.
__all__ = ['new']
from Crypto.Random import OSRNG
from Crypto.Random import _UserFriendlyRNG
def new(*args, **kwargs):
    """Return a file-like object that outputs cryptographically random bytes.

    All arguments are forwarded unchanged to ``_UserFriendlyRNG.new``.
    """
    return _UserFriendlyRNG.new(*args, **kwargs)
def atfork():
    """Call this whenever you call os.fork().

    Reinitializes the user-friendly RNG so parent and child processes do not
    share random-number generator state.
    """
    _UserFriendlyRNG.reinit()
def get_random_bytes(n):
    """Return the specified number of cryptographically-strong random bytes.

    :param n: number of bytes to return.
    """
    return _UserFriendlyRNG.get_random_bytes(n)
# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-3.0 |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/googletest/googletest/test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""
    def TestSehExceptions(self, test_output):
      # Every phase of the test lifecycle (fixture ctor/dtor,
      # SetUpTestCase/TearDownTestCase, SetUp/TearDown, and the test body)
      # must report the raised SEH exception (code 0x2a) in the output
      # instead of letting it crash the test program.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)
    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      # Binary built with C++ exceptions enabled.
      self.TestSehExceptions(EX_BINARY_OUTPUT)
    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      # Binary built with C++ exceptions disabled.
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
    * C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exception thrown affect the remainder of the test work flow in the
      expected manner.
  """
  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')
  # The destructor test only exists in builds where throwing from a
  # destructor is possible, so define the test method conditionally.
  if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
      EX_BINARY_OUTPUT):
    def testCatchesCxxExceptionsInFixtureDestructor(self):
      self.assert_('C++ exception with description '
                   '"Standard C++ exception" thrown '
                   'in the test fixture\'s destructor'
                   in EX_BINARY_OUTPUT)
      self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                   'called as expected.'
                   in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInSetUpTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')
  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)
  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # (Bug fix: local constant was misspelled FITLER_OUT_SEH_TESTS_FLAG.)
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
        env=environ).output
    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
M4gn4tor/python-instagram | instagram/models.py | 30 | 7340 | from .helper import timestamp_to_datetime
import six
class ApiModel(object):
    """Base class for objects built from decoded Instagram API dictionaries."""

    @classmethod
    def object_from_dictionary(cls, entry):
        # make dict keys all strings
        if entry is None:
            # NOTE(review): returns an empty string rather than None for a
            # missing entry — confirm callers rely only on falsiness.
            return ""
        entry_str_dict = dict([(str(key), value) for key, value in entry.items()])
        return cls(**entry_str_dict)
    def __repr__(self):
        return str(self)
        # if six.PY2:
        #     return six.text_type(self).encode('utf8')
        # else:
        #     return self.encode('utf8')
    def __str__(self):
        # Subclasses implement __unicode__; bridge it to str on both Python
        # majors (bytes on py2, text on py3).
        if six.PY3:
            return self.__unicode__()
        else:
            return unicode(self).encode('utf-8')
class Image(ApiModel):
    """A single rendition of a photo: its URL plus pixel dimensions."""

    def __init__(self, url, width, height):
        self.url = url
        self.width = width
        self.height = height

    def __unicode__(self):
        return "Image: {0}".format(self.url)
class Video(Image):
    """A video rendition; inherits url/width/height handling from Image."""

    def __unicode__(self):
        return "Video: %s" % self.url
class Media(ApiModel):
    """An Instagram media post (photo or video)."""

    def __init__(self, id=None, **kwargs):
        self.id = id
        # Any extra API fields become attributes as-is.
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)

    def get_standard_resolution_url(self):
        """Return the standard-resolution URL for this image or video."""
        if self.type == 'image':
            return self.images['standard_resolution'].url
        else:
            return self.videos['standard_resolution'].url

    def get_low_resolution_url(self):
        """Return the low-resolution URL for this image or video."""
        if self.type == 'image':
            return self.images['low_resolution'].url
        else:
            return self.videos['low_resolution'].url

    def get_thumbnail_url(self):
        """Return the thumbnail URL (thumbnails are always images)."""
        return self.images['thumbnail'].url

    def __unicode__(self):
        return "Media: %s" % self.id

    @classmethod
    def object_from_dictionary(cls, entry):
        """Build a Media object from a decoded API media dictionary."""
        new_media = Media(id=entry['id'])
        new_media.type = entry['type']
        new_media.user = User.object_from_dictionary(entry['user'])
        new_media.images = {}
        for version, version_info in six.iteritems(entry['images']):
            new_media.images[version] = Image.object_from_dictionary(version_info)
        if new_media.type == 'video':
            new_media.videos = {}
            for version, version_info in six.iteritems(entry['videos']):
                new_media.videos[version] = Video.object_from_dictionary(version_info)
        if 'user_has_liked' in entry:
            new_media.user_has_liked = entry['user_has_liked']
        new_media.like_count = entry['likes']['count']
        new_media.likes = []
        if 'data' in entry['likes']:
            for like in entry['likes']['data']:
                new_media.likes.append(User.object_from_dictionary(like))
        new_media.comment_count = entry['comments']['count']
        new_media.comments = []
        for comment in entry['comments']['data']:
            new_media.comments.append(Comment.object_from_dictionary(comment))
        new_media.users_in_photo = []
        if entry.get('users_in_photo'):
            for user_in_photo in entry['users_in_photo']:
                new_media.users_in_photo.append(UserInPhoto.object_from_dictionary(user_in_photo))
        new_media.created_time = timestamp_to_datetime(entry['created_time'])
        # Bug fix: the 'id' membership test must look inside the location
        # payload, not the media entry (which always carries an 'id' here),
        # and a missing 'location' key must not raise KeyError.
        if entry.get('location') and 'id' in entry['location']:
            new_media.location = Location.object_from_dictionary(entry['location'])
        new_media.caption = None
        if entry['caption']:
            new_media.caption = Comment.object_from_dictionary(entry['caption'])
        new_media.tags = []
        if entry['tags']:
            for tag in entry['tags']:
                new_media.tags.append(Tag.object_from_dictionary({'name': tag}))
        new_media.link = entry['link']
        new_media.filter = entry.get('filter')
        return new_media
class MediaShortcode(Media):
    """Media addressed by its URL shortcode.

    NOTE(review): does not call ``Media.__init__``, so ``id`` is never set
    here — confirm callers rely only on keyword attributes.
    """

    def __init__(self, shortcode=None, **kwargs):
        self.shortcode = shortcode
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)
class Tag(ApiModel):
    """A hashtag; extra API fields become attributes."""

    def __init__(self, name, **kwargs):
        self.name = name
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)
    def __unicode__(self):
        return "Tag: %s" % self.name
class Comment(ApiModel):
    """A comment (also used for captions) on a media object."""

    def __init__(self, *args, **kwargs):
        # Positional args are accepted but ignored; only kwargs are stored.
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)
    @classmethod
    def object_from_dictionary(cls, entry):
        """Build a Comment from an API dict with 'from', 'text',
        'created_time' and 'id' keys."""
        user = User.object_from_dictionary(entry['from'])
        text = entry['text']
        created_at = timestamp_to_datetime(entry['created_time'])
        id = entry['id']
        return Comment(id=id, user=user, text=text, created_at=created_at)
    def __unicode__(self):
        return "Comment: %s said \"%s\"" % (self.user.username, self.text)
class Point(ApiModel):
    """A geographic coordinate pair (latitude, longitude)."""

    def __init__(self, latitude, longitude):
        self.latitude = latitude
        self.longitude = longitude

    def __unicode__(self):
        return "Point: ({0}, {1})".format(self.latitude, self.longitude)
class Location(ApiModel):
    """A named place; the id is normalized to a string."""

    def __init__(self, id, *args, **kwargs):
        self.id = str(id)
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)
    @classmethod
    def object_from_dictionary(cls, entry):
        """Build a Location; coordinates are optional and id defaults to 0."""
        point = None
        if 'latitude' in entry:
            point = Point(entry.get('latitude'),
                          entry.get('longitude'))
        location = Location(entry.get('id', 0),
                            point=point,
                            name=entry.get('name', ''))
        return location
    def __unicode__(self):
        return "Location: %s (%s)" % (self.id, self.point)
class User(ApiModel):
    """An Instagram user; extra API fields become attributes."""

    def __init__(self, id, *args, **kwargs):
        self.id = id
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)
    def __unicode__(self):
        # Assumes a 'username' attribute was supplied via kwargs.
        return "User: %s" % self.username
class Relationship(ApiModel):
    """Follow state between the authenticated user and a target user."""

    def __init__(self, incoming_status="none", outgoing_status="none", target_user_is_private=False):
        self.incoming_status = incoming_status
        self.outgoing_status = outgoing_status
        self.target_user_is_private = target_user_is_private

    def __unicode__(self):
        # A status of 'none' means no relationship in that direction.
        follows = self.outgoing_status != 'none'
        followed = self.incoming_status != 'none'
        return "Relationship: (Follows: %s, Followed by: %s)" % (follows, followed)
class Position(ApiModel):
    """An (x, y) position of a user tag within a photo."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __unicode__(self):
        return "Position: (%s, %s)" % (self.x, self.y)
    @classmethod
    def object_from_dictionary(cls, entry):
        # Returns None implicitly when 'x' is absent; 'y' is assumed to be
        # present whenever 'x' is — TODO confirm against API payloads.
        if 'x' in entry:
            return Position(entry['x'], entry['y'])
class UserInPhoto(ApiModel):
    """A user tag (user + position) attached to a photo."""

    def __init__(self, user, position):
        self.position = position
        self.user = user

    def __unicode__(self):
        return "UserInPhoto: (%s, %s)" % (self.user, self.position)

    @classmethod
    def object_from_dictionary(cls, entry):
        """Build a UserInPhoto from an API dict; missing parts become None."""
        user = None
        if 'user' in entry:
            user = User.object_from_dictionary(entry['user'])
        # Bug fix: 'position' was referenced while unbound (UnboundLocalError)
        # whenever the entry carried no 'position' key; default it to None.
        position = None
        if 'position' in entry:
            position = Position(entry['position']['x'], entry['position']['y'])
        return UserInPhoto(user, position)
| bsd-3-clause |
koditr/xbmc-tr-team-turkish-addons | script.module.beautifulsoup4/lib/bs4/tests/test_lxml.py | 115 | 2382 | """Tests to ensure that the lxml tree builder generates good trees."""
import re
import warnings
try:
import lxml.etree
LXML_PRESENT = True
LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError, e:
LXML_PRESENT = False
LXML_VERSION = (0,)
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from bs4.testing import skipIf
from bs4.tests import test_htmlparser
from bs4.testing import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its tree builder.")
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""
    @property
    def default_builder(self):
        # Each test gets a fresh lxml-backed HTML tree builder.
        return LXMLTreeBuilder()
    def test_out_of_range_entity(self):
        # Out-of-range numeric character references must be dropped entirely.
        # NOTE(review): the entity literals appear mojibake-mangled in this
        # copy of the file — verify against upstream bs4 sources.
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assertSoupEquals(
            "<p>foo�bar</p>", "<p>foobar</p>")
    # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
    # test if an old version of lxml is installed.
    @skipIf(
        not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
        "Skipping doctype test for old version of lxml to avoid segfault.")
    def test_empty_doctype(self):
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
        self.assertEqual("", doctype.strip())
    def test_beautifulstonesoup_is_xml_parser(self):
        # Make sure that the deprecated BSS class uses an xml builder
        # if one is installed.
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulStoneSoup("<b />")
        self.assertEqual(u"<b/>", unicode(soup.b))
        self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))
self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))
@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its XML tree builder.")
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
    """See ``XMLTreeBuilderSmokeTest``."""
    @property
    def default_builder(self):
        # Each test gets a fresh lxml-backed XML tree builder.
        return LXMLTreeBuilderForXML()
| gpl-2.0 |
yi719/python-driver | benchmarks/future_batches.py | 11 | 1463 | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from base import benchmark, BenchmarkThread
from six.moves import queue
log = logging.getLogger(__name__)
class Runner(BenchmarkThread):
    """Benchmark thread that pipelines async queries, draining every 120."""

    @staticmethod
    def _drain(futures):
        """Block until every queued future has completed, emptying the queue."""
        while True:
            try:
                futures.get_nowait().result()
            except queue.Empty:
                break

    def run(self):
        # Bounded queue: at most ~120 requests in flight at once.
        futures = queue.Queue(maxsize=121)
        self.start_profile()
        for i in range(self.num_queries):
            # Every 120 queries, wait for the outstanding batch to complete
            # so the cluster never sees an unbounded backlog of requests.
            if i > 0 and i % 120 == 0:
                self._drain(futures)
            future = self.session.execute_async(self.query, self.values)
            futures.put_nowait(future)
        # Wait for the remaining tail of in-flight requests.
        self._drain(futures)
        self.finish_profile()
if __name__ == "__main__":
benchmark(Runner)
| apache-2.0 |
shootstar/novatest | nova/db/sqlalchemy/migrate_repo/versions/180_fix_175_and_176_migration_sync_shadow_table.py | 15 | 2174 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Boris Pavlovic, Mirantis Inc
from sqlalchemy import MetaData, Integer, String, Table, Column
def upgrade(migrate_engine):
    """Re-apply the 175/176 schema fixes to shadow_volume_usage_cache.

    Drops the integer ``instance_id`` column in favor of a string
    ``instance_uuid`` and adds ``project_id``/``user_id`` (175) plus
    ``availability_zone`` (176) so the shadow table matches the live table.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    shadow_volume_usage_cache = Table('shadow_volume_usage_cache', meta,
                                      autoload=True)
    # fix for 175 migration: instances are referenced by uuid, not integer id
    # (local variable renamed: it was misleadingly called 'instance_id').
    shadow_volume_usage_cache.drop_column('instance_id')
    instance_uuid = Column('instance_uuid', String(36))
    project_id = Column('project_id', String(36))
    user_id = Column('user_id', String(36))
    shadow_volume_usage_cache.create_column(instance_uuid)
    shadow_volume_usage_cache.create_column(project_id)
    shadow_volume_usage_cache.create_column(user_id)
    # fix for 176 migration
    availability_zone = Column('availability_zone', String(255))
    shadow_volume_usage_cache.create_column(availability_zone)
def downgrade(migrate_engine):
    """Revert the 175/176 shadow-table fixes applied by upgrade().

    Restores the integer ``instance_id`` column and removes the uuid/project/
    user/availability_zone columns from shadow_volume_usage_cache.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    shadow_volume_usage_cache = Table('shadow_volume_usage_cache', meta,
                                      autoload=True)
    # fix for 175 migration
    shadow_volume_usage_cache.drop_column('instance_uuid')
    shadow_volume_usage_cache.drop_column('user_id')
    shadow_volume_usage_cache.drop_column('project_id')
    instance_id = Column('instance_id', Integer)
    shadow_volume_usage_cache.create_column(instance_id)
    # fix for 176 migration
    shadow_volume_usage_cache.drop_column('availability_zone')
| apache-2.0 |
yordan-desta/QgisIns | python/ext-libs/owslib/ows.py | 28 | 10580 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2008 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
"""
API for OGC Web Services Common (OWS) constructs and metadata.
OWS Common: http://www.opengeospatial.org/standards/common
Currently supports version 1.1.0 (06-121r3).
"""
from owslib.etree import etree
from owslib import crs, util
from owslib.namespaces import Namespaces
n = Namespaces()
OWS_NAMESPACE_1_0_0 = n.get_namespace("ows")
OWS_NAMESPACE_1_1_0 = n.get_namespace("ows110")
OWS_NAMESPACE_2_0 = n.get_namespace("ows200")
XSI_NAMESPACE = n.get_namespace("xsi")
XLINK_NAMESPACE = n.get_namespace("xlink")
DEFAULT_OWS_NAMESPACE=OWS_NAMESPACE_1_1_0 #Use this as default for OWSCommon objects
class OwsCommon(object):
    """Holds the OWS Common version and its matching XML namespace."""

    def __init__(self, version):
        self.version = version
        # Only 1.0.0 keeps its own namespace; everything else maps to 1.1.0.
        self.namespace = (OWS_NAMESPACE_1_0_0 if version == '1.0.0'
                          else OWS_NAMESPACE_1_1_0)
class ServiceIdentification(object):
    """OWS Common ServiceIdentification metadata (title, keywords, versions)."""

    def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
        self._root = infoset
        val = self._root.find(util.nspath('Title', namespace))
        self.title = util.testXMLValue(val)
        val = self._root.find(util.nspath('Abstract', namespace))
        self.abstract = util.testXMLValue(val)
        self.keywords = []
        for f in self._root.findall(util.nspath('Keywords/Keyword', namespace)):
            if f.text is not None:
                self.keywords.append(f.text)
        val = self._root.find(util.nspath('AccessConstraints', namespace))
        self.accessconstraints = util.testXMLValue(val)
        val = self._root.find(util.nspath('Fees', namespace))
        self.fees = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceType', namespace))
        self.type = util.testXMLValue(val)
        # Kept as an alias of 'type' for backwards compatibility.
        self.service = self.type
        val = self._root.find(util.nspath('ServiceTypeVersion', namespace))
        self.version = util.testXMLValue(val)
        self.profiles = []
        for p in self._root.findall(util.nspath('Profile', namespace)):
            # Bug fix: append each Profile element's own value; the original
            # appended the stale 'val' (ServiceTypeVersion) once per profile.
            self.profiles.append(util.testXMLValue(p))
class ServiceProvider(object):
    """OWS Common ServiceProvider construct: provider name, contact and URL.

    :param infoset: the ServiceProvider element
    :param namespace: OWS namespace URI to qualify child element lookups
    """
    def __init__(self, infoset,namespace=DEFAULT_OWS_NAMESPACE):
        self._root = infoset
        val = self._root.find(util.nspath('ProviderName', namespace))
        self.name = util.testXMLValue(val)
        self.contact = ServiceContact(infoset, namespace)
        val = self._root.find(util.nspath('ProviderSite', namespace))
        if val is not None:
            # ProviderSite carries its URL in an xlink:href attribute.
            urlattrib=val.attrib[util.nspath('href', XLINK_NAMESPACE)]
            self.url = util.testXMLValue(urlattrib, True)
        else:
            self.url =None
class ServiceContact(object):
    """OWS Common ServiceContact construct: the provider's contact details.

    Reads the ServiceContact subtree of a ServiceProvider element into flat
    attributes (role, phone, address, email, url, hours, instructions, ...).
    """
    def __init__(self, infoset,namespace=DEFAULT_OWS_NAMESPACE):
        self._root = infoset
        val = self._root.find(util.nspath('ProviderName', namespace))
        self.name = util.testXMLValue(val)
        self.organization=util.testXMLValue(self._root.find(util.nspath('ContactPersonPrimary/ContactOrganization', namespace)))
        val = self._root.find(util.nspath('ProviderSite', namespace))
        if val is not None:
            self.site = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
        else:
            self.site = None
        val = self._root.find(util.nspath('ServiceContact/Role', namespace))
        self.role = util.testXMLValue(val)
        # NOTE(review): this overwrites the ProviderName value assigned to
        # self.name above — confirm that IndividualName is intended to win.
        val = self._root.find(util.nspath('ServiceContact/IndividualName', namespace))
        self.name = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/PositionName', namespace))
        self.position = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Voice', namespace))
        self.phone = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Facsimile', namespace))
        self.fax = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/DeliveryPoint', namespace))
        self.address = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/City', namespace))
        self.city = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/AdministrativeArea', namespace))
        self.region = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/PostalCode', namespace))
        self.postcode = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/Country', namespace))
        self.country = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/ElectronicMailAddress', namespace))
        self.email = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/OnlineResource', namespace))
        if val is not None:
            # OnlineResource carries its URL in an xlink:href attribute.
            self.url = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
        else:
            self.url = None
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/HoursOfService', namespace))
        self.hours = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/ContactInstructions', namespace))
        self.instructions = util.testXMLValue(val)
class Constraint(object):
    """A named OWS constraint together with its list of allowed values."""

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.name = elem.attrib.get('name')
        # Values may appear either directly under the constraint element or
        # nested inside an AllowedValues container; collect both, in order.
        direct = [v.text for v in elem.findall(util.nspath('Value', namespace))]
        nested = [v.text for v in elem.findall(util.nspath('AllowedValues/Value', namespace))]
        self.values = direct + nested

    def __repr__(self):
        if not self.values:
            return "Constraint: %s" % self.name
        return "Constraint: %s - %s" % (self.name, self.values)
class OperationsMetadata(object):
    """OWS OperationsMetadata construct for a single operation.

    Collects the operation's HTTP methods (with per-method constraints),
    its parameters and its operation-level constraints.
    """
    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.name = elem.attrib['name']
        self.formatOptions = ['text/xml']
        parameters = []
        self.methods = []
        self.constraints = []
        # One entry per HTTP verb (Get/Post) that carries an xlink:href URL.
        for verb in elem.findall(util.nspath('DCP/HTTP/*', namespace)):
            url = util.testXMLAttribute(verb, util.nspath('href', XLINK_NAMESPACE))
            if url is not None:
                verb_constraints = [Constraint(conts, namespace) for conts in verb.findall(util.nspath('Constraint', namespace))]
                self.methods.append({'constraints' : verb_constraints, 'type' : util.xmltag_split(verb.tag), 'url': url})
        # OWS 1.1.0 nests parameter values under AllowedValues; 1.0.0 does not.
        for parameter in elem.findall(util.nspath('Parameter', namespace)):
            if namespace == OWS_NAMESPACE_1_1_0:
                parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(util.nspath('AllowedValues/Value', namespace))]}))
            else:
                parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(util.nspath('Value', namespace))]}))
        self.parameters = dict(parameters)
        for constraint in elem.findall(util.nspath('Constraint', namespace)):
            self.constraints.append(Constraint(constraint, namespace))
class BoundingBox(object):
    """OWS BoundingBox construct: corner coordinates, CRS and dimensions.

    Corner attributes (minx/miny/maxx/maxy) stay None when the element has
    no parseable LowerCorner/UpperCorner values.
    """
    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.minx = None
        self.miny = None
        self.maxx = None
        self.maxy = None
        val = elem.attrib.get('crs')
        if val is not None:
            self.crs = crs.Crs(val)
        else:
            self.crs = None
        val = elem.attrib.get('dimensions')
        if val is not None:
            self.dimensions = int(util.testXMLValue(val, True))
        else: # assume 2
            self.dimensions = 2
        val = elem.find(util.nspath('LowerCorner', namespace))
        tmp = util.testXMLValue(val)
        if tmp is not None:
            xy = tmp.split()
            if len(xy) > 1:
                # Swap coordinate order for CRSes declared as y-before-x.
                if self.crs is not None and self.crs.axisorder == 'yx':
                    self.minx, self.miny = xy[1], xy[0]
                else:
                    self.minx, self.miny = xy[0], xy[1]
        val = elem.find(util.nspath('UpperCorner', namespace))
        tmp = util.testXMLValue(val)
        if tmp is not None:
            xy = tmp.split()
            if len(xy) > 1:
                if self.crs is not None and self.crs.axisorder == 'yx':
                    self.maxx, self.maxy = xy[1], xy[0]
                else:
                    self.maxx, self.maxy = xy[0], xy[1]
class WGS84BoundingBox(BoundingBox):
    """WGS84 bbox, axis order xy"""
    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        BoundingBox.__init__(self, elem, namespace)
        # WGS84 boxes are always 2D lon/lat in CRS84, regardless of the
        # attributes parsed by the base class.
        self.dimensions = 2
        self.crs = crs.Crs('urn:ogc:def:crs:OGC:2:84')
class ExceptionReport(Exception):
    """OWS ExceptionReport raised when a service returns an exception document.

    Parses every Exception element into a dict (exceptionCode, locator,
    ExceptionText) and exposes the first one as code/locator/msg.
    """
    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.exceptions = []
        # Accept either an Element or an ElementTree.
        if hasattr(elem, 'getroot'):
            elem = elem.getroot()
        for i in elem.findall(util.nspath('Exception', namespace)):
            tmp = {}
            val = i.attrib.get('exceptionCode')
            tmp['exceptionCode'] = util.testXMLValue(val, True)
            val = i.attrib.get('locator')
            tmp['locator'] = util.testXMLValue(val, True)
            val = i.find(util.nspath('ExceptionText', namespace))
            tmp['ExceptionText'] = util.testXMLValue(val)
            self.exceptions.append(tmp)
        # set topmost stacktrace as return message
        # NOTE(review): raises IndexError if the report contains no Exception
        # elements — confirm callers only construct this from real reports.
        self.code = self.exceptions[0]['exceptionCode']
        self.locator = self.exceptions[0]['locator']
        self.msg = self.exceptions[0]['ExceptionText']
        self.xml = etree.tostring(elem)
    def __str__(self):
        return repr(self.msg)
| gpl-2.0 |
GinnyN/towerofdimensions-django | django/contrib/auth/tests/decorators.py | 251 | 1562 | from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.tests.views import AuthViewsTestCase
class LoginRequiredTestCase(AuthViewsTestCase):
    """
    Tests the login_required decorators
    """
    urls = 'django.contrib.auth.tests.urls'
    def testCallable(self):
        """
        Check that login_required is assignable to callable objects.
        """
        class CallableView(object):
            def __call__(self, *args, **kwargs):
                pass
        login_required(CallableView())
    def testView(self):
        """
        Check that login_required is assignable to normal views.
        """
        def normal_view(request):
            pass
        login_required(normal_view)
    # NOTE: the login_url default is bound to settings.LOGIN_URL at class
    # definition time, not per call.
    def testLoginRequired(self, view_url='/login_required/', login_url=settings.LOGIN_URL):
        """
        Check that login_required works on a simple view wrapped in a
        login_required decorator.
        """
        # Anonymous request must redirect (302) to the login URL.
        response = self.client.get(view_url)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(login_url in response['Location'])
        # After logging in the view must be served normally.
        self.login()
        response = self.client.get(view_url)
        self.assertEqual(response.status_code, 200)
    def testLoginRequiredNextUrl(self):
        """
        Check that login_required works on a simple view wrapped in a
        login_required decorator with a login_url set.
        """
        self.testLoginRequired(view_url='/login_required_login_url/',
                               login_url='/somewhere/')
| bsd-3-clause |
jcatw/scnn | scnn/scnn.py | 1 | 11080 | __author__ = 'jatwood'
import lasagne
import lasagne.layers as layers
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
import util
# This class is not user facing; it contains the Lasagne internals for the SCNN model.
class SearchConvolution(layers.MergeLayer):
    """
    A search-convolutional Lasagne layer.

    Merges two inputs [Apow, X]: Apow stacks the hop matrices (one per hop)
    and X holds the node features; each hop's diffused features are scaled
    by the corresponding row of the learned weight matrix W.
    """
    def __init__(self, incomings, n_hops, n_features,
                 W=lasagne.init.Normal(0.01),
                 nonlinearity=lasagne.nonlinearities.tanh,
                 **kwargs):
        super(SearchConvolution, self).__init__(incomings, **kwargs)
        # One weight row per hop; each row weights all features at that hop.
        self.W = self.add_param(W, (n_hops,n_features), name='W')
        self.n_hops = n_hops
        self.n_features = n_features
        self.nonlinearity = nonlinearity
    def get_output_for(self, inputs, **kwargs):
        """
        Compute search convolution of inputs.
        :param inputs: [Apow, X]; Apow: stacked hop matrices, assumed
                       (n_hops, n_nodes, n_nodes) -- TODO confirm; X:
                       node features (n_nodes, n_features)
        :return: Search convolution of inputs with shape (self.nhops, self.nfeatures)
        """
        Apow = inputs[0]
        X = inputs[1]
        def compute_output(i, w, a, x, h):
            """
            Per-hop step function for theano.scan.
            :param i: hop index (unused in the computation)
            :param w: weight vector (n_features,)
            :param a: adjacency/hop matrix (n_nodes, n_nodes)
            :param x: feature matrix (n_nodes, n_features)
            :param h: n_hops (unused in the computation)
            :return: output[i] = W[i] (column-broadcast) * (A^i X)^T
            """
            return (T.dot(a, x).transpose()) * T.addbroadcast(T.reshape(w, (w.shape[0],1)),1)
        # Hop indices 0..n_hops-1, shared so scan can iterate over them.
        seq_values = np.arange(self.n_hops)
        seq = theano.shared(value = seq_values, name="seq", borrow=True)
        # Iterate hops in lockstep: (index, weight row, hop matrix).
        out, _ = theano.scan(fn=compute_output,
                             non_sequences=[X, self.n_hops],
                             sequences=[seq, self.W, Apow],
                             n_steps = self.n_hops)
        # NOTE(review): .transpose() with no axes reverses ALL dimensions,
        # yielding (n_nodes, n_features, n_hops), whereas
        # get_output_shape_for declares (n_nodes, n_hops, n_features) —
        # confirm the intended axis order.
        return self.nonlinearity(out.transpose())
    def get_output_shape_for(self, input_shapes):
        # Debug trace left in by the author; prints the declared output shape.
        print (input_shapes[1][0], self.n_hops, self.n_features)
        return (input_shapes[1][0], self.n_hops, self.n_features)
class DeepSearchConvolution(layers.Layer):
    """
    A search-convolutional Lasagne layer.

    Elementwise variant used to stack extra depth: learns one weight per
    (feature, hop) position and multiplies it into the incoming tensor.
    """
    def __init__(self, incoming, n_hops, n_features,
                 W=lasagne.init.Normal(0.01),
                 nonlinearity=lasagne.nonlinearities.tanh,
                 **kwargs):
        super(DeepSearchConvolution, self).__init__(incoming, **kwargs)
        # Leading axis is broadcast so the same W applies to every sample.
        self.W = T.addbroadcast(self.add_param(W, (1,n_features,n_hops), name='W'),0)
        self.n_hops = n_hops
        self.n_features = n_features
        self.nonlinearity = nonlinearity

    def get_output_for(self, input, **kwargs):
        # Elementwise weighting followed by the layer nonlinearity.
        return self.nonlinearity(self.W * input)

    def get_output_shape_for(self, input_shape):
        # Debug trace (Python 2 print statement); shape passes through unchanged.
        print input_shape
        return input_shape
# This class is user-facing. It contains a full SCNN model.
class SCNN:
    """
    The search-convolutional neural network model.

    Wraps the Lasagne/Theano graph construction, training loop with early
    stopping, and prediction helpers.
    """
    def __init__(self, n_hops=2, transform_fn=util.rw_laplacian):
        # n_hops: number of diffusion hops (adjacency powers) to convolve over.
        # transform_fn: optional adjacency-matrix transform (e.g. random-walk
        # Laplacian) applied before computing the power series; None disables.
        self.n_hops = n_hops
        self.transform_fn = transform_fn

        # Initialize Theano variables
        self.var_A = T.matrix('A')          # adjacency matrix
        self.var_Apow = T.tensor3('Apow')   # stacked adjacency powers
        self.var_X = T.matrix('X')          # node features
        self.var_Y = T.imatrix('Y')         # one-hot labels

    def _register_layers(self, batch_size, n_nodes, n_features, n_classes):
        # Build the graph: (Apow, X) -> search convolution -> dense softmax-ish output.
        self.l_in_apow = lasagne.layers.InputLayer((self.n_hops + 1, batch_size, n_nodes), input_var=self.var_Apow)
        self.l_in_x = lasagne.layers.InputLayer((n_nodes, n_features), input_var=self.var_X)
        self.l_sc = SearchConvolution([self.l_in_apow, self.l_in_x], self.n_hops + 1, n_features)
        self.l_out = layers.DenseLayer(self.l_sc, num_units=n_classes, nonlinearity=lasagne.nonlinearities.tanh)

    def _get_output_layer(self):
        # Final layer of the stack; subclasses override _register_layers.
        return self.l_out

    def fit(self, A, X, Y, train_indices, valid_indices,
            learning_rate=0.05, batch_size=100, n_epochs=100,
            loss_fn=lasagne.objectives.multiclass_hinge_loss,
            update_fn=lasagne.updates.adagrad,
            stop_early=True,
            stop_window_size=5,
            output_weights=False,
            show_weights=False):
        """Train on adjacency A, features X, one-hot labels Y.

        train_indices/valid_indices select node rows; early stopping
        compares each epoch's validation loss to the mean of the last
        stop_window_size losses.  output_weights/show_weights dump or plot
        the search-convolution weights per epoch.
        """
        # Ensure that data have the correct dimensions
        assert A.shape[0] == X.shape[0]
        assert X.shape[0] == Y.shape[0]
        assert len(Y.shape) > 1

        if self.transform_fn is not None:
            A = self.transform_fn(A)

        # Extract dimensions
        n_nodes = A.shape[0]
        n_features = X.shape[1] + 1  # +1 for the bias column appended below
        n_classes = Y.shape[1]

        # NOTE(review): n_batch is derived from n_nodes rather than
        # len(train_indices); if fewer training indices than nodes exist the
        # start<end guard below compensates -- confirm this is intended.
        n_batch = n_nodes // batch_size

        # Compute the matrix power series
        Apow = util.A_power_series(A, self.n_hops)
        self.Apow = Apow  # cached for predict()/predict_proba() with A=None

        # Add bias term to X
        X = np.hstack([X, np.ones((X.shape[0],1))]).astype('float32')

        # Create Lasagne layers
        self._register_layers(batch_size, n_nodes, n_features, n_classes)

        # Create symbolic representations of predictions, loss, parameters, and updates.
        prediction = layers.get_output(self._get_output_layer())
        loss = lasagne.objectives.aggregate(loss_fn(prediction, self.var_Y), mode='mean')
        params = lasagne.layers.get_all_params(self._get_output_layer())
        updates = update_fn(loss, params, learning_rate=learning_rate)

        # Create functions that apply the model to data and return loss
        apply_loss = theano.function([self.var_Apow, self.var_X, self.var_Y],
                                     loss, updates=updates)

        # Train the model
        print 'Training model...'
        validation_losses = []
        # Window starts at +inf so the first epochs never trigger early stop.
        validation_loss_window = np.zeros(stop_window_size)
        validation_loss_window[:] = float('+inf')

        for epoch in range(n_epochs):
            train_loss = 0.0

            np.random.shuffle(train_indices)

            for batch in range(n_batch):
                start = batch * batch_size
                end = min((batch + 1) * batch_size, train_indices.shape[0])

                if start < end:
                    train_loss += apply_loss(Apow[:,train_indices[start:end],:],
                                             X,
                                             Y[train_indices[start:end],:])

            # NOTE: apply_loss also applies parameter updates, so this
            # validation pass trains on the validation set as written --
            # presumably unintended; confirm against the original project.
            valid_loss = apply_loss(Apow[:,valid_indices,:],
                                    X,
                                    Y[valid_indices,:])

            print "Epoch %d training error: %.6f" % (epoch, train_loss)
            print "Epoch %d validation error: %.6f" % (epoch, valid_loss)

            validation_losses.append(valid_loss)

            if output_weights:
                W = layers.get_all_param_values(self.l_sc)[0]
                np.savetxt('W_%d.csv' % (epoch,), W, delimiter=',')

            if show_weights:
                W = layers.get_all_param_values(self.l_sc)[0]
                plt.imshow(W, aspect='auto', interpolation='none')
                plt.show()

            if stop_early:
                if valid_loss >= validation_loss_window.mean():
                    print 'Validation loss did not decrease. Stopping early.'
                    break
                validation_loss_window[epoch % stop_window_size] = valid_loss

    def predict(self, X, test_indices, A=None):
        """Return predicted class indices (argmax) for the given node rows.

        Uses the power series cached by fit() unless A is supplied.
        """
        if A is None:
            Apow = self.Apow
        else:
            if self.transform_fn is not None:
                A = self.transform_fn(A)
            # Compute the matrix power series
            Apow = util.A_power_series(A, self.n_hops)

        # add bias term to X
        X = np.hstack([X, np.ones((X.shape[0],1))]).astype('float32')

        # Create symbolic representation of predictions
        pred = layers.get_output(self.l_out)

        # Create a function that applies the model to data to predict a class
        pred_fn = theano.function([self.var_Apow, self.var_X], T.argmax(pred, axis=1), allow_input_downcast=True)

        # Return the predictions
        predictions = pred_fn(Apow[:,test_indices,:], X)
        return predictions

    def predict_proba(self, X, test_indices, A=None):
        """Return per-class probabilities (softmax over the network output)."""
        if A is None:
            Apow = self.Apow
        else:
            if self.transform_fn is not None:
                A = self.transform_fn(A)
            # Compute the matrix power series
            Apow = util.A_power_series(A, self.n_hops)

        # add bias term to X
        X = np.hstack([X, np.ones((X.shape[0],1))]).astype('float32')

        # Create symbolic representation of predictions
        pred = layers.get_output(self.l_out)

        # Create a function that applies the model to data to predict a class
        # (manual softmax over the tanh outputs)
        pred_fn = theano.function([self.var_Apow, self.var_X], T.exp(pred) / T.exp(pred).sum(axis=1,keepdims=True), allow_input_downcast=True)

        # Return the predictions
        predictions = pred_fn(Apow[:,test_indices,:], X)
        return predictions
class DeepSCNN(SCNN):
    """SCNN variant that stacks n_layers elementwise DeepSearchConvolution
    layers between the search convolution and the dense output."""
    def __init__(self, n_hops=2, n_layers=4, transform_fn=util.rw_laplacian):
        self.n_hops = n_hops
        self.n_layers = n_layers
        self.transform_fn = transform_fn

        # Initialize Theano variables
        self.var_A = T.matrix('A')
        self.var_Apow = T.tensor3('Apow')
        self.var_X = T.matrix('X')
        self.var_Y = T.imatrix('Y')

    def _register_layers(self, batch_size, n_nodes, n_features, n_classes):
        # Same front end as SCNN, then n_layers elementwise conv layers.
        self.l_in_apow = lasagne.layers.InputLayer((self.n_hops + 1, batch_size, n_nodes), input_var=self.var_Apow)
        self.l_in_x = lasagne.layers.InputLayer((n_nodes, n_features), input_var=self.var_X)
        self.l_sc = SearchConvolution([self.l_in_apow, self.l_in_x], self.n_hops + 1, n_features)
        self.l_deep = self.l_sc
        for i in range(self.n_layers):
            self.l_deep = DeepSearchConvolution(self.l_deep, n_hops=self.n_hops + 1, n_features=n_features)
        self.l_out = layers.DenseLayer(self.l_deep, num_units=n_classes, nonlinearity=lasagne.nonlinearities.tanh)
class DeepFeedForwardSCNN(SCNN):
    """SCNN variant that stacks n_layers ReLU dense layers after the search
    convolution, before the final tanh output layer."""
    def __init__(self, n_hops=2, n_layers=4, transform_fn=util.rw_laplacian):
        self.n_hops = n_hops
        self.n_layers = n_layers
        self.transform_fn = transform_fn

        # Initialize Theano variables
        self.var_A = T.matrix('A')
        self.var_Apow = T.tensor3('Apow')
        self.var_X = T.matrix('X')
        self.var_Y = T.imatrix('Y')

    def _register_layers(self, batch_size, n_nodes, n_features, n_classes):
        # Same front end as SCNN, then a feed-forward ReLU stack.
        self.l_in_apow = lasagne.layers.InputLayer((self.n_hops + 1, batch_size, n_nodes), input_var=self.var_Apow)
        self.l_in_x = lasagne.layers.InputLayer((n_nodes, n_features), input_var=self.var_X)
        self.l_sc = SearchConvolution([self.l_in_apow, self.l_in_x], self.n_hops + 1, n_features)
        self.l_deep = self.l_sc
        for i in range(self.n_layers):
            self.l_deep = layers.DenseLayer(self.l_deep, num_units=n_classes, nonlinearity=lasagne.nonlinearities.rectify)
        self.l_out = layers.DenseLayer(self.l_deep, num_units=n_classes, nonlinearity=lasagne.nonlinearities.tanh)
| mit |
AtheonAnalytics/trext | trext/tests/test_conn.py | 1 | 1460 | from unittest import TestCase
import mock
from trext.db.conn import AnyDB
class TestConnectionDefault(TestCase):
    """AnyDB with the default driver should delegate to pyodbc."""

    def setUp(self):
        self.dsn = "DSN=some_db_connection_string"
        self.db = AnyDB(self.dsn)

    @mock.patch('trext.db.conn.pyodbc')
    def test_is_connected_to_conn_string(self, mock_pyodbc):
        """get_cursor() connects with the exact DSN given at construction."""
        self.db.get_cursor()
        mock_pyodbc.connect.assert_called_with(self.dsn)

    @mock.patch('trext.db.conn.pyodbc')
    def test_close_connection(self, mock_pyodbc):
        """close() closes the underlying pyodbc connection."""
        self.db.get_cursor()
        self.db.close()
        mock_pyodbc.connect().close.assert_called()
class TestConnectionExasol(TestCase):
    """AnyDB with dbtype='exasol' should delegate to the exasol module."""

    def setUp(self):
        self.dsn = "DSN=some_db_connection_string"
        self.db = AnyDB(self.dsn, dbtype='exasol')

    def _fake_exasol(self):
        """Return (mock module, patcher) masking `import exasol` in sys.modules."""
        module = mock.MagicMock()
        patcher = mock.patch.dict('sys.modules', {'exasol': module})
        return module, patcher

    def test_is_connected_to_conn_string(self):
        """get_cursor() connects through exasol with the construction DSN."""
        module, patcher = self._fake_exasol()
        with patcher:
            self.db.get_cursor()
            module.connect.assert_called()
            module.connect.assert_called_with(self.dsn)

    def test_close_connection(self):
        """close() closes the underlying exasol connection."""
        module, patcher = self._fake_exasol()
        with patcher:
            self.db.get_cursor()
            self.db.close()
            module.connect().close.assert_called()
| mit |
caneruguz/osf.io | api/preprint_providers/views.py | 1 | 13586 | from rest_framework import generics
from rest_framework import permissions as drf_permissions
from modularodm import Q as MQ
from django.db.models import Q
from framework.auth.oauth_scopes import CoreScopes
from osf.models import AbstractNode, Subject, PreprintService, PreprintProvider
from api.base import permissions as base_permissions
from api.base.filters import PreprintFilterMixin, ODMFilterMixin
from api.base.views import JSONAPIBaseView
from api.base.pagination import MaxSizePagination
from api.base.utils import get_object_or_error, get_user_auth
from api.licenses.views import LicenseList
from api.taxonomies.serializers import TaxonomySerializer
from api.preprint_providers.serializers import PreprintProviderSerializer
from api.preprints.serializers import PreprintSerializer
from api.preprints.permissions import PreprintPublishedOrAdmin
class PreprintProviderList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin):
    """Paginated, read-only list of verified preprint providers.

    Entities have type "preprint_providers"; attributes include name,
    logo/banner paths, description, advisory board HTML, contact/support
    emails, social accounts, domain settings and an example preprint guid.
    Each provider links to its preprints and its external URL.  Assume
    undocumented fields are unstable.
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
    required_write_scopes = [CoreScopes.NULL]
    model_class = PreprintProvider

    pagination_class = MaxSizePagination
    serializer_class = PreprintProviderSerializer
    view_category = 'preprint_providers'
    view_name = 'preprint_providers-list'

    ordering = ('name', )

    def get_default_odm_query(self):
        # ODMFilterMixin hook: no base filter for this endpoint.
        return None

    def get_queryset(self):
        # ListAPIView override: apply whatever query-string filters were sent.
        return PreprintProvider.find(self.get_query_from_request())
class PreprintProviderDetail(JSONAPIBaseView, generics.RetrieveAPIView):
    """Read-only detail view for a single preprint provider.

    Same "preprint_providers" attributes and relationships as the list
    endpoint (name, logo/banner paths, description, contact emails, social
    accounts, domain settings, example guid; links to preprints and the
    provider's external URL).  Assume undocumented fields are unstable.
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
    required_write_scopes = [CoreScopes.NULL]
    model_class = PreprintProvider

    serializer_class = PreprintProviderSerializer
    view_category = 'preprint_providers'
    view_name = 'preprint_provider-detail'

    def get_object(self):
        provider_id = self.kwargs['provider_id']
        return get_object_or_error(PreprintProvider, provider_id, display_name='PreprintProvider')
class PreprintProviderPreprintList(JSONAPIBaseView, generics.ListAPIView, PreprintFilterMixin):
    """Read-only list of preprints belonging to one preprint provider.

    Entities have type "preprints" (creation/modification/publication dates,
    published/orphan flags, BePress subjects, bare DOI) and relate to their
    node, primary file and provider.  Visibility: unauthenticated callers see
    published preprints on public nodes; contributors with read access also
    see their published preprints; node admins see everything of theirs.
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        PreprintPublishedOrAdmin,
    )

    ordering = ('-date_created')

    serializer_class = PreprintSerializer
    model_class = AbstractNode

    required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
    required_write_scopes = [CoreScopes.NULL]

    view_category = 'preprint_providers'
    view_name = 'preprints-list'

    def get_default_django_query(self):
        # Permissions on the list objects are enforced by the query itself.
        auth = get_user_auth(self.request)
        user = getattr(auth, 'user', None)
        provider = get_object_or_error(PreprintProvider, self.kwargs['provider_id'], display_name='PreprintProvider')

        base = Q(node__isnull=False, node__is_deleted=False, provider___id=provider._id)
        public = Q(is_published=True, node__is_public=True)
        if user:
            contributor = Q(is_published=True, node__contributor__user_id=user.id, node__contributor__read=True)
            admin = Q(node__contributor__user_id=user.id, node__contributor__admin=True)
            return base & (public | contributor | admin)
        return base & public

    def get_queryset(self):
        return PreprintService.objects.filter(self.get_query_from_request()).distinct()
class PreprintProviderTaxonomies(JSONAPIBaseView, generics.ListAPIView):
    """Subject taxonomy for a provider, optionally filtered by parent subject."""
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    view_category = 'preprint_providers'
    view_name = 'taxonomy-list'

    required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
    required_write_scopes = [CoreScopes.NULL]

    serializer_class = TaxonomySerializer
    ordering = ('-id',)

    def is_valid_subject(self, allows_children, allowed_parents, sub):
        # TODO: Delete this when all PreprintProviders have a mapping
        if sub._id in allowed_parents:
            return True
        parent = sub.parent
        if parent:
            if parent._id in allows_children:
                return True
            grandparent = parent.parent
            if grandparent and grandparent._id in allows_children:
                return True
        return False

    def get_queryset(self):
        params = self.request.query_params
        parent = params.get('filter[parents]', None) or params.get('filter[parent]', None)
        provider = get_object_or_error(PreprintProvider, self.kwargs['provider_id'], display_name='PreprintProvider')
        if not parent:
            return provider.all_subjects
        if parent == 'null':
            return provider.top_level_subjects
        if provider.subjects.exists():
            return provider.subjects.filter(parent___id=parent)
        # TODO: Delete this when all PreprintProviders have a mapping.
        # Compute both lookup lists once, outside the filter below.
        allowed_parents = [id_ for sublist in provider.subjects_acceptable for id_ in sublist[0]]
        allows_children = [subs[0][-1] for subs in provider.subjects_acceptable if subs[1]]
        return [
            sub for sub in Subject.find(MQ('parent___id', 'eq', parent))
            if provider.subjects_acceptable == []
            or self.is_valid_subject(allows_children=allows_children, allowed_parents=allowed_parents, sub=sub)
        ]
class PreprintProviderHighlightedSubjectList(JSONAPIBaseView, generics.ListAPIView):
    """The provider's highlighted subjects, ordered alphabetically by text."""
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    view_category = 'preprint_providers'
    view_name = 'highlighted-taxonomy-list'

    required_read_scopes = [CoreScopes.ALWAYS_PUBLIC]
    required_write_scopes = [CoreScopes.NULL]

    serializer_class = TaxonomySerializer

    def get_queryset(self):
        provider = get_object_or_error(PreprintProvider, self.kwargs['provider_id'], display_name='PreprintProvider')
        highlighted_ids = [subject.id for subject in provider.highlighted_subjects]
        return Subject.objects.filter(id__in=highlighted_ids).order_by('text')
class PreprintProviderLicenseList(LicenseList):
    """Licenses a provider accepts, with its default license listed first.

    Falls back to the full license list when the provider restricts nothing.
    """
    # TODO: restore ordering once the frontend for selecting default licenses
    # no longer relies on the default-first ordering below.
    ordering = ()
    view_category = 'preprint_providers'

    def get_queryset(self):
        provider = get_object_or_error(PreprintProvider, self.kwargs['provider_id'], display_name='PreprintProvider')
        default = provider.default_license
        if provider.licenses_acceptable.count():
            if not default:
                return provider.licenses_acceptable.get_queryset()
            rest = [lic for lic in provider.licenses_acceptable.all() if lic != default]
            return [default] + rest
        if not default:
            return super(PreprintProviderLicenseList, self).get_queryset()
        rest = [lic for lic in super(PreprintProviderLicenseList, self).get_queryset() if lic != default]
        return [default] + rest
| apache-2.0 |
walkinreeds/twisted-intro | tests/test_poetry.py | 11 | 2509 | from twisted.internet.defer import Deferred
from twisted.internet.error import ConnectError
from twisted.internet.protocol import ClientFactory, ServerFactory, Protocol
from twisted.trial.unittest import TestCase
# Normally we would import the classes we want to test.
# But to make the examples self-contained, we're just
# copying them here, with a few modifications.
class PoetryServerProtocol(Protocol):
    """Server side: push the factory's poem at the client, then hang up."""
    def connectionMade(self):
        self.transport.write(self.factory.poem)
        self.transport.loseConnection()
class PoetryServerFactory(ServerFactory):
    """Factory that serves a single fixed poem to every connection."""
    protocol = PoetryServerProtocol

    def __init__(self, poem):
        # poem is written verbatim by PoetryServerProtocol.connectionMade.
        self.poem = poem
class PoetryClientProtocol(Protocol):
    """Client side: accumulate bytes until the server closes the connection,
    then hand the finished poem to the factory."""
    # Class-level default; instances shadow it as data arrives.
    poem = ''

    def dataReceived(self, data):
        self.poem += data

    def connectionLost(self, reason):
        # The server closing the connection signals end-of-poem.
        self.poemReceived(self.poem)

    def poemReceived(self, poem):
        self.factory.poem_finished(poem)
class PoetryClientFactory(ClientFactory):
    """Client factory whose `deferred` fires with the poem, or errbacks on
    connection failure.  The deferred fires at most once."""

    protocol = PoetryClientProtocol

    def __init__(self):
        self.deferred = Deferred()

    def _take_deferred(self):
        """Detach and return the pending deferred, or None if already fired."""
        d = self.deferred
        self.deferred = None
        return d

    def poem_finished(self, poem):
        if self.deferred is not None:
            self._take_deferred().callback(poem)

    def clientConnectionFailed(self, connector, reason):
        if self.deferred is not None:
            self._take_deferred().errback(reason)
def get_poetry(host, port):
    """Download a poem from host:port.

    Returns a Deferred that fires with the poem text, or errbacks with the
    connection failure.
    """
    # Imported here so importing this module never touches the reactor.
    from twisted.internet import reactor
    factory = PoetryClientFactory()
    reactor.connectTCP(host, port, factory)
    return factory.deferred
# Fixture poem served by the in-process test server below.
TEST_POEM = '''\
This is a test.
This is only a test.'''
class PoetryTestCase(TestCase):
    """End-to-end tests: spin up a real poetry server on an ephemeral port
    and fetch from it with get_poetry."""
    def setUp(self):
        factory = PoetryServerFactory(TEST_POEM)
        from twisted.internet import reactor
        # Port 0 lets the OS pick a free port; remember it for the client.
        self.port = reactor.listenTCP(0, factory, interface="127.0.0.1")
        self.portnum = self.port.getHost().port

    def tearDown(self):
        port, self.port = self.port, None
        # Returning the deferred makes trial wait for the port to close.
        return port.stopListening()

    def test_client(self):
        """The correct poem is returned by get_poetry."""
        d = get_poetry('127.0.0.1', self.portnum)

        def got_poem(poem):
            self.assertEquals(poem, TEST_POEM)

        d.addCallback(got_poem)
        return d

    def test_failure(self):
        """The correct failure is returned by get_poetry when
        connecting to a port with no server."""
        d = get_poetry('127.0.0.1', 0)
        return self.assertFailure(d, ConnectError)
| mit |
SeanTater/albemarle | sketches/15-keras_seq2seq_mod.py | 1 | 26558 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
A Reccurent Neural Network (LSTM) implementation example using TensorFlow library.
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import numpy as np
import lzma
import re
import sqlite3
import keras
from keras.models import Sequential
from keras.layers import Layer
from keras.layers.core import RepeatVector, Dense, Dropout, Activation
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.recurrent import time_distributed_dense
import seq2seq
#from seq2seq.models import Seq2seq
from seq2seq.layers.bidirectional import Bidirectional
from seq2seq.layers.encoders import LSTMEncoder
from seq2seq.layers.decoders import AttentionDecoder
# Network Parameters
# Patents have about 6300 chars/claim
__n_chop = 250
__block_size = 8
__n_hidden = 128
# This limits you to ASCII codes. You can use more if you want, it's just slower
n_input = n_output = 128
char_code_start = 0
# Get claims. Pay attention to the memory layout. The vectors are shared.
def get_book():
    """Load patent claims into a fixed-width byte matrix.

    Returns (book, lens):
      book -- uint8 array of shape (n_samples, __n_chop); each row is one
              claim, truncated or zero-padded to __n_chop bytes and clipped
              to the modelled range [char_code_start, char_code_start+n_input).
      lens -- int32 vector of non-zero byte counts per row (true claim
              lengths, consumed by the seq2seq model).
    """
    print ("Reading patent claims.")
    # sqlite3's `with` block only manages the transaction -- it never closes
    # the connection -- so close explicitly to release the file handle.
    conn = sqlite3.connect("patent-sample.db")
    try:
        claims = conn.execute("SELECT claims FROM patent ORDER BY random();").fetchall()
    finally:
        conn.close()
    raw = b''.join([row[0].encode()[:__n_chop].ljust(__n_chop, b'\x00') for row in claims])
    # np.frombuffer replaces the deprecated np.fromstring.  It yields a
    # read-only view, but the arithmetic below allocates fresh arrays, so
    # nothing ever writes through the view.
    book = np.frombuffer(raw, dtype=np.uint8).reshape((-1, __n_chop))
    # Only model ascii codes between [char_code_start, char_code_start+n_input]
    book = np.minimum(book - char_code_start, n_input - 1)
    # Cut off trailing samples so the count is a multiple of the block size.
    book = book[:(book.shape[0] // __block_size * __block_size), :]
    lens = (book != 0).sum(axis=1, dtype=np.int32)  # for seq2seq
    print ("Read {} patents.".format(book.shape[0]))
    return (book, lens)
pat = get_book()
#from __future__ import absolute_import
from keras import backend as K
from keras.layers.recurrent import SimpleRNN, GRU, LSTM
from keras import activations, initializations
from keras.engine import InputSpec
#import numpy as np
def get_state_transfer_rnn(RNN):
    '''Converts a given Recurrent sub class (e.g, LSTM, GRU) to its state transferable version.
    A state transfer RNN can transfer its hidden state to another one of the same type and compatible dimensions.
    '''
    class StateTransferRNN(RNN):

        def __init__(self, state_input=True, **kwargs):
            # state_input: whether this layer receives its initial state from
            # another StateTransferRNN (set by broadcast_state on the sender).
            self.state_outputs = []
            self.state_input = state_input
            super(StateTransferRNN, self).__init__(**kwargs)

        def reset_states(self):
            # Temporarily force statefulness so the parent allocates/clears
            # state tensors whenever state transfer is in play.
            stateful = self.stateful
            self.stateful = stateful or self.state_input or len(self.state_outputs) > 0
            if self.stateful:
                super(StateTransferRNN, self).reset_states()
            self.stateful = stateful

        def build(self, input_shape):
            # Same statefulness trick as reset_states, for weight/state setup.
            stateful = self.stateful
            self.stateful = stateful or self.state_input or len(self.state_outputs) > 0
            super(StateTransferRNN, self).build(input_shape)
            self.stateful = stateful

        def broadcast_state(self, rnns):
            # Accept a single layer or a list/tuple; skip already-registered
            # receivers, then wire this layer's post-call states into each
            # receiver's update ops.
            rnns = (set if type(rnns) in [list, tuple] else lambda a: {a})(rnns)
            rnns -= set(self.state_outputs)
            self.state_outputs.extend(rnns)
            for rnn in rnns:
                rnn.state_input = self
                rnn.updates = getattr(rnn, 'updates', [])
                rnn.updates.extend(zip(rnn.states, self.states_to_transfer))

        def call(self, x, mask=None):
            last_output, outputs, states = K.rnn(
                self.step,
                self.preprocess_input(x),
                self.states or self.get_initial_states(x),
                go_backwards=self.go_backwards,
                mask=mask,
                constants=self.get_constants(x),
                unroll=self.unroll,
                input_length=self.input_spec[0].shape[1])
            # Record state updates and expose the final states for receivers.
            # NOTE(review): relies on Python 2 zip() returning a list.
            self.updates = zip(self.states, states)
            self.states_to_transfer = states
            return outputs if self.return_sequences else last_output
    return StateTransferRNN
# Concrete state-transfer variants of the stock Keras recurrent layers.
StateTransferSimpleRNN = get_state_transfer_rnn(SimpleRNN)
StateTransferGRU = get_state_transfer_rnn(GRU)
StateTransferLSTM = get_state_transfer_rnn(LSTM)
class LSTMEncoder(StateTransferLSTM):
    """An LSTM encoder that broadcasts its final hidden state to one or more
    decoder layers (see StateTransferRNN.broadcast_state)."""

    def __init__(self, decoder=None, decoders=None, **kwargs):
        """
        decoder: a single decoder layer to receive this encoder's state.
        decoders: a list of decoder layers; ignored when `decoder` is given.
        Remaining kwargs are forwarded to StateTransferLSTM.
        """
        super(LSTMEncoder, self).__init__(**kwargs)
        # `decoders=None` replaces the original mutable default `decoders=[]`
        # (same semantics, avoids the shared-default pitfall).
        if decoder:
            targets = [decoder]
        else:
            targets = decoders if decoders is not None else []
        self.broadcast_state(targets)
class LSTMDecoder(StateTransferLSTM):
    '''
    A basic LSTM decoder. Similar to [1].
    The output of at each timestep is the input to the next timestep.
    The input to the first timestep is the context vector from the encoder.

    Basic equation:
    y(t) = LSTM(s(t-1), y(t-1)); Where s is the hidden state of the LSTM (h and c)
    y(0) = LSTM(s0, C); C is the context vector from the encoder.

    In addition, the hidden state of the encoder is usually used to initialize the hidden
    state of the decoder. Checkout models.py to see how its done.
    '''
    input_ndim = 2  # decoder consumes a single context vector per sample

    def __init__(self, output_length, hidden_dim=None, **kwargs):
        # output_length: number of timesteps to generate.
        # hidden_dim: LSTM state size; defaults to the input feature dim
        # (resolved lazily in build()).
        self.output_length = output_length
        self.hidden_dim = hidden_dim
        input_dim = None
        if 'input_dim' in kwargs:
            # NOTE(review): input_dim is always None here, so passing
            # input_dim forces output_dim=None instead of propagating
            # kwargs['input_dim'].  Looks like a bug -- confirm against the
            # upstream seq2seq project before relying on this path.
            kwargs['output_dim'] = input_dim
        if 'input_shape' in kwargs:
            kwargs['output_dim'] = kwargs['input_shape'][-1]
        elif 'batch_input_shape' in kwargs:
            kwargs['output_dim'] = kwargs['batch_input_shape'][-1]
        elif 'output_dim' not in kwargs:
            kwargs['output_dim'] = None
        super(LSTMDecoder, self).__init__(**kwargs)
        self.return_sequences = True  # a decoder always emits a sequence
        self.updates = []
        self.consume_less = 'mem'

    def build(self, input_shape):
        # Build the internal LSTM with output_dim temporarily swapped to
        # hidden_dim, then add the output projection W_y/b_y on top.
        input_shape = list(input_shape)
        input_shape = input_shape[:1] + [self.output_length] + input_shape[1:]
        if not self.hidden_dim:
            self.hidden_dim = input_shape[-1]
        output_dim = input_shape[-1]
        self.output_dim = self.hidden_dim
        initial_weights = self.initial_weights
        self.initial_weights = None
        super(LSTMDecoder, self).build(input_shape)
        self.output_dim = output_dim
        self.initial_weights = initial_weights
        # Hidden-to-output projection.
        self.W_y = self.init((self.hidden_dim, self.output_dim), name='{}_W_y'.format(self.name))
        self.b_y = K.zeros((self.output_dim), name='{}_b_y'.format(self.name))
        self.trainable_weights += [self.W_y, self.b_y]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        input_shape.pop(1)
        self.input_spec = [InputSpec(shape=tuple(input_shape))]

    def get_constants(self, x):
        # Parent helpers size things off output_dim; swap in hidden_dim
        # for the duration of the call (same trick in the next two methods).
        output_dim = self.output_dim
        self.output_dim = self.hidden_dim
        consts = super(LSTMDecoder, self).get_constants(x)
        self.output_dim = output_dim
        return consts

    def reset_states(self):
        output_dim = self.output_dim
        self.output_dim = self.hidden_dim
        super(LSTMDecoder, self).reset_states()
        self.output_dim = output_dim

    def get_initial_states(self, x):
        output_dim = self.output_dim
        self.output_dim = self.hidden_dim
        initial_states = super(LSTMDecoder, self).get_initial_states(x)
        self.output_dim = output_dim
        return initial_states

    def step(self, x, states):
        # states = [h, c, y(t-1), + 2 constants]; x is ignored -- the real
        # input at each step is the previous output y(t-1).
        assert len(states) == 5, len(states)
        states = list(states)
        y_tm1 = states.pop(2)
        output_dim = self.output_dim
        self.output_dim = self.hidden_dim
        h_t, new_states = super(LSTMDecoder, self).step(y_tm1, states)
        self.output_dim = output_dim
        # Project the hidden state to the output space.
        y_t = self.activation(K.dot(h_t, self.W_y) + self.b_y)
        new_states += [y_t]
        return y_t, new_states

    def call(self, x, mask=None):
        # Tile the context vector across output_length steps; y(0) is the
        # context itself, threaded through the recurrence as extra state.
        X = K.repeat(x, self.output_length)
        input_shape = list(self.input_spec[0].shape)
        input_shape = input_shape[:1] + [self.output_length] + input_shape[1:]
        self.input_spec = [InputSpec(shape=tuple(input_shape))]
        if self.stateful or self.state_input or len(self.state_outputs) > 0:
            initial_states = self.states[:]
        else:
            initial_states = self.get_initial_states(X)
        constants = self.get_constants(X)
        y_0 = K.permute_dimensions(X, (1, 0, 2))[0, :, :]
        initial_states += [y_0]
        last_output, outputs, states = K.rnn(self.step, X,
                                             initial_states,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             constants=constants,
                                             unroll=self.unroll,
                                             input_length=self.output_length)
        if self.stateful and not self.state_input:
            self.updates = []
            for i in range(2):
                self.updates.append((self.states[i], states[i]))
        self.states_to_transfer = states
        input_shape.pop(1)
        # NOTE(review): shape is passed as a list here but as a tuple above;
        # presumably InputSpec tolerates both -- confirm.
        self.input_spec = [InputSpec(shape=input_shape)]
        return outputs

    def assert_input_compatibility(self, x):
        shape = x._keras_shape
        assert K.ndim(x) == 2, "LSTMDecoder requires 2D input, not " + str(K.ndim(x)) + "D."
        assert shape[-1] == self.output_dim or not self.output_dim, "output_dim of LSTMDecoder should be same as the last dimension in the input shape. output_dim = "+ str(self.output_dim) + ", got tensor with shape : " + str(shape) + "."

    def get_output_shape_for(self, input_shape):
        # (batch, dim) in -> (batch, output_length, dim) out.
        input_shape = list(input_shape)
        output_shape = input_shape[:1] + [self.output_length] + input_shape[1:]
        return tuple(output_shape)

    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'hidden_dim': self.hidden_dim,
                  'output_length': self.output_length}
        base_config = super(LSTMDecoder, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class AttentionSeq2seq(Sequential):
    '''
    This is an attention Seq2seq model based on [3].
    Here, there is a soft allignment between the input and output sequence elements.
    A bidirection encoder is used by default. There is no hidden state transfer in this
    model.

    The math:
    Encoder:
    X = Input Sequence of length m.
    H = Bidirection_LSTM(X); Note that here the LSTM has return_sequences = True,
    so H is a sequence of vectors of length m.

    Decoder:
    y(i) = LSTM(s(i-1), y(i-1), v(i)); Where s is the hidden state of the LSTM (h and c)
    and v (called the context vector) is a weighted sum over H:

    v(i) = sigma(j = 0 to m-1)  alpha(i, j) * H(j)

    The weight alpha[i, j] for each hj is computed as follows:
    energy = a(s(i-1), H(j))
    alhpa = softmax(energy)
    Where a is a feed forward network.
    '''
    def __init__(self, output_dim, hidden_dim, output_length, depth=1,bidirectional=True, dropout=0.1, **kwargs):
        # depth may be an int (same encoder/decoder depth) or a (enc, dec) pair.
        if bidirectional and hidden_dim % 2 != 0:
            raise Exception ("hidden_dim for AttentionSeq2seq should be even (Because of bidirectional RNN).")
        super(AttentionSeq2seq, self).__init__()
        if type(depth) not in [list, tuple]:
            depth = (depth, depth)
        # Resolve the input shape from whichever kwarg the caller supplied.
        if 'batch_input_shape' in kwargs:
            shape = kwargs['batch_input_shape']
            del kwargs['batch_input_shape']
        elif 'input_shape' in kwargs:
            shape = (None,) + tuple(kwargs['input_shape'])
            del kwargs['input_shape']
        elif 'input_dim' in kwargs:
            if 'input_length' in kwargs:
                input_length = kwargs['input_length']
            else:
                input_length = None
            shape = (None, input_length, kwargs['input_dim'])
            del kwargs['input_dim']
        # NOTE(review): `shape` is unbound if none of the three kwargs above
        # was given -- the next line would raise NameError.  Presumably
        # callers always pass one of them; confirm.
        self.add(Layer(batch_input_shape=shape))
        # Encoder stack: depth[0] (bidirectional) LSTM layers, all returning
        # sequences; hidden_dim is split across the two directions.
        if bidirectional:
            self.add(Bidirectional(LSTMEncoder(output_dim=int(hidden_dim / 2), state_input=False, return_sequences=True, **kwargs)))
        else:
            self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
        for i in range(0, depth[0] - 1):
            self.add(Dropout(dropout))
            if bidirectional:
                self.add(Bidirectional(LSTMEncoder(output_dim=int(hidden_dim / 2), state_input=False, return_sequences=True, **kwargs)))
            else:
                self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
        encoder = self.layers[-1]
        self.add(Dropout(dropout))
        # Project to output_dim directly when there is no deep decoder stack.
        self.add(TimeDistributed(Dense(hidden_dim if depth[1] > 1 else output_dim)))
        decoder = AttentionDecoder(hidden_dim=hidden_dim, output_length=output_length, state_input=False, **kwargs)
        self.add(Dropout(dropout))
        self.add(decoder)
        # Decoder stack: depth[1]-1 additional LSTM layers over the attention output.
        for i in range(0, depth[1] - 1):
            self.add(Dropout(dropout))
            self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
        self.add(Dropout(dropout))
        self.add(TimeDistributed(Dense(output_dim, activation='softmax')))
        self.encoder = encoder
        self.decoder = decoder
class LSTMDecoder2(LSTMDecoder):
    '''
    This decoder is similar to the first one, except that at every timestep the decoder gets
    a peek at the context vector.
    Similar to [2].
    Basic equation:
    y(t) = LSTM(s(t-1), y(t-1), C)
    y(0) = LSTM(s0, C, C)
    Where s is the hidden state of the LSTM (h and c), and C is the context vector
    from the encoder.
    '''
    def build(self, input_shape):
        # Temporarily hide initial_weights from the parent build() so that
        # user-supplied weights are only applied once W_x/b_x (the peek
        # projection created below) exist as well.
        initial_weights = self.initial_weights
        self.initial_weights = None
        super(LSTMDecoder2, self).build(input_shape)
        self.initial_weights = initial_weights
        dim = self.input_spec[0].shape[-1]
        # Projection applied to the context vector "peeked" at each timestep.
        self.W_x = self.init((dim, dim), name='{}_W_x'.format(self.name))
        self.b_x = K.zeros((dim,), name='{}_b_x'.format(self.name))
        self.trainable_weights += [self.W_x, self.b_x]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
    def step(self, x, states):
        """One timestep: previous output plus projected context fed to the raw LSTM step."""
        assert len(states) == 5, len(states)
        states = list(states)
        y_tm1 = states.pop(2)
        # Add the projected (peeked) context vector to the previous output.
        v = self.activation(K.dot(x, self.W_x) + self.b_x)
        y_tm1 += v
        # Run the base LSTM step at hidden_dim width, then restore output_dim.
        # NOTE(review): super(LSTMDecoder, self) deliberately skips
        # LSTMDecoder.step and calls the grandparent -- confirm intended.
        output_dim = self.output_dim
        self.output_dim = self.hidden_dim
        h_t, new_states = super(LSTMDecoder, self).step(y_tm1, states)
        self.output_dim = output_dim
        # Project the hidden state back down to output_dim.
        y_t = self.activation(K.dot(h_t, self.W_y) + self.b_y)
        new_states += [y_t]
        return y_t, new_states
    def get_config(self):
        """Return the layer config merged over the LSTMDecoder config."""
        config = {'name': self.__class__.__name__}
        base_config = super(LSTMDecoder2, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class AttentionDecoder(LSTMDecoder2):
    '''
    This is an attention decoder based on [3].
    Unlike the other decoders, AttentionDecoder requires the encoder to return
    a sequence of hidden states, instead of just the final context vector.
    Or in Keras language, while using AttentionDecoder, the encoder should have
    return_sequences = True.
    Also, the encoder should be a bidirectional RNN for best results.
    Working:
    A sequence of vectors X = {x0, x1, x2,....xm-1}, where m = input_length is input
    to the encoder.
    The encoder outputs a hidden state at each timestep H = {h0, h1, h2,....hm-1}
    The decoder uses H to generate a sequence of vectors Y = {y0, y1, y2,....yn-1},
    where n = output_length
    Decoder equations:
    Note: hk means H(k).
    y(i) = LSTM(s(i-1), y(i-1), v(i)); Where s is the hidden state of the LSTM (h and c)
    and v (called the context vector) is a weighted sum over H:
    v(i) = sigma(j = 0 to m-1) alpha[i, j] * hj
    The weight alpha(i, j) for each hj is computed as follows:
    energy = a(s(i-1), hj)
    alpha = softmax(energy)
    Where a is a feed forward network.
    '''
    input_ndim = 3
    def build(self, input_shape):
        """Create the attention weights in addition to the parent's weights.

        `input_shape` is (batch, input_length, dim); the parent classes are
        built on the per-timestep shape (batch, dim).
        """
        self.input_length = input_shape[1]
        if not self.input_length:
            raise Exception ('AttentionDecoder requires input_length.')
        # Hide initial_weights from the parent build() so they are applied
        # only after the attention weights below have been created too.
        initial_weights = self.initial_weights
        self.initial_weights = None
        super(AttentionDecoder, self).build(input_shape[:1] + input_shape[2:])
        self.initial_weights = initial_weights
        dim = self.input_dim
        hdim = self.hidden_dim
        # Projects the previous cell state into the encoder-output space.
        self.W_h = self.init((hdim, dim), name='{}_W_h'.format(self.name))
        self.b_h = K.zeros((dim, ), name='{}_b_h'.format(self.name))
        # Feed-forward alignment model a(.): one scalar energy per timestep.
        self.W_a = self.init((dim, 1), name='{}_W_a'.format(self.name))
        self.b_a = K.zeros((1,), name='{}_b_a'.format(self.name))
        self.trainable_weights += [self.W_a, self.b_a, self.W_h, self.b_h]
        if self.initial_weights is not None:
            # BUG FIX: was `self.inital_weights` (typo), which raised
            # AttributeError whenever initial weights were supplied.
            self.set_weights(self.initial_weights)
            del self.initial_weights
    def step(self, x, states):
        """One decoding step: attend over H, then run the peek-LSTM step."""
        h_tm1, c_tm1, y_tm1, B, U, H = states
        # Score every encoder hidden state against the previous cell state.
        s = K.dot(c_tm1, self.W_h) + self.b_h
        s = K.repeat(s, self.input_length)
        energy = time_distributed_dense(s + H, self.W_a, self.b_a)
        energy = K.squeeze(energy, 2)
        # Normalize energies to attention weights and broadcast over dim.
        alpha = K.softmax(energy)
        alpha = K.repeat(alpha, self.input_dim)
        alpha = K.permute_dimensions(alpha, (0, 2, 1))
        # Context vector: attention-weighted sum of the encoder states.
        weighted_H = H * alpha
        v = K.sum(weighted_H, axis=1)
        # Delegate to LSTMDecoder2.step with the context vector as input;
        # H (the last constant) is not passed through.
        y, new_states = super(AttentionDecoder, self).step(v, states[:-1])
        return y, new_states
    def call(self, x, mask=None):
        """Consume the encoder sequence H and emit the decoded output sequence."""
        # (Removed leftover debug print of the method name.)
        H = x
        # y_0 is the last encoder hidden state.
        x = K.permute_dimensions(H, (1, 0, 2))[-1, :, :]
        if self.stateful or self.state_input or len(self.state_outputs) > 0:
            initial_states = self.states[:]
        else:
            initial_states = self.get_initial_states(H)
        # H rides along as a constant so step() can attend over it.
        constants = self.get_constants(H) + [H]
        y_0 = x
        x = K.repeat(x, self.output_length)
        initial_states += [y_0]
        last_output, outputs, states = K.rnn(
            self.step,
            x,
            initial_states,
            go_backwards=self.go_backwards,
            mask=mask,
            constants=constants,
            unroll=self.unroll,
            input_length=self.output_length)
        if self.stateful and not self.state_input:
            self.updates = zip(self.states, states)
        self.states_to_transfer = states
        return outputs
    def assert_input_compatibility(self, x):
        """Validate that `x` is a 3D (batch, time, dim) tensor with a matching last dim."""
        shape = x._keras_shape
        assert K.ndim(x) == 3, "AttentionDecoder requires 3D input, not " + str(K.ndim(x)) + "D."
        assert shape[-1] == self.output_dim or not self.output_dim, "output_dim of AttentionDecoder should be same as the last dimension in the input shape. output_dim = "+ str(self.output_dim) + ", got tensor with shape : " + str(shape) + "."
    def get_output_shape_for(self, input_shape):
        # (batch, input_length, dim) -> (batch, output_length, dim).
        input_shape = list(input_shape)
        output_shape = input_shape[:1] + [self.output_length] + input_shape[2:]
        return tuple(output_shape)
    def get_config(self):
        """Return the layer config merged over the LSTMDecoder2 config."""
        config = {'name': self.__class__.__name__}
        base_config = super(AttentionDecoder, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Seq2seq(Sequential):
    '''
    Seq2seq model based on [1] and [2].
    This model has the ability to transfer the encoder hidden state to the decoder's
    hidden state(specified by the broadcast_state argument). Also, in deep models
    (depth > 1), the hidden state is propagated throughout the LSTM stack(specified by
    the inner_broadcast_state argument. You can switch between [1] based model and [2]
    based model using the peek argument.(peek = True for [2], peek = False for [1]).
    When peek = True, the decoder gets a 'peek' at the context vector at every timestep.
    [1] based model:
    Encoder:
    X = Input sequence
    C = LSTM(X); The context vector
    Decoder:
    y(t) = LSTM(s(t-1), y(t-1)); Where s is the hidden state of the LSTM (h and c)
    y(0) = LSTM(s0, C); C is the context vector from the encoder.
    [2] based model:
    Encoder:
    X = Input sequence
    C = LSTM(X); The context vector
    Decoder:
    y(t) = LSTM(s(t-1), y(t-1), C)
    y(0) = LSTM(s0, C, C)
    Where s is the hidden state of the LSTM (h and c), and C is the context vector
    from the encoder.
    Arguments:
    output_dim : Required output dimension.
    hidden_dim : The dimension of the internal representations of the model.
    output_length : Length of the required output sequence.
    depth : Used to create a deep Seq2seq model. For example, if depth = 3,
    there will be 3 LSTMs on the encoding side and 3 LSTMs on the
    decoding side. You can also specify depth as a tuple. For example,
    if depth = (4, 5), 4 LSTMs will be added to the encoding side and
    5 LSTMs will be added to the decoding side.
    broadcast_state : Specifies whether the hidden state from encoder should be
    transferred to the decoder.
    inner_broadcast_state : Specifies whether hidden states should be propagated
    throughout the LSTM stack in deep models.
    peek : Specifies if the decoder should be able to peek at the context vector
    at every timestep.
    dropout : Dropout probability in between layers.
    '''
    def __init__(self, output_dim, hidden_dim, output_length, depth=1, broadcast_state=True, inner_broadcast_state=True, peek=False, dropout=0.1, **kwargs):
        super(Seq2seq, self).__init__()
        # depth may be an int (same depth on both sides) or an (encoder, decoder) pair.
        if type(depth) not in [list, tuple]:
            depth = (depth, depth)
        # Recover the input shape from whichever keyword the caller supplied.
        # NOTE(review): if none of these kwargs is given, `shape` is unbound and
        # the LSTMEncoder construction below raises NameError -- confirm intended.
        if 'batch_input_shape' in kwargs:
            shape = kwargs['batch_input_shape']
            del kwargs['batch_input_shape']
        elif 'input_shape' in kwargs:
            shape = (None,) + tuple(kwargs['input_shape'])
            del kwargs['input_shape']
        elif 'input_dim' in kwargs:
            shape = (None, None, kwargs['input_dim'])
            del kwargs['input_dim']
        # Encoder stack; only the last encoder collapses the sequence.
        lstms = []
        layer = LSTMEncoder(batch_input_shape=shape, output_dim=hidden_dim, state_input=False, return_sequences=depth[0] > 1, **kwargs)
        self.add(layer)
        lstms += [layer]
        for i in range(depth[0] - 1):
            self.add(Dropout(dropout))
            layer = LSTMEncoder(output_dim=hidden_dim, state_input=inner_broadcast_state, return_sequences=i < depth[0] - 2, **kwargs)
            self.add(layer)
            lstms += [layer]
        # Chain hidden-state transfer through the encoder stack if requested.
        if inner_broadcast_state:
            for i in range(len(lstms) - 1):
                lstms[i].broadcast_state(lstms[i + 1])
        encoder = self.layers[-1]
        self.add(Dropout(dropout))
        # peek=True selects the [2]-style decoder that re-sees C every step.
        decoder_type = LSTMDecoder2 if peek else LSTMDecoder
        decoder = decoder_type(hidden_dim=hidden_dim, output_length=output_length, state_input=broadcast_state, **kwargs)
        self.add(decoder)
        # Decoder stack, with the same optional inner state chaining.
        lstms = [decoder]
        for i in range(depth[1] - 1):
            self.add(Dropout(dropout))
            layer = LSTMEncoder(output_dim=hidden_dim, state_input=inner_broadcast_state, return_sequences=True, **kwargs)
            self.add(layer)
            lstms += [layer]
        if inner_broadcast_state:
            for i in range(len(lstms) - 1):
                lstms[i].broadcast_state(lstms[i + 1])
        if broadcast_state:
            encoder.broadcast_state(decoder)
        self.add(Dropout(dropout))
        self.add(TimeDistributed(Dense(output_dim, **kwargs)))
        self.encoder = encoder
        self.decoder = decoder
class S2s(Sequential):
    """Minimal attention Seq2seq: one sequence-returning LSTMEncoder feeding
    one AttentionDecoder, with hidden-state broadcast from encoder to decoder.

    Only `batch_input_shape` is consumed from **kwargs here; everything else
    is forwarded to both layers. `depth`, `inner_broadcast_state`, `peek` and
    `dropout` are accepted for signature compatibility but unused.
    """
    def __init__(self, output_dim, hidden_dim, output_length, depth=1, broadcast_state=True, inner_broadcast_state=True, peek=False, dropout=0.1, **kwargs):
        super(S2s, self).__init__()
        depth = (depth, depth)
        # Pop the shape so it is not forwarded twice.
        shape = kwargs.pop('batch_input_shape')
        encoder = LSTMEncoder(batch_input_shape=shape, output_dim=hidden_dim,
                              state_input=False, return_sequences=True, **kwargs)
        decoder = AttentionDecoder(hidden_dim=hidden_dim, output_length=output_length,
                                   state_input=broadcast_state, **kwargs)
        self.add(encoder)
        self.add(decoder)
        # Transfer the encoder's final hidden state into the decoder.
        encoder.broadcast_state(decoder)
        self.encoder, self.decoder = encoder, decoder
class GiveExamples(keras.callbacks.Callback):
    """Keras callback that prints a few decoded sample predictions after each epoch.

    NOTE(review): relies on module-level globals (`pat`, `model`, `bs`) defined
    elsewhere in this script -- confirm they exist before reusing this class.
    """
    def on_epoch_end(self, batch, logs={}):
        # First 50 bytes of each row, with newlines made visible.
        snippet = lambda s: s.tostring()[:50].decode(errors='ignore').replace('\n', '<NL>')
        sample = pat[:3, :]
        # One-hot encode the sample rows, predict, then take the argmax
        # character at every timestep.
        chars = model.predict(
            np.eye(128, dtype=np.float32)[sample],
            batch_size=bs
        ).argmax(axis=2).astype(np.uint8)
        # Predictions first, then the corresponding ground-truth rows.
        print('\n' +
              '\n'.join(
                  [ snippet(st) for st in chars]
                  + [snippet(st) for st in sample]))
# Build and train the character-level Seq2seq model.
# NOTE(review): __block_size, __n_chop, __n_hidden, n_input, n_output and pat
# are assumed to be defined earlier in this script (not visible here) -- confirm.
model = Seq2seq(
    batch_input_shape=(__block_size, __n_chop, n_input),
    hidden_dim=__n_hidden, # can be anything
    #input_length=__n_chop,
    output_length=__n_chop,
    output_dim=n_output,
    activation='softmax',
    #depth=1,
    #consume_less="cpu"
)
#model.add(keras.layers.core.Activation('softmax'))
#model = keras.models.Sequential([
# keras.layers.Lambda(
# lambda x: keras.backend.one_hot(keras.backend.cast(x, 'int16'), n_input),
# batch_input_shape=(__block_size, __n_chop,),
# output_shape=(__n_chop, n_input)
# ),
# model
#])
# Sparse targets: labels are integer class ids, not one-hot vectors.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)
# Stop after 2 stagnant epochs; keep only the best weights on disk.
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
checkpoint = ModelCheckpoint("15-weights", save_best_only=True)
eye = np.eye(128)
give_examples = GiveExamples()
model.fit(
    # One-hot inputs; integer targets with a trailing axis for sparse loss.
    eye[pat[0]],
    np.expand_dims(pat[0], 2),
    batch_size=__block_size,
    callbacks=[early_stopping, checkpoint, give_examples],
    shuffle=True,
    validation_split=0.1,
)
model.save("15-weights")
| bsd-3-clause |
##################################
# George Dietz                   #
# Byron Wallace                  #
# CEBM@Brown                     #
#                                #
# This is basically a modified   #
# version of the same thing from #
# OpenMetaAnalyst                #
##################################
from PyQt4 import QtCore, QtGui
from PyQt4.Qt import *
from ome_globals import *
import python_to_R
import ui_methods_and_parameters_page
class MethodsAndParametersPage(QWizardPage, ui_methods_and_parameters_page.Ui_WizardPage):
    """Wizard page for choosing a meta-analysis method and editing its parameters.

    The list of applicable methods and the metadata describing each method's
    parameters come from the R side via `python_to_R`; the parameter-editing
    widgets are generated dynamically from that metadata.
    """
    def __init__(self, model, meta_f_str=None, external_params=None, disable_forest_plot_tab=False, funnel_mode=False, parent=None):
        super(MethodsAndParametersPage, self).__init__(parent)
        self.setupUi(self)
        self.external_params = external_params
        self.model = model
        self.meta_f_str = meta_f_str
        self.funnel_mode = funnel_mode
        # previous values not restored currently
        self.default_method = self.model.get_method_selection()
        self.default_param_vals = self.model.get_ma_param_vals()
        if disable_forest_plot_tab:
            # Tab index 1 is the forest-plot parameters tab.
            self.specs_tab.setTabEnabled(1, False)
    def initializePage(self):
        """(Re)populate the page each time the wizard shows it."""
        self.current_param_vals = self.external_params or {}
        self.data_type, self.metric = self.wizard().get_data_type_and_metric()
        self.data_location = self.wizard().get_data_location()
        QObject.connect(self.save_btn, SIGNAL("pressed()"), self.select_out_path)
        QObject.connect(self.method_cbo_box, SIGNAL("currentIndexChanged(QString)"), self.method_changed)
        if self.meta_f_str is not None:
            # we pre-pend the data type to the meta-method function
            # name. thus the caller (meta_form) needn't worry about
            # the data type, only about the method name (e.g., cumulative)
            if not self.meta_f_str.endswith(OMA_CONVENTION[self.data_type]):
                self.meta_f_str = ".".join((self.meta_f_str, OMA_CONVENTION[self.data_type]))
        if self.data_type != TWO_BY_TWO_CONTINGENCY_TABLE:
            self.disable_bin_only_fields()
        self.current_method = None
        self.current_params = None
        self.current_defaults = None
        self.var_order = None
        self.populate_cbo_box()
    def get_included_studies_in_proper_order(self):
        """Delegate to the wizard for the ordered list of included studies."""
        return self.wizard().get_included_studies_in_proper_order()
    def get_modified_meta_f_str(self):
        """Return the (possibly data-type-suffixed) meta-method function name."""
        return self.meta_f_str
    def select_out_path(self):
        """Ask the user for a .png output path for the plot image."""
        out_f = "."
        out_f = unicode(QFileDialog.getSaveFileName(self, "OpenMeta[analyst] - Plot Path",
                                                    out_f, "png image files: (.png)"))
        if out_f == "" or out_f == None:
            return None
        else:
            self.image_path.setText(out_f)
    def disable_bin_only_fields(self):
        """Disable forest-plot columns 3/4, which only apply to binary (2x2) data."""
        self.col3_str_edit.setEnabled(False)
        self.col4_str_edit.setEnabled(False)
        self.show_3.setChecked(False)
        self.show_3.setEnabled(False)
        self.show_4.setChecked(False)
        self.show_4.setEnabled(False)
    def method_changed(self):
        """Rebuild the parameter UI when the user selects another method."""
        print("method_changed")
        if self.parameter_grp_box.layout() is not None:
            print("Layout items count before: %d" % self.parameter_grp_box.layout().count())
        self.clear_param_ui()
        self.current_method = self.available_method_d[str(self.method_cbo_box.currentText())]
        self.setup_params()
        self.parameter_grp_box.setTitle(self.current_method)
        self.ui_for_params()
    def clear_param_ui(self):
        """Detach every widget from the parameter group-box layout."""
        param_layout = self.parameter_grp_box.layout()
        if param_layout is None:
            return
        # Iterate in reverse so indices stay valid while widgets are removed.
        for i in reversed(range(param_layout.count())):
            param_layout.itemAt(i).widget().setParent(None)
    def ui_for_params(self):
        """Generate the parameter-editing widgets for the current method."""
        if self.parameter_grp_box.layout() is None:
            layout = QGridLayout()
            self.parameter_grp_box.setLayout(layout)
        cur_grid_row = 0
        # add the method description
        method_description = python_to_R.get_method_description(self.current_method)
        self.add_label(self.parameter_grp_box.layout(), cur_grid_row, \
                       "Description: %s" % method_description)
        cur_grid_row += 1
        if self.var_order is not None:
            for var_name in self.var_order:
                val = self.current_params[var_name]
                self.add_param(self.parameter_grp_box.layout(), cur_grid_row, var_name, val)
                cur_grid_row+=1
        else:
            # no ordering was provided; let's try and do something
            # sane with respect to the order in which parameters
            # are displayed.
            #
            # we want to add the parameters in groups, for example,
            # we add combo boxes (which will be lists of values) together,
            # followed by numerical inputs. thus we create an ordered list
            # of functions to check if the argument is the corresponding
            # type (float, list); if it is, we add it otherwise we pass. this isn't
            # the most efficient way to do things, but the number of parameters
            # is going to be relatively tiny anyway
            ordered_types = [lambda x: isinstance(x, list),
                             lambda x: isinstance(x, str) and x.lower()=="float"]
            for is_right_type in ordered_types:
                for key, val in self.current_params.items():
                    if is_right_type(val):
                        self.add_param(self.parameter_grp_box.layout(), cur_grid_row, key, val)
                        cur_grid_row+=1
        # do we need to set forest plot parameters? if not,
        # e.g., in the case of HSROC or other methods that
        # don't use our forest plotting, we don't show the
        # corresponding tab for forest plot params.
        # @TODO this is hacky; plus, really we should keep
        # a list of methods that *DO* take forest plot params
        if self.current_method in METHODS_WITH_NO_FOREST_PLOT:
            self.plot_tab.setEnabled(False)
        else:
            self.plot_tab.setEnabled(True)
    def populate_cbo_box(self):
        """Fill the method combo box with methods feasible for the current data."""
        print("populating combo box")
        self.method_cbo_box.clear()
        # we first build an R object with the current data. this is to pass off
        # to the R side to check the feasibility of the methods over the current data.
        # i.e., we do not display methods that cannot be performed over the
        # current data.
        tmp_obj_name = "tmp_obj"
        covs_to_include = []
        #if self.mode==SUBGROUP_MODE:
        # covs_to_include = [self.wizard().get_subgroup_variable(),]
        covs_to_include = []
        if OMA_CONVENTION[self.data_type] == "binary":
            python_to_R.dataset_to_simple_binary_robj(
                self.model,
                included_studies=self.get_included_studies_in_proper_order(),
                data_location=self.data_location,
                var_name=tmp_obj_name,
                covs_to_include=covs_to_include,
                one_arm=False,
            )
        elif OMA_CONVENTION[self.data_type] == "continuous":
            python_to_R.dataset_to_simple_cont_robj(
                model=self.model,
                included_studies=self.get_included_studies_in_proper_order(),
                data_location=self.data_location,
                data_type=self.data_type,
                var_name=tmp_obj_name,
                covs_to_include=covs_to_include,
                one_arm=False,
            )
        self.available_method_d = python_to_R.get_available_methods(
            for_data_type=OMA_CONVENTION[self.data_type],
            data_obj_name=tmp_obj_name,
            metric=self.metric,
            funnel_mode=self.funnel_mode,
        )
        print "\n\navailable %s methods: %s" % (self.data_type, ", ".join(self.available_method_d.keys()))
        method_names = self.available_method_d.keys()
        method_names.sort(reverse=True)
        for method in method_names:
            self.method_cbo_box.addItem(method)
        self.current_method = self.available_method_d[str(self.method_cbo_box.currentText())]
        self.setup_params()
        self.parameter_grp_box.setTitle(self.current_method)
    def setup_params(self):
        """Fetch parameter metadata and defaults for the current method from R."""
        # parses out information about the parameters of the current method
        # param_d holds (meta) information about the parameter -- it's a each param
        # itself maps to a dictionary with a pretty name and description (assuming
        # they were provided for the given param)
        self.current_params, self.current_defaults, self.var_order, self.param_d = \
                    python_to_R.get_params(self.current_method)
        ###
        # user selections overwrite the current parameter defaults.
        # ie., if the user has run this analysis before, the preferences
        # they selected then are automatically set as the defaults now.
        # these defaults, if they exist, are stored in the user_preferences
        # dictionary
        # override default from openmetar with choice from table preferences
        self.current_defaults['digits'] = self.model.get_precision()
        print self.current_defaults
    def add_label(self, layout, cur_grid_row, name, tool_tip_text=None):
        """Place a (possibly tool-tipped) label in column 0 of the given grid row."""
        lbl = QLabel(name, self.parameter_grp_box)
        if not tool_tip_text is None:
            lbl.setToolTip(tool_tip_text)
        layout.addWidget(lbl, cur_grid_row, 0)
    def add_enum(self, layout, cur_grid_row, name, values):
        '''
        Adds an enumeration to the UI, with the name and possible
        values as specified per the parameters.
        '''
        ###
        # using the pretty name for the label now.
        self.add_label(layout, cur_grid_row, self.param_d[name]["pretty.name"], \
                       tool_tip_text=self.param_d[name]["description"])
        cbo_box = QComboBox()
        for index, value in enumerate(values):
            name_str = self._get_enum_item_pretty_name(name,value)
            cbo_box.addItem(name_str) # TODO: replace value with pretty values
            cbo_box.setItemData(index, QVariant(value))
        if self.current_defaults.has_key(name):
            cbo_box.setCurrentIndex(cbo_box.findData(self.current_defaults[name]))
            self.current_param_vals[name] = self.current_defaults[name]
        QObject.connect(cbo_box, QtCore.SIGNAL("currentIndexChanged(int)"),
                        self.set_param_f_from_itemdata(name))
        layout.addWidget(cbo_box, cur_grid_row, 1)
    def _get_enum_item_pretty_name(self, enum_name, item_name):
        """Return 'item: description' when a pretty name exists, else the raw item name."""
        if "rm.method.names" in self.param_d[enum_name]:
            if item_name in self.param_d[enum_name]["rm.method.names"]:
                return item_name + ": " + str(self.param_d[enum_name]["rm.method.names"][item_name])
        return item_name
    def add_float_box(self, layout, cur_grid_row, name):
        """Add a label plus a float-valued line edit for parameter `name`."""
        self.add_label(layout, cur_grid_row, self.param_d[name]["pretty.name"],\
                       tool_tip_text=self.param_d[name]["description"])
        # now add the float input line edit
        finput = QLineEdit()
        # if a default value has been specified, use it
        if self.current_defaults.has_key(name):
            finput.setText(str(self.current_defaults[name]))
            self.current_param_vals[name] = self.current_defaults[name]
        finput.setMaximumWidth(50)
        QObject.connect(finput, QtCore.SIGNAL("textChanged(QString)"),
                        self.set_param_f(name, to_type=float))
        layout.addWidget(finput, cur_grid_row, 1)
    def add_int_box(self, layout, cur_grid_row, name):
        """Add a label plus an int-valued line edit for parameter `name`."""
        self.add_label(layout, cur_grid_row, self.param_d[name]["pretty.name"],\
                       tool_tip_text=self.param_d[name]["description"])
        # now add the int input line edit
        iinput = QLineEdit()
        # if a default value has been specified, use it
        if self.current_defaults.has_key(name):
            iinput.setText(str(int(self.current_defaults[name])))
            self.current_param_vals[name] = self.current_defaults[name]
        iinput.setMaximumWidth(50)
        QObject.connect(iinput, QtCore.SIGNAL("textChanged(QString)"),
                        self.set_param_f(name, to_type=int))
        layout.addWidget(iinput, cur_grid_row, 1)
    def add_param(self, layout, cur_grid_row, name, value):
        """Dispatch to the right widget builder based on the parameter's declared type."""
        print "adding param. name: %s, value: %s" % (name, value)
        if isinstance(value, list):
            # then it's an enumeration of values
            self.add_enum(layout, cur_grid_row, name, value)
        elif value.lower() == "float":
            self.add_float_box(layout, cur_grid_row, name)
        elif value.lower() == "int":
            self.add_int_box(layout, cur_grid_row, name)
        # should we add an array type?
        elif value.lower() == "string":
            self.add_text_box(layout, cur_grid_row, name)
        else:
            print "unknown type! throwing up. bleccch."
            print "name:%s. value: %s" % (name, value)
            # throw exception here
    def add_text_box(self, layout, cur_grid_row, name):
        """Add a label plus a free-text line edit for parameter `name`."""
        self.add_label(layout, cur_grid_row, self.param_d[name]["pretty.name"],\
                       tool_tip_text=self.param_d[name]["description"])
        # now add the text
        txt_input = QLineEdit()
        # if a default value has been specified, use it
        if self.current_defaults.has_key(name):
            txt_input.setText(str(self.current_defaults[name]))
            self.current_param_vals[name] = self.current_defaults[name]
        txt_input.setMaximumWidth(200)
        # NOTE(review): to_type=float looks wrong for a *string* parameter --
        # non-numeric text raises ValueError in set_param; confirm intended.
        QObject.connect(txt_input, QtCore.SIGNAL("textChanged(QString)"),
                        self.set_param_f(name, to_type=float))
        layout.addWidget(txt_input, cur_grid_row, 1)
    def set_param_f(self, name, to_type=str):
        '''
        Returns a function f(x) such that f(x) will set the key
        name in the parameters dictionary to the value x.
        '''
        def set_param(x):
            self.current_param_vals[name] = to_type(x)
            print self.current_param_vals
        return set_param
    @QtCore.pyqtSlot()
    def set_param_f_from_itemdata(self, name, to_type=str):
        '''
        hackier version....
        Returns a function f(x) such that f(x) will set the key
        name in the parameters dictionary to the value x.
        '''
        # NOTE(review): decorating this factory (not the inner slot) with
        # pyqtSlot() looks suspicious -- confirm the decorator is needed.
        def set_param(index):
            combo_box = self.sender()
            x = combo_box.itemData(index).toString()
            self.current_param_vals[name] = to_type(x)
            print str(self.current_param_vals) + " -> weirdo sender thing"
        return set_param
    def setup_fields_for_one_arm(self):
        """Hide forest-plot column 4, which has no meaning for one-arm data."""
        self.show_4.setChecked(False)
        self.show_4.setEnabled(False)
    ############### Getters ###################################################
    # adapted from 'add_plot_params' in ma_specs in OMA, also returns params for the meta-analysis
    def get_plot_params(self):
        """Collect the forest-plot settings from the UI into current_param_vals."""
        self.current_param_vals["fp_show_col1"] = self.show_1.isChecked()
        self.current_param_vals["fp_col1_str"] = unicode(self.col1_str_edit.text().toUtf8(), "utf-8")
        self.current_param_vals["fp_show_col2"] = self.show_2.isChecked()
        self.current_param_vals["fp_col2_str"] = unicode(self.col2_str_edit.text().toUtf8(), "utf-8")
        self.current_param_vals["fp_show_col3"] = self.show_3.isChecked()
        self.current_param_vals["fp_col3_str"] = unicode(self.col3_str_edit.text().toUtf8(), "utf-8")
        self.current_param_vals["fp_show_col4"] = self.show_4.isChecked()
        self.current_param_vals["fp_col4_str"] = unicode(self.col4_str_edit.text().toUtf8(), "utf-8")
        self.current_param_vals["fp_xlabel"] = unicode(self.x_lbl_le.text().toUtf8(), "utf-8")
        self.current_param_vals["fp_outpath"] = unicode(self.image_path.text().toUtf8(), "utf-8")
        # Bounds and ticks fall back to "[default]" when absent or invalid.
        plot_lb = unicode(self.plot_lb_le.text().toUtf8(), "utf-8")
        self.current_param_vals["fp_plot_lb"] = "[default]"
        if plot_lb != "[default]" and check_plot_bound(plot_lb):
            self.current_param_vals["fp_plot_lb"] = plot_lb
        plot_ub = unicode(self.plot_ub_le.text().toUtf8(), "utf-8")
        self.current_param_vals["fp_plot_ub"] = "[default]"
        if plot_ub != "[default]" and check_plot_bound(plot_ub):
            self.current_param_vals["fp_plot_ub"] = plot_ub
        xticks = unicode(self.x_ticks_le.text().toUtf8(), "utf-8")
        self.current_param_vals["fp_xticks"] = "[default]"
        if xticks != "[default]" and seems_sane(xticks):
            self.current_param_vals["fp_xticks"] = xticks
        self.current_param_vals["fp_show_summary_line"] = self.show_summary_line.isChecked()
        return self.current_param_vals
    def get_current_method(self):
        """Return the internal (R-side) name of the selected method."""
        return self.current_method
    def get_current_method_pretty_name(self):
        """Return the user-visible name of the selected method."""
        return str(self.method_cbo_box.currentText())
    ###########################################################################
    def __str__(self):
        """Human-readable summary of the chosen method (and random-effects method)."""
        chosen_method_str = "Chosen Method: %s" % self.get_current_method_pretty_name()
        random_effects_method_str = None
        # stupid fix....
        if "fixed" not in self.get_current_method() and "rm.method" in self.current_param_vals:
            random_effects_method_str = "Random Effects Method: " + python_to_R.get_random_effects_methods_descriptions(self.get_current_method())[self.current_param_vals['rm.method']]
            summary = "\n".join([chosen_method_str, random_effects_method_str])
        else:
            summary = chosen_method_str
        return summary
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Andrea Cometa.
# Email: info@andreacometa.it
# Web site: http://www.andreacometa.it
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012 Associazione OpenERP Italia
# (<http://www.odoo-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import wizard_riba_issue
from . import wizard_riba_file_export
from . import wizard_accreditation
from . import wizard_unsolved
| agpl-3.0 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BatchToSpace op.
Additional tests are included in spacetobatch_op_test.py, where the BatchToSpace
op is tested in tandem with its reverse SpaceToBatch op.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class PythonOpImpl(object):
  """Mixin: run the tests against the Python-level batch_to_space wrapper."""

  @staticmethod
  def batch_to_space(*args, **kwargs):
    return array_ops.batch_to_space(*args, **kwargs)
class CppOpImpl(object):
  """Mixin: run the tests against the generated (C++) batch_to_space op binding."""

  @staticmethod
  def batch_to_space(*args, **kwargs):
    return gen_array_ops.batch_to_space(*args, **kwargs)
class BatchToSpaceDepthToSpace(test.TestCase, PythonOpImpl):

  # Verifies that: batch_to_space(x) = transpose(depth_to_space(transpose(x)))
  def testDepthToSpaceTranspose(self):
    x = np.arange(20 * 5 * 8 * 7, dtype=np.float32).reshape([20, 5, 8, 7])
    block_size = 2
    # Both int64 and int32 crops must be accepted.
    for crops_dtype in [dtypes.int64, dtypes.int32]:
      crops = array_ops.zeros((2, 2), dtype=crops_dtype)
      y1 = self.batch_to_space(x, crops, block_size=block_size)
      # Move batch into the depth position, apply depth_to_space, move back.
      y2 = array_ops.transpose(
          array_ops.depth_to_space(
              array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
          [3, 1, 2, 0])
      with self.test_session():
        self.assertAllEqual(y1.eval(), y2.eval())
class BatchToSpaceDepthToSpaceCpp(BatchToSpaceDepthToSpace, CppOpImpl):
  # Same tests as above, dispatched to the C++ binding via CppOpImpl.
  pass
class BatchToSpaceErrorHandlingTest(test.TestCase, PythonOpImpl):
  """Invalid inputs/block sizes must be rejected at graph-construction time."""

  def testInputWrongDimMissingBatch(self):
    # The input is missing the first dimension ("batch")
    x_np = [[[1], [2]], [[3], [4]]]
    crops = np.zeros((2, 2), dtype=np.int32)
    block_size = 2
    with self.assertRaises(ValueError):
      _ = self.batch_to_space(x_np, crops, block_size)

  def testBlockSize0(self):
    # The block size is 0.
    x_np = [[[[1], [2]], [[3], [4]]]]
    crops = np.zeros((2, 2), dtype=np.int32)
    block_size = 0
    with self.assertRaises(ValueError):
      out_tf = self.batch_to_space(x_np, crops, block_size)
      out_tf.eval()

  def testBlockSizeOne(self):
    # The block size is 1. The block size needs to be > 1.
    x_np = [[[[1], [2]], [[3], [4]]]]
    crops = np.zeros((2, 2), dtype=np.int32)
    block_size = 1
    with self.assertRaises(ValueError):
      out_tf = self.batch_to_space(x_np, crops, block_size)
      out_tf.eval()

  def testBlockSizeLarger(self):
    # The block size is too large for this input.
    x_np = [[[[1], [2]], [[3], [4]]]]
    crops = np.zeros((2, 2), dtype=np.int32)
    block_size = 10
    with self.assertRaises(ValueError):
      out_tf = self.batch_to_space(x_np, crops, block_size)
      out_tf.eval()

  def testBlockSizeSquaredNotDivisibleBatch(self):
    # The block size squared does not divide the batch.
    x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
    crops = np.zeros((2, 2), dtype=np.int32)
    block_size = 3
    with self.assertRaises(ValueError):
      _ = self.batch_to_space(x_np, crops, block_size)

  def testUnknownShape(self):
    # With fully unknown input/crops shapes, the result rank is still 4.
    t = self.batch_to_space(
        array_ops.placeholder(dtypes.float32),
        array_ops.placeholder(dtypes.int32),
        block_size=4)
    self.assertEqual(4, t.get_shape().ndims)
class BatchToSpaceErrorHandlingCppTest(CppOpImpl, BatchToSpaceErrorHandlingTest):
    """Re-runs the error-handling tests against the C++ op wrapper.

    CppOpImpl must precede BatchToSpaceErrorHandlingTest: with the original
    base order, C3 linearization resolved self.batch_to_space to
    PythonOpImpl (inherited via the test base class), so the C++ wrapper
    was silently never used.
    """
    pass
class BatchToSpaceNDErrorHandlingTest(test.TestCase):
    """Shape/validation error checks for the N-D batch_to_space_nd op."""

    def _testStaticShape(self, input_shape, block_shape, paddings, error):
        # With fully-known shapes the failure must surface at graph
        # construction time as `error`.
        block_shape = np.array(block_shape)
        paddings = np.array(paddings)
        # Try with sizes known at graph construction time.
        with self.assertRaises(error):
            _ = array_ops.batch_to_space_nd(
                np.zeros(input_shape, np.float32), block_shape, paddings)

    def _testDynamicShape(self, input_shape, block_shape, paddings):
        # With placeholder inputs the failure can only surface when the
        # graph runs, so the error is checked at eval() time instead.
        block_shape = np.array(block_shape)
        paddings = np.array(paddings)
        # Try with sizes unknown at graph construction time.
        input_placeholder = array_ops.placeholder(dtypes.float32)
        block_shape_placeholder = array_ops.placeholder(
            dtypes.int32, shape=block_shape.shape)
        paddings_placeholder = array_ops.placeholder(dtypes.int32)
        t = array_ops.batch_to_space_nd(input_placeholder, block_shape_placeholder,
                                        paddings_placeholder)
        with self.assertRaises(ValueError):
            _ = t.eval({
                input_placeholder: np.zeros(input_shape, np.float32),
                block_shape_placeholder: block_shape,
                paddings_placeholder: paddings
            })

    def _testShape(self, input_shape, block_shape, paddings, error):
        # Exercise both the static-shape and dynamic-shape code paths.
        self._testStaticShape(input_shape, block_shape, paddings, error)
        self._testDynamicShape(input_shape, block_shape, paddings)

    def testInputWrongDimMissingBatch(self):
        self._testShape([2, 2], [2, 2], [[0, 0], [0, 0]], ValueError)
        self._testShape([2, 2, 3], [2, 2, 3], [[0, 0], [0, 0]], ValueError)

    def testBlockSize0(self):
        # The block size is 0.
        self._testShape([1, 2, 2, 1], [0, 1], [[0, 0], [0, 0]], ValueError)

    def testBlockSizeNegative(self):
        self._testShape([1, 2, 2, 1], [-1, 1], [[0, 0], [0, 0]], ValueError)

    def testNegativePadding(self):
        self._testShape([1, 2, 2], [1, 1], [[0, -1], [0, 0]], ValueError)

    def testCropTooLarge(self):
        # The amount to crop exceeds the padded size.
        self._testShape([1 * 2 * 2, 2, 3, 1], [2, 2], [[3, 2], [0, 0]], ValueError)

    def testBlockSizeSquaredNotDivisibleBatch(self):
        # The batch dimension is not divisible by the product of the block_shape.
        self._testShape([3, 1, 1, 1], [2, 3], [[0, 0], [0, 0]], ValueError)

    def testUnknownShape(self):
        # Verify that input shape and paddings shape can be unknown.
        _ = array_ops.batch_to_space_nd(
            array_ops.placeholder(dtypes.float32),
            array_ops.placeholder(
                dtypes.int32, shape=(2,)),
            array_ops.placeholder(dtypes.int32))
        # Only number of input dimensions is known.
        t = array_ops.batch_to_space_nd(
            array_ops.placeholder(
                dtypes.float32, shape=(None, None, None, None)),
            array_ops.placeholder(
                dtypes.int32, shape=(2,)),
            array_ops.placeholder(dtypes.int32))
        self.assertEqual(4, t.get_shape().ndims)
        # Dimensions are partially known.
        t = array_ops.batch_to_space_nd(
            array_ops.placeholder(
                dtypes.float32, shape=(None, None, None, 2)),
            array_ops.placeholder(
                dtypes.int32, shape=(2,)),
            array_ops.placeholder(dtypes.int32))
        self.assertEqual([None, None, None, 2], t.get_shape().as_list())
        # Dimensions are partially known.
        t = array_ops.batch_to_space_nd(
            array_ops.placeholder(
                dtypes.float32, shape=(3 * 2 * 3, None, None, 2)), [2, 3],
            array_ops.placeholder(dtypes.int32))
        self.assertEqual([3, None, None, 2], t.get_shape().as_list())
        # Dimensions are partially known.
        t = array_ops.batch_to_space_nd(
            array_ops.placeholder(
                dtypes.float32, shape=(3 * 2 * 3, None, 2, 2)), [2, 3],
            [[1, 1], [0, 1]])
        self.assertEqual([3, None, 5, 2], t.get_shape().as_list())
        # Dimensions are fully known.
        t = array_ops.batch_to_space_nd(
            array_ops.placeholder(
                dtypes.float32, shape=(3 * 2 * 3, 2, 1, 2)), [2, 3],
            [[1, 1], [0, 0]])
        self.assertEqual([3, 2, 3, 2], t.get_shape().as_list())
class BatchToSpaceGradientTest(test.TestCase, PythonOpImpl):
    # Check the gradients.
    def _checkGrad(self, x, crops, block_size):
        """Compares the symbolic and numeric Jacobians of batch_to_space."""
        assert 4 == x.ndim
        with self.test_session():
            tf_x = ops.convert_to_tensor(x)
            tf_y = self.batch_to_space(tf_x, crops, block_size)
            epsilon = 1e-5
            ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
                tf_x,
                x.shape,
                tf_y,
                tf_y.get_shape().as_list(),
                x_init_value=x,
                delta=epsilon)

        self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

    # Tests a gradient for batch_to_space of x which is a four dimensional
    # tensor of shape [b * block_size * block_size, h, w, d].
    def _compare(self, b, h, w, d, block_size, crop_beg, crop_end):
        block_size_sq = block_size * block_size
        x = np.random.normal(0, 1, b * h * w * d *
                             block_size_sq).astype(np.float32).reshape(
                                 [b * block_size * block_size, h, w, d])
        crops = np.array(
            [[crop_beg, crop_end], [crop_beg, crop_end]], dtype=np.int32)
        self._checkGrad(x, crops, block_size)

    # Don't use very large numbers as dimensions here as the result is tensor
    # with cartesian product of the dimensions.
    def testSmall(self):
        block_size = 2
        crop_beg = 0
        crop_end = 0
        self._compare(1, 2, 3, 5, block_size, crop_beg, crop_end)

    def testSmall2(self):
        block_size = 2
        crop_beg = 0
        crop_end = 0
        self._compare(2, 4, 3, 2, block_size, crop_beg, crop_end)

    def testSmallCrop1x1(self):
        # Same shape as testSmall but with a 1-pixel crop on every edge.
        block_size = 2
        crop_beg = 1
        crop_end = 1
        self._compare(1, 2, 3, 5, block_size, crop_beg, crop_end)
class BatchToSpaceGradientCppTest(CppOpImpl, BatchToSpaceGradientTest):
    """Re-runs the gradient tests against the C++ op wrapper.

    CppOpImpl must precede BatchToSpaceGradientTest: with the original base
    order, C3 linearization resolved self.batch_to_space to PythonOpImpl
    (inherited via the test base class), so the C++ wrapper was silently
    never used.
    """
    pass
class BatchToSpaceNDGradientTest(test.TestCase):
    # Check the gradients.
    def _checkGrad(self, x, block_shape, crops, crops_dtype):
        """Compares symbolic and numeric Jacobians of batch_to_space_nd."""
        block_shape = np.array(block_shape)
        crops = constant_op.constant(
            np.array(crops).reshape((len(block_shape), 2)), crops_dtype)
        with self.test_session():
            tf_x = ops.convert_to_tensor(x)
            tf_y = array_ops.batch_to_space_nd(tf_x, block_shape, crops)
            epsilon = 1e-5
            ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
                tf_x,
                x.shape,
                tf_y,
                tf_y.get_shape().as_list(),
                x_init_value=x,
                delta=epsilon)

        self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

    def _compare(self, input_shape, block_shape, crops, crops_dtype):
        # Scale the batch dimension so it is divisible by prod(block_shape),
        # as the op requires.
        input_shape = list(input_shape)
        input_shape[0] *= np.prod(block_shape)
        x = np.random.normal(
            0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape)
        self._checkGrad(x, block_shape, crops, crops_dtype)

    # Don't use very large numbers as dimensions here as the result is tensor
    # with cartesian product of the dimensions.
    def testSmall(self):
        for dtype in [dtypes.int64, dtypes.int32]:
            self._compare([1, 2, 3, 5], [2, 2], [[0, 0], [0, 0]], dtype)

    def testSmall2(self):
        for dtype in [dtypes.int64, dtypes.int32]:
            self._compare([2, 4, 3, 2], [2, 2], [[0, 0], [0, 0]], dtype)

    def testSmallCrop1x1(self):
        for dtype in [dtypes.int64, dtypes.int32]:
            self._compare([1, 2, 3, 5], [2, 2], [[1, 1], [1, 1]], dtype)
if __name__ == "__main__":
    # Discover and run every TestCase defined in this module.
    test.main()
| apache-2.0 |
sbidoul/buildbot | master/buildbot/buildslave/libvirt.py | 11 | 1438 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# This module is left for backward compatibility of old-named worker API.
# It should never be imported by Buildbot.
from __future__ import absolute_import
from __future__ import print_function
from buildbot.worker.libvirt import LibVirtWorker as _LibVirtWorker
from buildbot.worker_transition import deprecatedWorkerModuleAttribute
from buildbot.worker_transition import reportDeprecatedWorkerModuleUsage
# Emit a deprecation warning once, at import time, pointing callers at the
# new module location.
reportDeprecatedWorkerModuleUsage(
    "'{old}' module is deprecated, use "
    "'buildbot.worker.libvirt' module instead".format(old=__name__))
# Re-export LibVirtWorker here under its legacy "LibVirtSlave" name so old
# imports keep working (access triggers a per-attribute deprecation warning).
deprecatedWorkerModuleAttribute(locals(), _LibVirtWorker,
                                compat_name="LibVirtSlave",
                                new_name="LibVirtWorker")
| gpl-2.0 |
Wilee999/panda3d | direct/src/fsm/ClassicFSM.py | 11 | 13963 | """Undocumented Module"""
__all__ = ['ClassicFSM']
"""Finite State Machine module: contains the ClassicFSM class.
This module and class exist only for backward compatibility with
existing code. New code should use the FSM module instead.
"""
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.DirectObject import DirectObject
import types
import weakref
if __debug__:
    # Registry of weak references to every ClassicFSM ever constructed,
    # keyed by FSM name (populated in ClassicFSM.__init__), so live FSMs can
    # be listed from an interactive prompt via debugFsmList().
    _debugFsms = {}

    def printDebugFsmList():
        """Print each registered FSM name and its weakref (Python 2 only)."""
        global _debugFsms
        keys = _debugFsms.keys()
        keys.sort()
        for k in keys:
            print k, _debugFsms[k]()

    # NOTE(review): relies on __builtins__ being a dict, which holds for
    # imported modules (not for __main__) -- confirm if this file can ever
    # run as a script.
    __builtins__['debugFsmList'] = printDebugFsmList
class ClassicFSM(DirectObject):
    """
    Finite State Machine class.

    This module and class exist only for backward compatibility with
    existing code. New code should use the FSM class instead.
    """
    # create ClassicFSM DirectNotify category
    notify = directNotify.newCategory("ClassicFSM")

    # special methods

    # these are flags that tell the ClassicFSM what to do when an
    # undefined transition is requested:
    ALLOW = 0             # print a warning, and do the transition
    DISALLOW = 1          # silently ignore the request (don't do the transition)
    DISALLOW_VERBOSE = 2  # print a warning, and don't do the transition
    ERROR = 3             # print an error message and raise an exception

    # NOTE(review): the mutable default `states=[]` is safe here because
    # setStates only iterates it, never mutates it.
    def __init__(self, name, states=[], initialStateName=None,
                 finalStateName=None, onUndefTransition=DISALLOW_VERBOSE):
        """__init__(self, string, State[], string, string, int)

        ClassicFSM constructor: takes name, list of states, initial state and
        final state as:

        fsm = ClassicFSM.ClassicFSM('stopLight',
          [State.State('red', enterRed, exitRed, ['green']),
          State.State('yellow', enterYellow, exitYellow, ['red']),
          State.State('green', enterGreen, exitGreen, ['yellow'])],
          'red',
          'red')

        each state's last argument, a list of allowed state transitions,
        is optional; if left out (or explicitly specified to be
        State.State.Any) then any transition from the state is 'defined'
        and allowed

        'onUndefTransition' flag determines behavior when undefined
        transition is requested; see flag definitions above
        """
        self.setName(name)
        self.setStates(states)
        self.setInitialState(initialStateName)
        self.setFinalState(finalStateName)
        self.onUndefTransition = onUndefTransition
        # Flag to see if we are inspecting
        self.inspecting = 0
        # We do not enter the initial state to separate
        # construction from activation
        self.__currentState = None
        # We set this while we are modifying the state. No one else
        # should recursively attempt to modify the state while we are
        # doing this.
        self.__internalStateInFlux = 0
        if __debug__:
            # Register in the module-level weakref registry for debugFsmList().
            global _debugFsms
            _debugFsms[name] = weakref.ref(self)

    # I know this isn't how __repr__ is supposed to be used, but it
    # is nice and convenient.
    def __repr__(self):
        return self.__str__()

    def __str__(self):
        """
        Print out something useful about the fsm
        """
        # NOTE(review): local name `str` shadows the builtin (harmless here).
        currentState = self.getCurrentState()
        if currentState:
            str = ("ClassicFSM " + self.getName() + ' in state "' +
                   currentState.getName() + '"')
        else:
            str = ("ClassicFSM " + self.getName() + ' not in any state')
        return str

    def enterInitialState(self, argList=[]):
        # No-op when already sitting in the initial state; otherwise the FSM
        # must not have entered any state yet.
        assert not self.__internalStateInFlux
        if self.__currentState == self.__initialState:
            return
        assert self.__currentState == None
        self.__internalStateInFlux = 1
        self.__enter(self.__initialState, argList)
        assert not self.__internalStateInFlux

    # setters and getters
    def getName(self):
        return(self.__name)

    def setName(self, name):
        self.__name = name

    def getStates(self):
        return self.__states.values()

    def setStates(self, states):
        """setStates(self, State[])"""
        # Make a dictionary from stateName -> state
        self.__states = {}
        for state in states:
            self.__states[state.getName()] = state

    def addState(self, state):
        self.__states[state.getName()] = state

    def getInitialState(self):
        return(self.__initialState)

    def setInitialState(self, initialStateName):
        self.__initialState = self.getStateNamed(initialStateName)

    def getFinalState(self):
        return(self.__finalState)

    def setFinalState(self, finalStateName):
        self.__finalState = self.getStateNamed(finalStateName)

    def requestFinalState(self):
        self.request(self.getFinalState().getName())

    def getCurrentState(self):
        return(self.__currentState)

    # lookup funcs
    def getStateNamed(self, stateName):
        """
        Return the state with given name if found, issue warning otherwise
        (and implicitly return None in that case).
        """
        state = self.__states.get(stateName)
        if state:
            return state
        else:
            ClassicFSM.notify.warning("[%s]: getStateNamed: %s, no such state" %
                                      (self.__name, stateName))

    def hasStateNamed(self, stateName):
        """
        Return True if stateName is a valid state, False otherwise.
        """
        result = False
        state = self.__states.get(stateName)
        if state:
            result = True
        return result

    # basic ClassicFSM functionality
    def __exitCurrent(self, argList):
        """
        Exit the current state
        """
        assert self.__internalStateInFlux
        assert ClassicFSM.notify.debug("[%s]: exiting %s" % (self.__name, self.__currentState.getName()))
        self.__currentState.exit(argList)
        # Only send the state change event if we are inspecting it
        # If this event turns out to be generally useful, we can
        # turn it on all the time, but for now nobody else is using it
        # NOTE(review): `messenger` is presumably the ShowBase global -- it
        # is not defined in this module.
        if self.inspecting:
            messenger.send(self.getName() + '_' +
                           self.__currentState.getName() + '_exited')
        self.__currentState = None

    def __enter(self, aState, argList=[]):
        """
        Enter a given state, if it exists
        """
        assert self.__internalStateInFlux
        stateName = aState.getName()
        if (stateName in self.__states):
            assert ClassicFSM.notify.debug("[%s]: entering %s" % (self.__name, stateName))
            self.__currentState = aState
            # Only send the state change event if we are inspecting it
            # If this event turns out to be generally useful, we can
            # turn it on all the time, but for now nobody else is using it
            if self.inspecting:
                messenger.send(self.getName() + '_' + stateName + '_entered')
            # Once we begin entering the new state, we're allow to
            # recursively request a transition to another state.
            # Indicate this by marking our internal state no longer in
            # flux.
            self.__internalStateInFlux = 0
            aState.enter(argList)
        else:
            # notify.error is going to raise an exception; reset the
            # flux flag first
            self.__internalStateInFlux = 0
            ClassicFSM.notify.error("[%s]: enter: no such state" % (self.__name))

    def __transition(self, aState, enterArgList=[], exitArgList=[]):
        """
        Exit currentState and enter given one
        """
        assert not self.__internalStateInFlux
        self.__internalStateInFlux = 1
        self.__exitCurrent(exitArgList)
        self.__enter(aState, enterArgList)
        assert not self.__internalStateInFlux

    def request(self, aStateName, enterArgList=[], exitArgList=[],
                force=0):
        """
        Attempt transition from currentState to given one.
        Return true is transition exists to given state,
        false otherwise.
        """
        # If you trigger this assertion failure, you must have
        # recursively requested a state transition from within the
        # exitState() function for the previous state. This is not
        # supported because we're not fully transitioned into the new
        # state yet.
        assert not self.__internalStateInFlux
        if not self.__currentState:
            # Make this a warning for now
            ClassicFSM.notify.warning("[%s]: request: never entered initial state" %
                                      (self.__name))
            self.__currentState = self.__initialState
        # Accept either a state name or a State object (Python 2:
        # types.StringType is str).
        if isinstance(aStateName, types.StringType):
            aState = self.getStateNamed(aStateName)
        else:
            # Allow the caller to pass in a state in itself, not just
            # the name of a state.
            aState = aStateName
            aStateName = aState.getName()
        if aState == None:
            ClassicFSM.notify.error("[%s]: request: %s, no such state" %
                                    (self.__name, aStateName))
        # is the transition defined? if it isn't, should we allow it?
        transitionDefined = self.__currentState.isTransitionDefined(aStateName)
        transitionAllowed = transitionDefined
        if self.onUndefTransition == ClassicFSM.ALLOW:
            transitionAllowed = 1
            if not transitionDefined:
                # the transition is not defined, but we're going to do it
                # anyway. print a warning.
                ClassicFSM.notify.warning(
                    "[%s]: performing undefined transition from %s to %s" %
                    (self.__name,
                     self.__currentState.getName(),
                     aStateName))
        if transitionAllowed or force:
            self.__transition(aState,
                              enterArgList,
                              exitArgList)
            return 1
        # We can implicitly always transition to our final state.
        elif (aStateName == self.__finalState.getName()):
            if (self.__currentState == self.__finalState):
                # Do not do the transition if we are already in the
                # final state
                assert ClassicFSM.notify.debug(
                    "[%s]: already in final state: %s" %
                    (self.__name, aStateName))
                return 1
            else:
                # Force a transition to allow for cleanup
                assert ClassicFSM.notify.debug(
                    "[%s]: implicit transition to final state: %s" %
                    (self.__name, aStateName))
                self.__transition(aState,
                                  enterArgList,
                                  exitArgList)
                return 1
        # are we already in this state?
        elif (aStateName == self.__currentState.getName()):
            assert ClassicFSM.notify.debug(
                "[%s]: already in state %s and no self transition" %
                (self.__name, aStateName))
            return 0
        else:
            msg = ("[%s]: no transition exists from %s to %s" %
                   (self.__name,
                    self.__currentState.getName(),
                    aStateName))
            if self.onUndefTransition == ClassicFSM.ERROR:
                ClassicFSM.notify.error(msg)
            elif self.onUndefTransition == ClassicFSM.DISALLOW_VERBOSE:
                ClassicFSM.notify.warning(msg)
            return 0

    def forceTransition(self, aStateName, enterArgList=[], exitArgList=[]):
        """
        force a transition -- for debugging ONLY
        """
        self.request(aStateName, enterArgList, exitArgList, force=1)

    def conditional_request(self, aStateName, enterArgList=[], exitArgList=[]):
        """
        'if this transition is defined, do it'
        Attempt transition from currentState to given one, if it exists.
        Return true if transition exists to given state, false otherwise.
        It is NOT an error/warning to attempt a cond_request if the
        transition doesn't exist. This lets people be sloppy about
        ClassicFSM transitions, letting the same fn be used for different
        states that may not have the same out transitions.
        """
        assert not self.__internalStateInFlux
        if not self.__currentState:
            # Make this a warning for now
            ClassicFSM.notify.warning("[%s]: request: never entered initial state" %
                                      (self.__name))
            self.__currentState = self.__initialState
        if isinstance(aStateName, types.StringType):
            aState = self.getStateNamed(aStateName)
        else:
            # Allow the caller to pass in a state in itself, not just
            # the name of a state.
            aState = aStateName
            aStateName = aState.getName()
        if aState == None:
            ClassicFSM.notify.error("[%s]: request: %s, no such state" %
                                    (self.__name, aStateName))
        # Self-transitions and transitions to the final state always count
        # as defined here.
        transitionDefined = (
            self.__currentState.isTransitionDefined(aStateName) or
            aStateName in [self.__currentState.getName(),
                           self.__finalState.getName()]
        )
        if transitionDefined:
            return self.request(aStateName, enterArgList, exitArgList)
        else:
            assert ClassicFSM.notify.debug(
                "[%s]: condition_request: %s, transition doesnt exist" %
                (self.__name, aStateName))
            return 0

    def view(self):
        # Imported lazily so Tk is only required when the inspector is used.
        from direct.tkpanels import FSMInspector
        FSMInspector.FSMInspector(self)

    def isInternalStateInFlux(self):
        return self.__internalStateInFlux
| bsd-3-clause |
mathspace/django | tests/forms_tests/tests/test_regressions.py | 18 | 9153 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import (
CharField, ChoiceField, Form, HiddenInput, IntegerField, ModelForm,
ModelMultipleChoiceField, MultipleChoiceField, RadioSelect, Select,
TextInput,
)
from django.test import TestCase, ignore_warnings
from django.utils import translation
from django.utils.translation import gettext_lazy, ugettext_lazy
from ..models import Cheese
class FormsRegressionsTestCase(TestCase):
    """Regression tests pinning historical Django forms bugs.

    The ticket numbers referenced in the method names identify the original
    reports (e.g. #3600, #5216, #14234).
    """

    def test_class(self):
        # Tests to prevent against recurrences of earlier bugs.
        extra_attrs = {'class': 'special'}

        class TestForm(Form):
            f1 = CharField(max_length=10, widget=TextInput(attrs=extra_attrs))
            f2 = CharField(widget=TextInput(attrs=extra_attrs))

        self.assertHTMLEqual(
            TestForm(auto_id=False).as_p(),
            '<p>F1: <input type="text" class="special" name="f1" maxlength="10" required /></p>\n'
            '<p>F2: <input type="text" class="special" name="f2" required /></p>'
        )

    def test_regression_3600(self):
        # Tests for form i18n #
        # There were some problems with form translations in #3600
        class SomeForm(Form):
            username = CharField(max_length=10, label=ugettext_lazy('username'))

        f = SomeForm()
        self.assertHTMLEqual(
            f.as_p(),
            '<p><label for="id_username">username:</label>'
            '<input id="id_username" type="text" name="username" maxlength="10" required /></p>'
        )
        # Translations are done at rendering time, so multi-lingual apps can define forms)
        with translation.override('de'):
            self.assertHTMLEqual(
                f.as_p(),
                '<p><label for="id_username">Benutzername:</label>'
                '<input id="id_username" type="text" name="username" maxlength="10" required /></p>'
            )
        with translation.override('pl'):
            self.assertHTMLEqual(
                f.as_p(),
                '<p><label for="id_username">u\u017cytkownik:</label>'
                '<input id="id_username" type="text" name="username" maxlength="10" required /></p>'
            )

    def test_regression_5216(self):
        # There was some problems with form translations in #5216
        class SomeForm(Form):
            field_1 = CharField(max_length=10, label=ugettext_lazy('field_1'))
            field_2 = CharField(
                max_length=10,
                label=ugettext_lazy('field_2'),
                widget=TextInput(attrs={'id': 'field_2_id'}),
            )

        f = SomeForm()
        self.assertHTMLEqual(f['field_1'].label_tag(), '<label for="id_field_1">field_1:</label>')
        self.assertHTMLEqual(f['field_2'].label_tag(), '<label for="field_2_id">field_2:</label>')

        # Unicode decoding problems...
        GENDERS = (('\xc5', 'En tied\xe4'), ('\xf8', 'Mies'), ('\xdf', 'Nainen'))

        class SomeForm(Form):
            somechoice = ChoiceField(choices=GENDERS, widget=RadioSelect(), label='\xc5\xf8\xdf')

        f = SomeForm()
        self.assertHTMLEqual(
            f.as_p(),
            '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label>'
            '<ul id="id_somechoice">\n'
            '<li><label for="id_somechoice_0">'
            '<input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" required /> '
            'En tied\xe4</label></li>\n'
            '<li><label for="id_somechoice_1">'
            '<input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" required /> '
            'Mies</label></li>\n<li><label for="id_somechoice_2">'
            '<input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" required /> '
            'Nainen</label></li>\n</ul></p>'
        )

        # Translated error messages used to be buggy.
        with translation.override('ru'):
            f = SomeForm({})
            self.assertHTMLEqual(
                f.as_p(),
                '<ul class="errorlist"><li>'
                '\u041e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c'
                '\u043d\u043e\u0435 \u043f\u043e\u043b\u0435.</li></ul>\n'
                '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label>'
                ' <ul id="id_somechoice">\n<li><label for="id_somechoice_0">'
                '<input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" required /> '
                'En tied\xe4</label></li>\n'
                '<li><label for="id_somechoice_1">'
                '<input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" required /> '
                'Mies</label></li>\n<li><label for="id_somechoice_2">'
                '<input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" required /> '
                'Nainen</label></li>\n</ul></p>'
            )

        # Deep copying translated text shouldn't raise an error)
        class CopyForm(Form):
            degree = IntegerField(widget=Select(choices=((1, gettext_lazy('test')),)))

        f = CopyForm()

    @ignore_warnings(category=UnicodeWarning)
    def test_regression_5216_b(self):
        # Testing choice validation with UTF-8 bytestrings as input (these are the
        # Russian abbreviations "мес." and "шт.".
        UNITS = ((b'\xd0\xbc\xd0\xb5\xd1\x81.', b'\xd0\xbc\xd0\xb5\xd1\x81.'),
                 (b'\xd1\x88\xd1\x82.', b'\xd1\x88\xd1\x82.'))
        f = ChoiceField(choices=UNITS)
        self.assertEqual(f.clean('\u0448\u0442.'), '\u0448\u0442.')
        self.assertEqual(f.clean(b'\xd1\x88\xd1\x82.'), '\u0448\u0442.')

    def test_misc(self):
        # There once was a problem with Form fields called "data". Let's make sure that
        # doesn't come back.
        class DataForm(Form):
            data = CharField(max_length=10)

        f = DataForm({'data': 'xyzzy'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'data': 'xyzzy'})

        # A form with *only* hidden fields that has errors is going to be very unusual.
        class HiddenForm(Form):
            data = IntegerField(widget=HiddenInput)

        f = HiddenForm({})
        self.assertHTMLEqual(
            f.as_p(),
            '<ul class="errorlist nonfield">'
            '<li>(Hidden field data) This field is required.</li></ul>\n<p> '
            '<input type="hidden" name="data" id="id_data" /></p>'
        )
        self.assertHTMLEqual(
            f.as_table(),
            '<tr><td colspan="2"><ul class="errorlist nonfield">'
            '<li>(Hidden field data) This field is required.</li></ul>'
            '<input type="hidden" name="data" id="id_data" /></td></tr>'
        )

    def test_xss_error_messages(self):
        ###################################################
        # Tests for XSS vulnerabilities in error messages #
        ###################################################
        # The forms layer doesn't escape input values directly because error messages
        # might be presented in non-HTML contexts. Instead, the message is just marked
        # for escaping by the template engine. So we'll need to construct a little
        # silly template to trigger the escaping.
        from django.template import Template, Context
        t = Template('{{ form.errors }}')

        class SomeForm(Form):
            field = ChoiceField(choices=[('one', 'One')])

        f = SomeForm({'field': '<script>'})
        # The expected output must contain the HTML-escaped form of the
        # attacker-controlled value (&lt;script&gt;) -- that is the whole
        # point of this test.
        self.assertHTMLEqual(
            t.render(Context({'form': f})),
            '<ul class="errorlist"><li>field<ul class="errorlist">'
            '<li>Select a valid choice. &lt;script&gt; is not one of the '
            'available choices.</li></ul></li></ul>'
        )

        class SomeForm(Form):
            field = MultipleChoiceField(choices=[('one', 'One')])

        f = SomeForm({'field': ['<script>']})
        self.assertHTMLEqual(
            t.render(Context({'form': f})),
            '<ul class="errorlist"><li>field<ul class="errorlist">'
            '<li>Select a valid choice. &lt;script&gt; is not one of the '
            'available choices.</li></ul></li></ul>'
        )

        from forms_tests.models import ChoiceModel

        class SomeForm(Form):
            field = ModelMultipleChoiceField(ChoiceModel.objects.all())

        f = SomeForm({'field': ['<script>']})
        self.assertHTMLEqual(
            t.render(Context({'form': f})),
            '<ul class="errorlist"><li>field<ul class="errorlist">'
            '<li>"&lt;script&gt;" is not a valid value for a '
            'primary key.</li></ul></li></ul>'
        )

    def test_regression_14234(self):
        """
        Re-cleaning an instance that was added via a ModelForm should not raise
        a pk uniqueness error.
        """
        class CheeseForm(ModelForm):
            class Meta:
                model = Cheese
                fields = '__all__'

        form = CheeseForm({
            'name': 'Brie',
        })
        self.assertTrue(form.is_valid())
        obj = form.save()
        obj.name = 'Camembert'
        obj.full_clean()
| bsd-3-clause |
leiferikb/bitpop | src/chrome/common/extensions/docs/server2/host_file_system_provider_test.py | 97 | 1715 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import deepcopy
import unittest
from extensions_paths import CHROME_API
from file_system import FileNotFoundError
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from test_data.canned_data import CANNED_API_FILE_SYSTEM_DATA
from test_file_system import TestFileSystem
class HostFileSystemProviderTest(unittest.TestCase):
    """Tests for HostFileSystemProvider caching and offline behaviour."""

    def setUp(self):
        self._idle_path = CHROME_API + 'idle.json'
        # Deep copy so each test can mutate the canned data without
        # affecting other tests.
        self._canned_data = deepcopy(CANNED_API_FILE_SYSTEM_DATA)

    def _constructor_for_test(self, branch, **optargs):
        # Stand-in file system constructor serving this test's canned data
        # for the requested branch.
        return TestFileSystem(self._canned_data[branch])

    def testWithCaching(self):
        creator = HostFileSystemProvider(
            ObjectStoreCreator.ForTest(),
            constructor_for_test=self._constructor_for_test)
        fs = creator.GetBranch('1500')
        first_read = fs.ReadSingle(self._idle_path).Get()
        # Mutate the backing data; a caching file system must keep returning
        # the originally-read value.
        self._canned_data['1500']['chrome']['common']['extensions'].get('api'
            )['idle.json'] = 'blah blah blah'
        second_read = fs.ReadSingle(self._idle_path).Get()
        self.assertEqual(first_read, second_read)

    def testWithOffline(self):
        creator = HostFileSystemProvider(
            ObjectStoreCreator.ForTest(),
            offline=True,
            constructor_for_test=self._constructor_for_test)
        fs = creator.GetBranch('1500')
        # Offline file system should raise a FileNotFoundError if read is attempted.
        self.assertRaises(FileNotFoundError, fs.ReadSingle(self._idle_path).Get)
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
| gpl-3.0 |
obnoxxx/samba | python/samba/tests/xattr.py | 30 | 4213 | # Unix SMB/CIFS implementation. Tests for xattr manipulation
# Copyright (C) Matthieu Patou <mat@matws.net> 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.xattr_native and samba.xattr_tdb."""
import samba.xattr_native, samba.xattr_tdb
from samba.xattr import copytree_with_xattrs
from samba.dcerpc import xattr
from samba.ndr import ndr_pack
from samba.tests import (
SkipTest,
TestCase,
TestCaseInTempDir,
)
import random
import shutil
import os
class XattrTests(TestCase):
    """Tests for the native (filesystem) and tdb-backed xattr wrappers."""

    def _tmpfilename(self):
        # Fresh pseudo-random scratch path under the selftest prefix.
        random.seed()
        path = os.environ['SELFTEST_PREFIX']
        return os.path.join(path, "pytests" + str(int(100000 * random.random())))

    def _eadbpath(self):
        # Location of the tdb file backing the xattr_tdb wrappers.
        return os.path.join(os.environ['SELFTEST_PREFIX'], "eadb.tdb")

    def _write_file(self, path, contents="empty"):
        # Create the scratch file; 'with' closes the handle promptly instead
        # of leaking an open file descriptor as the original bare
        # open(path, 'w').write(...) did.
        with open(path, 'w') as f:
            f.write(contents)

    def test_set_xattr_native(self):
        if not samba.xattr_native.is_xattr_supported():
            raise SkipTest()
        ntacl = xattr.NTACL()
        ntacl.version = 1
        tempf = self._tmpfilename()
        self._write_file(tempf)
        try:
            samba.xattr_native.wrap_setxattr(tempf, "user.unittests",
                                             ndr_pack(ntacl))
        except IOError:
            raise SkipTest("the filesystem where the tests are runned do not support XATTR")
        os.unlink(tempf)

    def test_set_and_get_native(self):
        if not samba.xattr_native.is_xattr_supported():
            raise SkipTest()
        tempf = self._tmpfilename()
        reftxt = "this is a test"
        self._write_file(tempf)
        try:
            samba.xattr_native.wrap_setxattr(tempf, "user.unittests", reftxt)
            text = samba.xattr_native.wrap_getxattr(tempf, "user.unittests")
            self.assertEquals(text, reftxt)
        except IOError:
            raise SkipTest("the filesystem where the tests are runned do not support XATTR")
        os.unlink(tempf)

    def test_set_xattr_tdb(self):
        tempf = self._tmpfilename()
        eadb_path = self._eadbpath()
        ntacl = xattr.NTACL()
        ntacl.version = 1
        self._write_file(tempf)
        try:
            samba.xattr_tdb.wrap_setxattr(eadb_path,
                                          tempf, "user.unittests", ndr_pack(ntacl))
        finally:
            os.unlink(tempf)
            os.unlink(eadb_path)

    def test_set_tdb_not_open(self):
        # Pointing the wrapper at a nonexistent tdb path must raise IOError.
        tempf = self._tmpfilename()
        ntacl = xattr.NTACL()
        ntacl.version = 1
        self._write_file(tempf)
        try:
            self.assertRaises(IOError, samba.xattr_tdb.wrap_setxattr,
                              os.path.join("nonexistent", "eadb.tdb"), tempf,
                              "user.unittests", ndr_pack(ntacl))
        finally:
            os.unlink(tempf)

    def test_set_and_get_tdb(self):
        tempf = self._tmpfilename()
        eadb_path = self._eadbpath()
        reftxt = "this is a test"
        self._write_file(tempf)
        try:
            samba.xattr_tdb.wrap_setxattr(eadb_path, tempf, "user.unittests",
                                          reftxt)
            text = samba.xattr_tdb.wrap_getxattr(eadb_path, tempf,
                                                 "user.unittests")
            self.assertEquals(text, reftxt)
        finally:
            os.unlink(tempf)
            os.unlink(eadb_path)
class TestCopyTreeWithXattrs(TestCaseInTempDir):
    """Tests for copytree_with_xattrs."""

    def test_simple(self):
        """A small nested tree copies cleanly and both trees can be removed."""
        os.chdir(self.tempdir)
        os.mkdir("a")
        os.mkdir("a/b")
        os.mkdir("a/b/c")
        # 'with' closes the handle even if write() raises (replaces the
        # original explicit try/finally + close()).
        with open('a/b/c/d', 'w') as f:
            f.write("foo")
        copytree_with_xattrs("a", "b")
        shutil.rmtree("a")
        shutil.rmtree("b")
| gpl-3.0 |
dracos/QGIS | python/ext-libs/pygments/lexers/_asybuiltins.py | 369 | 27319 | # -*- coding: utf-8 -*-
"""
pygments.lexers._asybuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the asy-function names and asy-variable names of
Asymptote.
Do not edit the ASYFUNCNAME and ASYVARNAME sets by hand.
TODO: perl/python script in Asymptote SVN similar to asy-list.pl but only
for function and variable names.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
ASYFUNCNAME = set([
'AND',
'Arc',
'ArcArrow',
'ArcArrows',
'Arrow',
'Arrows',
'Automatic',
'AvantGarde',
'BBox',
'BWRainbow',
'BWRainbow2',
'Bar',
'Bars',
'BeginArcArrow',
'BeginArrow',
'BeginBar',
'BeginDotMargin',
'BeginMargin',
'BeginPenMargin',
'Blank',
'Bookman',
'Bottom',
'BottomTop',
'Bounds',
'Break',
'Broken',
'BrokenLog',
'Ceil',
'Circle',
'CircleBarIntervalMarker',
'Cos',
'Courier',
'CrossIntervalMarker',
'DefaultFormat',
'DefaultLogFormat',
'Degrees',
'Dir',
'DotMargin',
'DotMargins',
'Dotted',
'Draw',
'Drawline',
'Embed',
'EndArcArrow',
'EndArrow',
'EndBar',
'EndDotMargin',
'EndMargin',
'EndPenMargin',
'Fill',
'FillDraw',
'Floor',
'Format',
'Full',
'Gaussian',
'Gaussrand',
'Gaussrandpair',
'Gradient',
'Grayscale',
'Helvetica',
'Hermite',
'HookHead',
'InOutTicks',
'InTicks',
'J',
'Label',
'Landscape',
'Left',
'LeftRight',
'LeftTicks',
'Legend',
'Linear',
'Link',
'Log',
'LogFormat',
'Margin',
'Margins',
'Mark',
'MidArcArrow',
'MidArrow',
'NOT',
'NewCenturySchoolBook',
'NoBox',
'NoMargin',
'NoModifier',
'NoTicks',
'NoTicks3',
'NoZero',
'NoZeroFormat',
'None',
'OR',
'OmitFormat',
'OmitTick',
'OutTicks',
'Ox',
'Oy',
'Palatino',
'PaletteTicks',
'Pen',
'PenMargin',
'PenMargins',
'Pentype',
'Portrait',
'RadialShade',
'Rainbow',
'Range',
'Relative',
'Right',
'RightTicks',
'Rotate',
'Round',
'SQR',
'Scale',
'ScaleX',
'ScaleY',
'ScaleZ',
'Seascape',
'Shift',
'Sin',
'Slant',
'Spline',
'StickIntervalMarker',
'Straight',
'Symbol',
'Tan',
'TeXify',
'Ticks',
'Ticks3',
'TildeIntervalMarker',
'TimesRoman',
'Top',
'TrueMargin',
'UnFill',
'UpsideDown',
'Wheel',
'X',
'XEquals',
'XOR',
'XY',
'XYEquals',
'XYZero',
'XYgrid',
'XZEquals',
'XZZero',
'XZero',
'XZgrid',
'Y',
'YEquals',
'YXgrid',
'YZ',
'YZEquals',
'YZZero',
'YZero',
'YZgrid',
'Z',
'ZX',
'ZXgrid',
'ZYgrid',
'ZapfChancery',
'ZapfDingbats',
'_cputime',
'_draw',
'_eval',
'_image',
'_labelpath',
'_projection',
'_strokepath',
'_texpath',
'aCos',
'aSin',
'aTan',
'abort',
'abs',
'accel',
'acos',
'acosh',
'acot',
'acsc',
'add',
'addArrow',
'addMargins',
'addSaveFunction',
'addnode',
'addnodes',
'addpenarc',
'addpenline',
'addseg',
'adjust',
'alias',
'align',
'all',
'altitude',
'angabscissa',
'angle',
'angpoint',
'animate',
'annotate',
'anticomplementary',
'antipedal',
'apply',
'approximate',
'arc',
'arcarrowsize',
'arccircle',
'arcdir',
'arcfromcenter',
'arcfromfocus',
'arclength',
'arcnodesnumber',
'arcpoint',
'arcsubtended',
'arcsubtendedcenter',
'arctime',
'arctopath',
'array',
'arrow',
'arrow2',
'arrowbase',
'arrowbasepoints',
'arrowsize',
'asec',
'asin',
'asinh',
'ask',
'assert',
'asy',
'asycode',
'asydir',
'asyfigure',
'asyfilecode',
'asyinclude',
'asywrite',
'atan',
'atan2',
'atanh',
'atbreakpoint',
'atexit',
'atime',
'attach',
'attract',
'atupdate',
'autoformat',
'autoscale',
'autoscale3',
'axes',
'axes3',
'axialshade',
'axis',
'axiscoverage',
'azimuth',
'babel',
'background',
'bangles',
'bar',
'barmarksize',
'barsize',
'basealign',
'baseline',
'bbox',
'beep',
'begin',
'beginclip',
'begingroup',
'beginpoint',
'between',
'bevel',
'bezier',
'bezierP',
'bezierPP',
'bezierPPP',
'bezulate',
'bibliography',
'bibliographystyle',
'binarytree',
'binarytreeNode',
'binomial',
'binput',
'bins',
'bisector',
'bisectorpoint',
'blend',
'boutput',
'box',
'bqe',
'breakpoint',
'breakpoints',
'brick',
'buildRestoreDefaults',
'buildRestoreThunk',
'buildcycle',
'bulletcolor',
'canonical',
'canonicalcartesiansystem',
'cartesiansystem',
'case1',
'case2',
'case3',
'cbrt',
'cd',
'ceil',
'center',
'centerToFocus',
'centroid',
'cevian',
'change2',
'changecoordsys',
'checkSegment',
'checkconditionlength',
'checker',
'checklengths',
'checkposition',
'checktriangle',
'choose',
'circle',
'circlebarframe',
'circlemarkradius',
'circlenodesnumber',
'circumcenter',
'circumcircle',
'clamped',
'clear',
'clip',
'clipdraw',
'close',
'cmyk',
'code',
'colatitude',
'collect',
'collinear',
'color',
'colorless',
'colors',
'colorspace',
'comma',
'compassmark',
'complement',
'complementary',
'concat',
'concurrent',
'cone',
'conic',
'conicnodesnumber',
'conictype',
'conj',
'connect',
'containmentTree',
'contains',
'contour',
'contour3',
'controlSpecifier',
'convert',
'coordinates',
'coordsys',
'copy',
'cos',
'cosh',
'cot',
'countIntersections',
'cputime',
'crop',
'cropcode',
'cross',
'crossframe',
'crosshatch',
'crossmarksize',
'csc',
'cubicroots',
'curabscissa',
'curlSpecifier',
'curpoint',
'currentarrow',
'currentexitfunction',
'currentmomarrow',
'currentpolarconicroutine',
'curve',
'cut',
'cutafter',
'cutbefore',
'cyclic',
'cylinder',
'debugger',
'deconstruct',
'defaultdir',
'defaultformat',
'defaultpen',
'defined',
'degenerate',
'degrees',
'delete',
'deletepreamble',
'determinant',
'diagonal',
'diamond',
'diffdiv',
'dir',
'dirSpecifier',
'dirtime',
'display',
'distance',
'divisors',
'do_overpaint',
'dot',
'dotframe',
'dotsize',
'downcase',
'draw',
'drawAll',
'drawDoubleLine',
'drawFermion',
'drawGhost',
'drawGluon',
'drawMomArrow',
'drawPhoton',
'drawScalar',
'drawVertex',
'drawVertexBox',
'drawVertexBoxO',
'drawVertexBoxX',
'drawVertexO',
'drawVertexOX',
'drawVertexTriangle',
'drawVertexTriangleO',
'drawVertexX',
'drawarrow',
'drawarrow2',
'drawline',
'drawtick',
'duplicate',
'elle',
'ellipse',
'ellipsenodesnumber',
'embed',
'embed3',
'empty',
'enclose',
'end',
'endScript',
'endclip',
'endgroup',
'endl',
'endpoint',
'endpoints',
'eof',
'eol',
'equation',
'equations',
'erase',
'erasestep',
'erf',
'erfc',
'error',
'errorbar',
'errorbars',
'eval',
'excenter',
'excircle',
'exit',
'exitXasyMode',
'exitfunction',
'exp',
'expfactors',
'expi',
'expm1',
'exradius',
'extend',
'extension',
'extouch',
'fabs',
'factorial',
'fermat',
'fft',
'fhorner',
'figure',
'file',
'filecode',
'fill',
'filldraw',
'filloutside',
'fillrule',
'filltype',
'find',
'finite',
'finiteDifferenceJacobian',
'firstcut',
'firstframe',
'fit',
'fit2',
'fixedscaling',
'floor',
'flush',
'fmdefaults',
'fmod',
'focusToCenter',
'font',
'fontcommand',
'fontsize',
'foot',
'format',
'frac',
'frequency',
'fromCenter',
'fromFocus',
'fspline',
'functionshade',
'gamma',
'generate_random_backtrace',
'generateticks',
'gergonne',
'getc',
'getint',
'getpair',
'getreal',
'getstring',
'gettriple',
'gluon',
'gouraudshade',
'graph',
'graphic',
'gray',
'grestore',
'grid',
'grid3',
'gsave',
'halfbox',
'hatch',
'hdiffdiv',
'hermite',
'hex',
'histogram',
'history',
'hline',
'hprojection',
'hsv',
'hyperbola',
'hyperbolanodesnumber',
'hyperlink',
'hypot',
'identity',
'image',
'incenter',
'incentral',
'incircle',
'increasing',
'incrementposition',
'indexedTransform',
'indexedfigure',
'initXasyMode',
'initdefaults',
'input',
'inradius',
'insert',
'inside',
'integrate',
'interactive',
'interior',
'interp',
'interpolate',
'intersect',
'intersection',
'intersectionpoint',
'intersectionpoints',
'intersections',
'intouch',
'inverse',
'inversion',
'invisible',
'is3D',
'isDuplicate',
'isogonal',
'isogonalconjugate',
'isotomic',
'isotomicconjugate',
'isparabola',
'italic',
'item',
'key',
'kurtosis',
'kurtosisexcess',
'label',
'labelaxis',
'labelmargin',
'labelpath',
'labels',
'labeltick',
'labelx',
'labelx3',
'labely',
'labely3',
'labelz',
'labelz3',
'lastcut',
'latex',
'latitude',
'latticeshade',
'layer',
'layout',
'ldexp',
'leastsquares',
'legend',
'legenditem',
'length',
'lift',
'light',
'limits',
'line',
'linear',
'linecap',
'lineinversion',
'linejoin',
'linemargin',
'lineskip',
'linetype',
'linewidth',
'link',
'list',
'lm_enorm',
'lm_evaluate_default',
'lm_lmdif',
'lm_lmpar',
'lm_minimize',
'lm_print_default',
'lm_print_quiet',
'lm_qrfac',
'lm_qrsolv',
'locale',
'locate',
'locatefile',
'location',
'log',
'log10',
'log1p',
'logaxiscoverage',
'longitude',
'lookup',
'magnetize',
'makeNode',
'makedraw',
'makepen',
'map',
'margin',
'markangle',
'markangleradius',
'markanglespace',
'markarc',
'marker',
'markinterval',
'marknodes',
'markrightangle',
'markuniform',
'mass',
'masscenter',
'massformat',
'math',
'max',
'max3',
'maxbezier',
'maxbound',
'maxcoords',
'maxlength',
'maxratio',
'maxtimes',
'mean',
'medial',
'median',
'midpoint',
'min',
'min3',
'minbezier',
'minbound',
'minipage',
'minratio',
'mintimes',
'miterlimit',
'momArrowPath',
'momarrowsize',
'monotonic',
'multifigure',
'nativeformat',
'natural',
'needshipout',
'newl',
'newpage',
'newslide',
'newton',
'newtree',
'nextframe',
'nextnormal',
'nextpage',
'nib',
'nodabscissa',
'none',
'norm',
'normalvideo',
'notaknot',
'nowarn',
'numberpage',
'nurb',
'object',
'offset',
'onpath',
'opacity',
'opposite',
'orientation',
'orig_circlenodesnumber',
'orig_circlenodesnumber1',
'orig_draw',
'orig_ellipsenodesnumber',
'orig_ellipsenodesnumber1',
'orig_hyperbolanodesnumber',
'orig_parabolanodesnumber',
'origin',
'orthic',
'orthocentercenter',
'outformat',
'outline',
'outprefix',
'output',
'overloadedMessage',
'overwrite',
'pack',
'pad',
'pairs',
'palette',
'parabola',
'parabolanodesnumber',
'parallel',
'partialsum',
'path',
'path3',
'pattern',
'pause',
'pdf',
'pedal',
'periodic',
'perp',
'perpendicular',
'perpendicularmark',
'phantom',
'phi1',
'phi2',
'phi3',
'photon',
'piecewisestraight',
'point',
'polar',
'polarconicroutine',
'polargraph',
'polygon',
'postcontrol',
'postscript',
'pow10',
'ppoint',
'prc',
'prc0',
'precision',
'precontrol',
'prepend',
'print_random_addresses',
'project',
'projection',
'purge',
'pwhermite',
'quadrant',
'quadraticroots',
'quantize',
'quarticroots',
'quotient',
'radialshade',
'radians',
'radicalcenter',
'radicalline',
'radius',
'rand',
'randompath',
'rd',
'readline',
'realmult',
'realquarticroots',
'rectangle',
'rectangular',
'rectify',
'reflect',
'relabscissa',
'relative',
'relativedistance',
'reldir',
'relpoint',
'reltime',
'remainder',
'remark',
'removeDuplicates',
'rename',
'replace',
'report',
'resetdefaultpen',
'restore',
'restoredefaults',
'reverse',
'reversevideo',
'rf',
'rfind',
'rgb',
'rgba',
'rgbint',
'rms',
'rotate',
'rotateO',
'rotation',
'round',
'roundbox',
'roundedpath',
'roundrectangle',
'samecoordsys',
'sameside',
'sample',
'save',
'savedefaults',
'saveline',
'scale',
'scale3',
'scaleO',
'scaleT',
'scaleless',
'scientific',
'search',
'searchtree',
'sec',
'secondaryX',
'secondaryY',
'seconds',
'section',
'sector',
'seek',
'seekeof',
'segment',
'sequence',
'setpens',
'sgn',
'sgnd',
'sharpangle',
'sharpdegrees',
'shift',
'shiftless',
'shipout',
'shipout3',
'show',
'side',
'simeq',
'simpson',
'sin',
'single',
'sinh',
'size',
'size3',
'skewness',
'skip',
'slant',
'sleep',
'slope',
'slopefield',
'solve',
'solveBVP',
'sort',
'sourceline',
'sphere',
'split',
'sqrt',
'square',
'srand',
'standardizecoordsys',
'startScript',
'startTrembling',
'stdev',
'step',
'stickframe',
'stickmarksize',
'stickmarkspace',
'stop',
'straight',
'straightness',
'string',
'stripdirectory',
'stripextension',
'stripfile',
'strokepath',
'subdivide',
'subitem',
'subpath',
'substr',
'sum',
'surface',
'symmedial',
'symmedian',
'system',
'tab',
'tableau',
'tan',
'tangent',
'tangential',
'tangents',
'tanh',
'tell',
'tensionSpecifier',
'tensorshade',
'tex',
'texcolor',
'texify',
'texpath',
'texpreamble',
'texreset',
'texshipout',
'texsize',
'textpath',
'thick',
'thin',
'tick',
'tickMax',
'tickMax3',
'tickMin',
'tickMin3',
'ticklabelshift',
'ticklocate',
'tildeframe',
'tildemarksize',
'tile',
'tiling',
'time',
'times',
'title',
'titlepage',
'topbox',
'transform',
'transformation',
'transpose',
'tremble',
'trembleFuzz',
'tremble_circlenodesnumber',
'tremble_circlenodesnumber1',
'tremble_draw',
'tremble_ellipsenodesnumber',
'tremble_ellipsenodesnumber1',
'tremble_hyperbolanodesnumber',
'tremble_marknodes',
'tremble_markuniform',
'tremble_parabolanodesnumber',
'triangle',
'triangleAbc',
'triangleabc',
'triangulate',
'tricoef',
'tridiagonal',
'trilinear',
'trim',
'trueMagnetize',
'truepoint',
'tube',
'uncycle',
'unfill',
'uniform',
'unit',
'unitrand',
'unitsize',
'unityroot',
'unstraighten',
'upcase',
'updatefunction',
'uperiodic',
'upscale',
'uptodate',
'usepackage',
'usersetting',
'usetypescript',
'usleep',
'value',
'variance',
'variancebiased',
'vbox',
'vector',
'vectorfield',
'verbatim',
'view',
'vline',
'vperiodic',
'vprojection',
'warn',
'warning',
'windingnumber',
'write',
'xaxis',
'xaxis3',
'xaxis3At',
'xaxisAt',
'xequals',
'xinput',
'xlimits',
'xoutput',
'xpart',
'xscale',
'xscaleO',
'xtick',
'xtick3',
'xtrans',
'yaxis',
'yaxis3',
'yaxis3At',
'yaxisAt',
'yequals',
'ylimits',
'ypart',
'yscale',
'yscaleO',
'ytick',
'ytick3',
'ytrans',
'zaxis3',
'zaxis3At',
'zero',
'zero3',
'zlimits',
'zpart',
'ztick',
'ztick3',
'ztrans'
])
ASYVARNAME = set([
'AliceBlue',
'Align',
'Allow',
'AntiqueWhite',
'Apricot',
'Aqua',
'Aquamarine',
'Aspect',
'Azure',
'BeginPoint',
'Beige',
'Bisque',
'Bittersweet',
'Black',
'BlanchedAlmond',
'Blue',
'BlueGreen',
'BlueViolet',
'Both',
'Break',
'BrickRed',
'Brown',
'BurlyWood',
'BurntOrange',
'CCW',
'CW',
'CadetBlue',
'CarnationPink',
'Center',
'Centered',
'Cerulean',
'Chartreuse',
'Chocolate',
'Coeff',
'Coral',
'CornflowerBlue',
'Cornsilk',
'Crimson',
'Crop',
'Cyan',
'Dandelion',
'DarkBlue',
'DarkCyan',
'DarkGoldenrod',
'DarkGray',
'DarkGreen',
'DarkKhaki',
'DarkMagenta',
'DarkOliveGreen',
'DarkOrange',
'DarkOrchid',
'DarkRed',
'DarkSalmon',
'DarkSeaGreen',
'DarkSlateBlue',
'DarkSlateGray',
'DarkTurquoise',
'DarkViolet',
'DeepPink',
'DeepSkyBlue',
'DefaultHead',
'DimGray',
'DodgerBlue',
'Dotted',
'Draw',
'E',
'ENE',
'EPS',
'ESE',
'E_Euler',
'E_PC',
'E_RK2',
'E_RK3BS',
'Emerald',
'EndPoint',
'Euler',
'Fill',
'FillDraw',
'FireBrick',
'FloralWhite',
'ForestGreen',
'Fuchsia',
'Gainsboro',
'GhostWhite',
'Gold',
'Goldenrod',
'Gray',
'Green',
'GreenYellow',
'Honeydew',
'HookHead',
'Horizontal',
'HotPink',
'I',
'IgnoreAspect',
'IndianRed',
'Indigo',
'Ivory',
'JOIN_IN',
'JOIN_OUT',
'JungleGreen',
'Khaki',
'LM_DWARF',
'LM_MACHEP',
'LM_SQRT_DWARF',
'LM_SQRT_GIANT',
'LM_USERTOL',
'Label',
'Lavender',
'LavenderBlush',
'LawnGreen',
'LeftJustified',
'LeftSide',
'LemonChiffon',
'LightBlue',
'LightCoral',
'LightCyan',
'LightGoldenrodYellow',
'LightGreen',
'LightGrey',
'LightPink',
'LightSalmon',
'LightSeaGreen',
'LightSkyBlue',
'LightSlateGray',
'LightSteelBlue',
'LightYellow',
'Lime',
'LimeGreen',
'Linear',
'Linen',
'Log',
'Logarithmic',
'Magenta',
'Mahogany',
'Mark',
'MarkFill',
'Maroon',
'Max',
'MediumAquamarine',
'MediumBlue',
'MediumOrchid',
'MediumPurple',
'MediumSeaGreen',
'MediumSlateBlue',
'MediumSpringGreen',
'MediumTurquoise',
'MediumVioletRed',
'Melon',
'MidPoint',
'MidnightBlue',
'Min',
'MintCream',
'MistyRose',
'Moccasin',
'Move',
'MoveQuiet',
'Mulberry',
'N',
'NE',
'NNE',
'NNW',
'NW',
'NavajoWhite',
'Navy',
'NavyBlue',
'NoAlign',
'NoCrop',
'NoFill',
'NoSide',
'OldLace',
'Olive',
'OliveDrab',
'OliveGreen',
'Orange',
'OrangeRed',
'Orchid',
'Ox',
'Oy',
'PC',
'PaleGoldenrod',
'PaleGreen',
'PaleTurquoise',
'PaleVioletRed',
'PapayaWhip',
'Peach',
'PeachPuff',
'Periwinkle',
'Peru',
'PineGreen',
'Pink',
'Plum',
'PowderBlue',
'ProcessBlue',
'Purple',
'RK2',
'RK3',
'RK3BS',
'RK4',
'RK5',
'RK5DP',
'RK5F',
'RawSienna',
'Red',
'RedOrange',
'RedViolet',
'Rhodamine',
'RightJustified',
'RightSide',
'RosyBrown',
'RoyalBlue',
'RoyalPurple',
'RubineRed',
'S',
'SE',
'SSE',
'SSW',
'SW',
'SaddleBrown',
'Salmon',
'SandyBrown',
'SeaGreen',
'Seashell',
'Sepia',
'Sienna',
'Silver',
'SimpleHead',
'SkyBlue',
'SlateBlue',
'SlateGray',
'Snow',
'SpringGreen',
'SteelBlue',
'Suppress',
'SuppressQuiet',
'Tan',
'TeXHead',
'Teal',
'TealBlue',
'Thistle',
'Ticksize',
'Tomato',
'Turquoise',
'UnFill',
'VERSION',
'Value',
'Vertical',
'Violet',
'VioletRed',
'W',
'WNW',
'WSW',
'Wheat',
'White',
'WhiteSmoke',
'WildStrawberry',
'XYAlign',
'YAlign',
'Yellow',
'YellowGreen',
'YellowOrange',
'addpenarc',
'addpenline',
'align',
'allowstepping',
'angularsystem',
'animationdelay',
'appendsuffix',
'arcarrowangle',
'arcarrowfactor',
'arrow2sizelimit',
'arrowangle',
'arrowbarb',
'arrowdir',
'arrowfactor',
'arrowhookfactor',
'arrowlength',
'arrowsizelimit',
'arrowtexfactor',
'authorpen',
'axis',
'axiscoverage',
'axislabelfactor',
'background',
'backgroundcolor',
'backgroundpen',
'barfactor',
'barmarksizefactor',
'basealign',
'baselinetemplate',
'beveljoin',
'bigvertexpen',
'bigvertexsize',
'black',
'blue',
'bm',
'bottom',
'bp',
'brown',
'bullet',
'byfoci',
'byvertices',
'camerafactor',
'chartreuse',
'circlemarkradiusfactor',
'circlenodesnumberfactor',
'circleprecision',
'circlescale',
'cm',
'codefile',
'codepen',
'codeskip',
'colorPen',
'coloredNodes',
'coloredSegments',
'conditionlength',
'conicnodesfactor',
'count',
'cputimeformat',
'crossmarksizefactor',
'currentcoordsys',
'currentlight',
'currentpatterns',
'currentpen',
'currentpicture',
'currentposition',
'currentprojection',
'curvilinearsystem',
'cuttings',
'cyan',
'darkblue',
'darkbrown',
'darkcyan',
'darkgray',
'darkgreen',
'darkgrey',
'darkmagenta',
'darkolive',
'darkred',
'dashdotted',
'dashed',
'datepen',
'dateskip',
'debuggerlines',
'debugging',
'deepblue',
'deepcyan',
'deepgray',
'deepgreen',
'deepgrey',
'deepmagenta',
'deepred',
'default',
'defaultControl',
'defaultS',
'defaultbackpen',
'defaultcoordsys',
'defaultfilename',
'defaultformat',
'defaultmassformat',
'defaultpen',
'diagnostics',
'differentlengths',
'dot',
'dotfactor',
'dotframe',
'dotted',
'doublelinepen',
'doublelinespacing',
'down',
'duplicateFuzz',
'ellipsenodesnumberfactor',
'eps',
'epsgeo',
'epsilon',
'evenodd',
'extendcap',
'fermionpen',
'figureborder',
'figuremattpen',
'firstnode',
'firststep',
'foregroundcolor',
'fuchsia',
'fuzz',
'gapfactor',
'ghostpen',
'gluonamplitude',
'gluonpen',
'gluonratio',
'gray',
'green',
'grey',
'hatchepsilon',
'havepagenumber',
'heavyblue',
'heavycyan',
'heavygray',
'heavygreen',
'heavygrey',
'heavymagenta',
'heavyred',
'hline',
'hwratio',
'hyperbolanodesnumberfactor',
'identity4',
'ignore',
'inXasyMode',
'inch',
'inches',
'includegraphicscommand',
'inf',
'infinity',
'institutionpen',
'intMax',
'intMin',
'invert',
'invisible',
'itempen',
'itemskip',
'itemstep',
'labelmargin',
'landscape',
'lastnode',
'left',
'legendhskip',
'legendlinelength',
'legendmargin',
'legendmarkersize',
'legendmaxrelativewidth',
'legendvskip',
'lightblue',
'lightcyan',
'lightgray',
'lightgreen',
'lightgrey',
'lightmagenta',
'lightolive',
'lightred',
'lightyellow',
'linemargin',
'lm_infmsg',
'lm_shortmsg',
'longdashdotted',
'longdashed',
'magenta',
'magneticPoints',
'magneticRadius',
'mantissaBits',
'markangleradius',
'markangleradiusfactor',
'markanglespace',
'markanglespacefactor',
'mediumblue',
'mediumcyan',
'mediumgray',
'mediumgreen',
'mediumgrey',
'mediummagenta',
'mediumred',
'mediumyellow',
'middle',
'minDistDefault',
'minblockheight',
'minblockwidth',
'mincirclediameter',
'minipagemargin',
'minipagewidth',
'minvertexangle',
'miterjoin',
'mm',
'momarrowfactor',
'momarrowlength',
'momarrowmargin',
'momarrowoffset',
'momarrowpen',
'monoPen',
'morepoints',
'nCircle',
'newbulletcolor',
'ngraph',
'nil',
'nmesh',
'nobasealign',
'nodeMarginDefault',
'nodesystem',
'nomarker',
'nopoint',
'noprimary',
'nullpath',
'nullpen',
'numarray',
'ocgindex',
'oldbulletcolor',
'olive',
'orange',
'origin',
'overpaint',
'page',
'pageheight',
'pagemargin',
'pagenumberalign',
'pagenumberpen',
'pagenumberposition',
'pagewidth',
'paleblue',
'palecyan',
'palegray',
'palegreen',
'palegrey',
'palemagenta',
'palered',
'paleyellow',
'parabolanodesnumberfactor',
'perpfactor',
'phi',
'photonamplitude',
'photonpen',
'photonratio',
'pi',
'pink',
'plain',
'plus',
'preamblenodes',
'pt',
'purple',
'r3',
'r4a',
'r4b',
'randMax',
'realDigits',
'realEpsilon',
'realMax',
'realMin',
'red',
'relativesystem',
'reverse',
'right',
'roundcap',
'roundjoin',
'royalblue',
'salmon',
'saveFunctions',
'scalarpen',
'sequencereal',
'settings',
'shipped',
'signedtrailingzero',
'solid',
'springgreen',
'sqrtEpsilon',
'squarecap',
'squarepen',
'startposition',
'stdin',
'stdout',
'stepfactor',
'stepfraction',
'steppagenumberpen',
'stepping',
'stickframe',
'stickmarksizefactor',
'stickmarkspacefactor',
'textpen',
'ticksize',
'tildeframe',
'tildemarksizefactor',
'tinv',
'titlealign',
'titlepagepen',
'titlepageposition',
'titlepen',
'titleskip',
'top',
'trailingzero',
'treeLevelStep',
'treeMinNodeWidth',
'treeNodeStep',
'trembleAngle',
'trembleFrequency',
'trembleRandom',
'tremblingMode',
'undefined',
'unitcircle',
'unitsquare',
'up',
'urlpen',
'urlskip',
'version',
'vertexpen',
'vertexsize',
'viewportmargin',
'viewportsize',
'vline',
'white',
'wye',
'xformStack',
'yellow',
'ylabelwidth',
'zerotickfuzz',
'zerowinding'
])
| gpl-2.0 |
Cynerd/mcserver-wrapper | mcwrapper/wrapper.py | 2 | 4051 | # vim: expandtab ft=python ts=4 sw=4 sts=4:
import os
import sys
import subprocess
import time
from threading import Thread
from .import prints
__INPUTPIPE__ = 'input_pipe'
__PIDFILE__ = 'server.pid'
class MCWrapper:
    """Wrapper managing a Minecraft server subprocess.

    Launches the server command, relays lines written to a named input pipe
    into the server's stdin, logs the server's output through ``prints`` and
    dispatches registered start/line hooks.
    """

    def __init__(self, command):
        """Initialize the wrapper.

        Args:
            command: argv list used to launch the Minecraft server.
        """
        self.process = None
        self.command = command
        self._running = False
        self._hook_start = []
        self._hook_stop = []  # registered via hook_stop(); currently never fired
        self._hook_line = []
        prints.info("Server wrapper initializing")
        if os.path.isfile(__PIDFILE__):
            with open(__PIDFILE__) as file:
                lpid = int(file.readline())
            try:
                # Signal 0 only probes whether the pid is alive.
                os.kill(lpid, 0)
            except OSError:
                prints.warning("Detected forced termination of previous server"
                               " wrapper instance.")
            else:
                prints.error("Another wrapper is running with given "
                             "identifier.", -1, 1)
        try:
            os.mkfifo(__INPUTPIPE__, 0o640)
        except FileExistsError:
            # Reuse the pipe left behind by a previous run.
            pass
        self.inputthread = Thread(target=self.__input_thread__,
                                  daemon=True)
        self.outputhread = Thread(target=self.__output_thread__,
                                  daemon=True)

    def clean(self):
        "Cleans files generated by wrapper"
        prints.info("Server wrapper clean.")
        for path in (__INPUTPIPE__, __PIDFILE__):
            try:
                os.remove(path)
            except FileNotFoundError:
                pass

    def start(self):
        "Start Minecraft server"
        self.process = subprocess.Popen(
            self.command, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            start_new_session=False)
        for h in self._hook_start:
            h()
        self._running = True
        with open(__PIDFILE__, "w") as file:
            file.write(str(self.process.pid))
        # Threads survive a server restart, so only start them once.
        if not self.inputthread.is_alive():
            self.inputthread.start()
        if not self.outputhread.is_alive():
            self.outputhread.start()

    def stop(self):
        "Sends /stop command to Minecraft server"
        if self.running():
            self.process.stdin.write(bytes(
                "/stop\n", sys.getdefaultencoding()))
            self.process.stdin.flush()
            self._running = False

    def running(self):
        """Return True if the mc server is running, otherwise False."""
        # Bug fix: this previously returned True unconditionally, which
        # contradicted the docstring and made stop() write to a dead or
        # never-started process.
        return (self._running and self.process is not None
                and self.process.poll() is None)

    def write_to_terminal(self, text):
        """Write to server terminal. Returns False if server is not running."""
        if self._running:
            prints.info("Input: " + text.rstrip(), 1)
            self.process.stdin.write(bytes(text, sys.getdefaultencoding()))
            self.process.stdin.flush()
            return True
        else:
            return False

    def hook_start(self, handler):
        """Register a no-argument callable invoked right after server start."""
        self._hook_start.append(handler)

    def hook_stop(self, handler):
        """Register a stop handler (stored; not triggered by current code)."""
        self._hook_stop.append(handler)

    def hook_line(self, contains, handler):
        """Register handler(line) called for output lines containing `contains`."""
        self._hook_line.append({"contains": contains, "handler": handler})

    def __parse_line__(self, line):
        # Dispatch the line to every registered hook whose needle matches.
        i = 0
        while i < len(self._hook_line):
            if self._hook_line[i]["contains"] in line:
                self._hook_line[i]["handler"](line)
            i += 1

    def __output_thread__(self):
        # Pump server stdout: log each line and feed it to the line hooks.
        for linen in self.process.stdout:
            line = linen.decode(sys.getdefaultencoding())
            prints.info(line.rstrip(), 2, notime=True)
            self.__parse_line__(line.rstrip())

    def __input_thread__(self):
        # Relay lines written to the named pipe into the server's stdin.
        with open(__INPUTPIPE__, 'r') as pipe:
            while True:
                line = pipe.readline().rstrip()
                # TODO use polling instead of sleeping on an empty pipe
                if line:
                    self.write_to_terminal(line + "\n")
                else:
                    time.sleep(3)
| gpl-2.0 |
frreiss/tensorflow-fred | tensorflow/python/lib/io/tf_record.py | 9 | 11773 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For reading and writing TFRecords files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import _pywrap_record_io
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(
    v1=["io.TFRecordCompressionType", "python_io.TFRecordCompressionType"])
@deprecation.deprecated_endpoints("io.TFRecordCompressionType",
                                  "python_io.TFRecordCompressionType")
class TFRecordCompressionType(object):
  """The type of compression for the record.

  Deprecated v1 endpoint: prefer passing the compression type string
  ("", "ZLIB", "GZIP") directly to `tf.io.TFRecordOptions`.
  """
  # Integer codes; `TFRecordOptions.compression_type_map` maps each one to the
  # string form the record reader/writer expects.
  NONE = 0
  ZLIB = 1
  GZIP = 2
@tf_export(
    "io.TFRecordOptions",
    v1=["io.TFRecordOptions", "python_io.TFRecordOptions"])
@deprecation.deprecated_endpoints("python_io.TFRecordOptions")
class TFRecordOptions(object):
  """Options used for manipulating TFRecord files."""

  compression_type_map = {
      TFRecordCompressionType.ZLIB: "ZLIB",
      TFRecordCompressionType.GZIP: "GZIP",
      TFRecordCompressionType.NONE: ""
  }

  # Attributes copied onto the underlying zlib options when not None.
  _ZLIB_ATTRS = (
      "flush_mode",
      "input_buffer_size",
      "output_buffer_size",
      "window_bits",
      "compression_level",
      "compression_method",
      "mem_level",
      "compression_strategy",
  )

  def __init__(self,
               compression_type=None,
               flush_mode=None,
               input_buffer_size=None,
               output_buffer_size=None,
               window_bits=None,
               compression_level=None,
               compression_method=None,
               mem_level=None,
               compression_strategy=None):
    # pylint: disable=line-too-long
    """Creates a `TFRecordOptions` instance.

    These options only affect `TFRecordWriter` when `compression_type` is not
    `None`. Details and defaults for the zlib knobs live in
    [`zlib_compression_options.h`](https://www.tensorflow.org/code/tensorflow/core/lib/io/zlib_compression_options.h)
    and in the [zlib manual](http://www.zlib.net/manual.html). Any option left
    as `None` lets the C++ layer pick a reasonable default.

    Args:
      compression_type: `"GZIP"`, `"ZLIB"`, or `""` (no compression).
      flush_mode: flush mode or `None`, Default: Z_NO_FLUSH.
      input_buffer_size: int or `None`.
      output_buffer_size: int or `None`.
      window_bits: int or `None`.
      compression_level: 0 to 9, or `None`.
      compression_method: compression method or `None`.
      mem_level: 1 to 9, or `None`.
      compression_strategy: strategy or `None`. Default: Z_DEFAULT_STRATEGY.

    Returns:
      A `TFRecordOptions` object.

    Raises:
      ValueError: If compression_type is invalid.
    """
    # pylint: enable=line-too-long
    # Validate eagerly so a bad compression_type raises here, while storing
    # the value exactly as passed for backwards compatibility.
    self.get_compression_type_string(compression_type)
    self.compression_type = compression_type
    self.flush_mode = flush_mode
    self.input_buffer_size = input_buffer_size
    self.output_buffer_size = output_buffer_size
    self.window_bits = window_bits
    self.compression_level = compression_level
    self.compression_method = compression_method
    self.mem_level = mem_level
    self.compression_strategy = compression_strategy

  @classmethod
  def get_compression_type_string(cls, options):
    """Converts any accepted option form to the canonical string.

    Args:
      options: `TFRecordOption`, `TFRecordCompressionType`, or string.

    Returns:
      Compression type as string (e.g. `'ZLIB'`, `'GZIP'`, or `''`).

    Raises:
      ValueError: If compression_type is invalid.
    """
    # Guard clauses, from most specific form to least.
    if not options:
      return ""
    if isinstance(options, TFRecordOptions):
      return cls.get_compression_type_string(options.compression_type)
    if isinstance(options, TFRecordCompressionType):
      return cls.compression_type_map[options]
    if options in TFRecordOptions.compression_type_map:
      return cls.compression_type_map[options]
    if options in TFRecordOptions.compression_type_map.values():
      return options
    raise ValueError('Not a valid compression_type: "{}"'.format(options))

  def _as_record_writer_options(self):
    """Converts these options to RecordWriterOptions for PyRecordWriter."""
    options = _pywrap_record_io.RecordWriterOptions(
        compat.as_bytes(
            self.get_compression_type_string(self.compression_type)))
    # Copy every explicitly-set zlib attribute onto the C++ options object.
    for attr in self._ZLIB_ATTRS:
      value = getattr(self, attr)
      if value is not None:
        setattr(options.zlib_options, attr, value)
    return options
@tf_export(v1=["io.tf_record_iterator", "python_io.tf_record_iterator"])
@deprecation.deprecated(
    date=None,
    instructions=("Use eager execution and: \n"
                  "`tf.data.TFRecordDataset(path)`"))
def tf_record_iterator(path, options=None):
  """Returns an iterator over the records of a TFRecords file.

  Args:
    path: The path to the TFRecords file.
    options: (optional) A TFRecordOptions object.

  Returns:
    An iterator yielding serialized TFRecords.

  Raises:
    IOError: If `path` cannot be opened for reading.
  """
  return _pywrap_record_io.RecordIterator(
      path, TFRecordOptions.get_compression_type_string(options))
def tf_record_random_reader(path):
  """Creates a reader that supports random-access reads of a TFRecords file.

  The returned object exposes a single method, `read(offset)`, which returns
  a `(record, ending_offset)` tuple: `record` is the serialized TFRecord that
  starts at `offset`, and `ending_offset` is where that record ends — i.e.
  the starting offset of the next record.

  `read` raises `tf.errors.DataLossError` when the data at the given offset
  is corrupted, and `IndexError` when the offset lies outside the range of
  the TFRecords file.

  Usage example:

  ```py
  reader = tf_record_random_reader(file_path)

  record_1, offset_1 = reader.read(0)  # 0 is the initial offset.
  record_2, offset_2 = reader.read(offset_1)  # Read the following record.
  reader.read(0)  # Random access: jump back to the first record.
  ```

  Args:
    path: The path to the TFRecords file.

  Returns:
    An object that supports random-access reading of the serialized TFRecords.

  Raises:
    IOError: If `path` cannot be opened for reading.
  """
  return _pywrap_record_io.RandomRecordReader(path)
@tf_export(
    "io.TFRecordWriter", v1=["io.TFRecordWriter", "python_io.TFRecordWriter"])
@deprecation.deprecated_endpoints("python_io.TFRecordWriter")
class TFRecordWriter(_pywrap_record_io.RecordWriter):
  """A class to write records to a TFRecords file.

  [TFRecords tutorial](https://www.tensorflow.org/tutorials/load_data/tfrecord)

  TFRecords is a binary format which is optimized for high throughput data
  retrieval, generally in conjunction with `tf.data`. `TFRecordWriter` is used
  to write serialized examples to a file for later consumption. The key steps
  are:

  Ahead of time:

  - [Convert data into a serialized format](
  https://www.tensorflow.org/tutorials/load_data/tfrecord#tfexample)
  - [Write the serialized data to one or more files](
  https://www.tensorflow.org/tutorials/load_data/tfrecord#tfrecord_files_in_python)

  During training or evaluation:

  - [Read serialized examples into memory](
  https://www.tensorflow.org/tutorials/load_data/tfrecord#reading_a_tfrecord_file)
  - [Parse (deserialize) examples](
  https://www.tensorflow.org/tutorials/load_data/tfrecord#reading_a_tfrecord_file)

  A minimal example is given below:

  >>> import tempfile
  >>> example_path = os.path.join(tempfile.gettempdir(), "example.tfrecords")
  >>> np.random.seed(0)

  >>> # Write the records to a file.
  ... with tf.io.TFRecordWriter(example_path) as file_writer:
  ...   for _ in range(4):
  ...     x, y = np.random.random(), np.random.random()
  ...
  ...     record_bytes = tf.train.Example(features=tf.train.Features(feature={
  ...         "x": tf.train.Feature(float_list=tf.train.FloatList(value=[x])),
  ...         "y": tf.train.Feature(float_list=tf.train.FloatList(value=[y])),
  ...     })).SerializeToString()
  ...     file_writer.write(record_bytes)

  >>> # Read the data back out.
  >>> def decode_fn(record_bytes):
  ...   return tf.io.parse_single_example(
  ...       # Data
  ...       record_bytes,
  ...
  ...       # Schema
  ...       {"x": tf.io.FixedLenFeature([], dtype=tf.float32),
  ...        "y": tf.io.FixedLenFeature([], dtype=tf.float32)}
  ...   )

  >>> for batch in tf.data.TFRecordDataset([example_path]).map(decode_fn):
  ...   print("x = {x:.4f}, y = {y:.4f}".format(**batch))
  x = 0.5488, y = 0.7152
  x = 0.6028, y = 0.5449
  x = 0.4237, y = 0.6459
  x = 0.4376, y = 0.8918

  This class implements `__enter__` and `__exit__`, and can be used
  in `with` blocks like a normal file. (See the usage example above.)
  """

  # TODO(josh11b): Support appending?

  def __init__(self, path, options=None):
    """Opens file `path` and creates a `TFRecordWriter` writing to it.

    Args:
      path: The path to the TFRecords file.
      options: (optional) String specifying compression type,
          `TFRecordCompressionType`, or `TFRecordOptions` object.

    Raises:
      IOError: If `path` cannot be opened for writing.
      ValueError: If valid compression_type can't be determined from `options`.
    """
    if not isinstance(options, TFRecordOptions):
      # Normalize a bare compression-type value (string/enum/None) into a
      # full TFRecordOptions instance.
      options = TFRecordOptions(compression_type=options)

    # pylint: disable=protected-access
    super(TFRecordWriter, self).__init__(
        compat.as_bytes(path), options._as_record_writer_options())
    # pylint: enable=protected-access

  # TODO(slebedev): The following wrapper methods are there to compensate
  # for lack of signatures in pybind11-generated classes. Switch to
  # __text_signature__ when TensorFlow drops Python 2.X support.
  # See https://github.com/pybind/pybind11/issues/945
  # pylint: disable=useless-super-delegation
  def write(self, record):
    """Write a string record to the file.

    Args:
      record: str
    """
    super(TFRecordWriter, self).write(record)

  def flush(self):
    """Flush the file."""
    super(TFRecordWriter, self).flush()

  def close(self):
    """Close the file."""
    super(TFRecordWriter, self).close()
  # pylint: enable=useless-super-delegation
| apache-2.0 |
basvandijk/nixops | nixops/backends/container.py | 4 | 7973 | # -*- coding: utf-8 -*-
import subprocess

import nixops.known_hosts
import nixops.ssh_util
import nixops.util
from nixops.backends import MachineDefinition, MachineState
class ContainerDefinition(MachineDefinition):
    """Definition of a NixOS container."""

    @classmethod
    def get_type(cls):
        return "container"

    def __init__(self, xml, config):
        MachineDefinition.__init__(self, xml, config)
        # Pull the container-specific attribute set out of the evaluated
        # deployment expression; it must be present for this backend.
        container_attrs = xml.find("attrs/attr[@name='container']/attrs")
        assert container_attrs is not None
        # Name of the machine (or "localhost") that hosts the container.
        self.host = container_attrs.find("attr[@name='host']/string").get("value")
class ContainerState(MachineState):
    """State of a NixOS container."""

    @classmethod
    def get_type(cls):
        return "container"

    # Persistent state attributes, stored in the deployment database.
    state = nixops.util.attr_property("state", MachineState.MISSING, int)  # override
    private_ipv4 = nixops.util.attr_property("privateIpv4", None)
    host = nixops.util.attr_property("container.host", None)
    client_private_key = nixops.util.attr_property("container.clientPrivateKey", None)
    client_public_key = nixops.util.attr_property("container.clientPublicKey", None)
    public_host_key = nixops.util.attr_property("container.publicHostKey", None)

    def __init__(self, depl, name, id):
        MachineState.__init__(self, depl, name, id)
        # SSH connection to the *host* machine on which the container runs
        # (as opposed to SSH into the container itself).
        self.host_ssh = nixops.ssh_util.SSH(self.logger)
        self.host_ssh.register_host_fun(self.get_host_ssh)
        self.host_ssh.register_flag_fun(self.get_host_ssh_flags)

    @property
    def resource_id(self):
        return self.vm_id

    def address_to(self, m):
        # Containers on the same host can reach each other directly over
        # their private addresses; otherwise fall back to the generic logic.
        if isinstance(m, ContainerState) and self.host == m.host:
            return m.private_ipv4
        return MachineState.address_to(self, m)

    def get_ssh_name(self):
        """Return the name used to SSH into the container.

        For remote hosts the "host~ip" form is produced; presumably the SSH
        utility layer splits on '~' to build the proxy hop — TODO confirm.
        """
        assert self.private_ipv4
        if self.host == "localhost":
            return self.private_ipv4
        else:
            return self.get_host_ssh() + "~" + self.private_ipv4

    def get_ssh_private_key_file(self):
        return self._ssh_private_key_file or self.write_ssh_private_key(self.client_private_key)

    def get_ssh_flags(self):
        # When using a remote container host, we have to proxy the ssh
        # connection to the container via the host.
        flags = ["-i", self.get_ssh_private_key_file()]
        if self.host == "localhost":
            flags.extend(MachineState.get_ssh_flags(self))
        else:
            cmd = "ssh -x -a root@{0} {1} nc -c {2} {3}".format(self.get_host_ssh(), " ".join(self.get_host_ssh_flags()), self.private_ipv4, self.ssh_port)
            flags.extend(["-o", "ProxyCommand=" + cmd])
        return flags

    def get_ssh_for_copy_closure(self):
        # NixOS containers share the Nix store of the host, so we
        # should copy closures to the host.
        return self.host_ssh

    def copy_closure_to(self, path):
        # On localhost the store is already shared; nothing to copy.
        if self.host == "localhost": return
        MachineState.copy_closure_to(self, path)

    def get_host_ssh(self):
        """Return the SSH name of the host machine.

        A host of the form "__machine-<name>" refers to another machine in
        the same deployment; anything else is taken as a literal host name.
        """
        if self.host.startswith("__machine-"):
            m = self.depl.get_machine(self.host[10:])
            if not m.started:
                raise Exception("host machine ‘{0}’ of container ‘{1}’ is not up".format(m.name, self.name))
            return m.get_ssh_name()
        else:
            return self.host

    def get_host_ssh_flags(self):
        # Mirrors get_host_ssh(): deployment machines contribute their own
        # SSH flags, literal host names get none.
        if self.host.startswith("__machine-"):
            m = self.depl.get_machine(self.host[10:])
            if not m.started:
                raise Exception("host machine ‘{0}’ of container ‘{1}’ is not up".format(m.name, self.name))
            return m.get_ssh_flags()
        else:
            return []

    def wait_for_ssh(self, check=False):
        # Containers are reachable as soon as they are started; no polling.
        return True

    # Run a command in the container via ‘nixos-container run’. Since
    # this uses ‘nsenter’, we don't need SSH in the container.
    def run_command(self, command, **kwargs):
        # Escape single quotes so the command survives the outer
        # single-quoted bash -c wrapper.
        command = command.replace("'", r"'\''")
        return self.host_ssh.run_command(
            "nixos-container run {0} -- bash --login -c 'HOME=/root {1}'".format(self.vm_id, command),
            **kwargs)

    def get_physical_spec(self):
        # Authorize our client key for root logins inside the container.
        return {('users', 'extraUsers', 'root', 'openssh', 'authorizedKeys', 'keys'): [self.client_public_key]}

    def create_after(self, resources, defn):
        # If the container is hosted on another deployment machine, that
        # machine must be created first.
        host = defn.host if defn else self.host
        if host and host.startswith("__machine-"):
            return {self.depl.get_machine(host[10:])}
        else:
            return {}

    def create(self, defn, check, allow_reboot, allow_recreate):
        """Create (if necessary) and start the container, recording its IP
        address and host key."""
        assert isinstance(defn, ContainerDefinition)

        if not self.client_private_key:
            (self.client_private_key, self.client_public_key) = nixops.util.create_key_pair()

        if self.vm_id is None:
            self.log("building initial configuration...")

            # Minimal bootstrap configuration; the real configuration is
            # deployed later through the normal nixops machinery.
            expr = " ".join([
                '{ imports = [ <nixops/container-base.nix> ];',
                ' boot.isContainer = true;',
                ' networking.hostName = "{0}";'.format(self.name),
                ' users.extraUsers.root.openssh.authorizedKeys.keys = [ "{0}" ];'.format(self.client_public_key),
                '}'])

            expr_file = self.depl.tempdir + "/{0}-initial.nix".format(self.name)
            nixops.util.write_file(expr_file, expr)

            path = subprocess.check_output(
                ["nix-build", "<nixpkgs/nixos>", "-A", "system",
                 "-I", "nixos-config={0}".format(expr_file)]
                + self.depl._nix_path_flags()).rstrip()

            self.log("creating container...")
            self.host = defn.host
            self.copy_closure_to(path)
            # NOTE(review): the name is truncated to 7 characters —
            # presumably to fit nixos-container's name-length limit; confirm.
            self.vm_id = self.host_ssh.run_command(
                "nixos-container create {0} --ensure-unique-name --system-path '{1}'"
                .format(self.name[:7], path), capture_stdout=True).rstrip()
            self.state = self.STOPPED

        if self.state == self.STOPPED:
            self.host_ssh.run_command("nixos-container start {0}".format(self.vm_id))
            self.state = self.UP

        if self.private_ipv4 is None:
            self.private_ipv4 = self.host_ssh.run_command("nixos-container show-ip {0}".format(self.vm_id), capture_stdout=True).rstrip()
            self.log("IP address is {0}".format(self.private_ipv4))

        if self.public_host_key is None:
            self.public_host_key = self.host_ssh.run_command("nixos-container show-host-key {0}".format(self.vm_id), capture_stdout=True).rstrip()
            nixops.known_hosts.add(self.get_ssh_name(), self.public_host_key)

    def destroy(self, wipe=False):
        if not self.vm_id: return True

        if not self.depl.logger.confirm("are you sure you want to destroy NixOS container ‘{0}’?".format(self.name)): return False

        nixops.known_hosts.remove(self.get_ssh_name(), self.public_host_key)
        self.host_ssh.run_command("nixos-container destroy {0}".format(self.vm_id))

        return True

    def stop(self):
        if not self.vm_id: return True
        self.log("stopping container...")
        self.state = self.STOPPING
        self.host_ssh.run_command("nixos-container stop {0}".format(self.vm_id))
        self.state = self.STOPPED

    def start(self):
        if not self.vm_id: return True
        self.log("starting container...")
        self.host_ssh.run_command("nixos-container start {0}".format(self.vm_id))
        self.state = self.STARTING

    def _check(self, res):
        """Probe the container's status via ‘nixos-container status’ and
        update `res` and our recorded state accordingly."""
        if not self.vm_id:
            res.exists = False
            return
        status = self.host_ssh.run_command("nixos-container status {0}".format(self.vm_id), capture_stdout=True).rstrip()
        if status == "gone":
            res.exists = False
            self.state = self.MISSING
            return
        res.exists = True
        if status == "down":
            res.is_up = False
            self.state = self.STOPPED
            return
        res.is_up = True
        MachineState._check(self, res)
| lgpl-3.0 |
x303597316/hue | desktop/core/ext-py/requests-2.6.0/requests/packages/chardet/jpcntx.py | 1777 | 19348 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
NUM_OF_CATEGORY = 6
DONT_KNOW = -1
ENOUGH_REL_THRESHOLD = 100
MAX_REL_THRESHOLD = 1000
MINIMUM_DATA_THRESHOLD = 4
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
    """Scores 2-character hiragana sequences against the jp2CharContext
    frequency table to estimate how plausible a byte stream is as Japanese
    text.  Subclasses supply the encoding-specific get_order()."""

    def __init__(self):
        self.reset()

    def reset(self):
        self._mTotalRel = 0  # total sequence received
        # category counters, each integer counts sequence in its category
        self._mRelSample = [0] * NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._mNeedToSkipCharNum = 0
        self._mLastCharOrder = -1  # The order of previous char
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False

    def feed(self, aBuf, aLen):
        """Consume `aLen` bytes of `aBuf`, accumulating sequence statistics."""
        if self._mDone:
            return

        # The buffer we got is byte oriented, and a character may span in more than one
        # buffers. In case the last one or two byte in last buffer is not
        # complete, we record how many byte needed to complete that character
        # and skip these bytes here.  We can choose to record those bytes as
        # well and analyse the character once it is complete, but since a
        # character will not make much difference, by simply skipping
        # this character will simply our logic and improve performance.
        i = self._mNeedToSkipCharNum
        while i < aLen:
            order, charLen = self.get_order(aBuf[i:i + 2])
            i += charLen
            if i > aLen:
                # Character straddles the buffer boundary; remember how many
                # bytes of it still have to be skipped next time.
                self._mNeedToSkipCharNum = i - aLen
                self._mLastCharOrder = -1
            else:
                if (order != -1) and (self._mLastCharOrder != -1):
                    self._mTotalRel += 1
                    if self._mTotalRel > MAX_REL_THRESHOLD:
                        self._mDone = True
                        break
                    # Count this pair under its frequency category.
                    self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
                self._mLastCharOrder = order

    def got_enough_data(self):
        return self._mTotalRel > ENOUGH_REL_THRESHOLD

    def get_confidence(self):
        # This is just one way to calculate confidence. It works well for me.
        if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            # Fraction of observed pairs that fall outside the "never seen"
            # category 0.
            return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
        else:
            return DONT_KNOW

    def get_order(self, aBuf):
        # Base implementation: no hiragana order, advance one byte.
        # Encoding-specific subclasses override this.
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    """Context analysis specialised for Shift_JIS / CP932 byte sequences."""

    def __init__(self):
        # Bug fix: this override previously skipped the base-class
        # initializer, so a freshly constructed instance had none of the
        # analysis counters (_mTotalRel, _mRelSample, ...) set until some
        # external caller happened to invoke reset().  Initialize the base
        # state here so the object is immediately usable.
        JapaneseContextAnalysis.__init__(self)
        self.charset_name = "SHIFT_JIS"

    def get_charset_name(self):
        return self.charset_name

    def get_order(self, aBuf):
        """Return (hiragana order, byte length) for the character at the
        start of `aBuf`; order is -1 for non-hiragana."""
        if not aBuf:
            return -1, 1
        # find out current char's byte length
        first_char = wrap_ord(aBuf[0])
        if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
            charLen = 2
            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
                # These lead bytes only occur in the CP932 extension.
                self.charset_name = "CP932"
        else:
            charLen = 1

        # return its order if it is hiragana
        if len(aBuf) > 1:
            second_char = wrap_ord(aBuf[1])
            # NOTE(review): 202 (0xCA) is not a Shift_JIS lead byte — the
            # hiragana lead byte is 0x82 — but upstream chardet uses the same
            # constant, so it is preserved here; confirm against upstream
            # before changing.
            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, charLen

        return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    """Context analysis specialised for EUC-JP byte sequences."""

    def get_order(self, aBuf):
        """Return (hiragana order, byte length) for the character at the
        start of `aBuf`; order is -1 for non-hiragana."""
        if not aBuf:
            return -1, 1

        # Determine the byte length of the current character from its
        # lead byte.
        lead = wrap_ord(aBuf[0])
        if lead == 0x8E or 0xA1 <= lead <= 0xFE:
            charLen = 2
        elif lead == 0x8F:
            charLen = 3
        else:
            charLen = 1

        # Hiragana lives in the 0xA4 row; map the trail byte to its order.
        if len(aBuf) > 1:
            trail = wrap_ord(aBuf[1])
            if lead == 0xA4 and 0xA1 <= trail <= 0xF3:
                return trail - 0xA1, charLen

        return -1, charLen
# flake8: noqa
| apache-2.0 |
edx/edx-analytics-dashboard | common/tests/course_fixtures.py | 1 | 4343 | import uuid
class CourseStructureAPIFixtureMixin:
    """Represents a course that can serialize itself in the form generated by the Course Structure API."""

    # pylint: disable=unused-argument
    def __init__(self, *args, **kwargs):
        self._uuid = uuid.uuid4().hex
        self._type = None
        self._display_name = kwargs.get('display_name', '')
        self.graded = False
        self._assignment_type = None
        self._children = []
        # Course data
        self.org = None
        self.course = None
        self.run = None

    def __repr__(self):
        return self.id

    def __getitem__(self, item):
        """
        Allows course structure fixture objects to be treated like the
        dict data structures they represent.
        """
        return self.to_dict()[item]

    @property
    def children(self):
        return self._children

    def to_dict(self):
        """Return a dict representation of this block in the form generated by the Course Structure API."""
        child_ids = [node.id for node in self._children]
        return {
            'id': self.id,
            'type': self._type,
            'display_name': self._display_name,
            'graded': self.graded,
            'format': self._assignment_type,
            'children': child_ids,
        }

    def add_children(self, *children):
        """Attach `children` and propagate org/course/run/graded down the subtree."""
        self._children.extend(children)
        # Walk the whole subtree top-down so every descendant inherits the
        # course identification (and 'graded') from its parent.
        pending = [self]
        while pending:
            parent = pending.pop()
            for node in parent._children:
                node.org = parent.org
                node.course = parent.course
                node.run = parent.run
                node.graded = parent.graded
                pending.append(node)
        return self

    def pre_order(self, visit_fn=None):
        """Return the fixture rooted at `self` as a list visited in pre-order."""
        nodes = [self]
        for node in self._children:
            if visit_fn is not None:
                visit_fn(self, node)
            nodes.extend(node.pre_order(visit_fn=visit_fn))
        return nodes

    @property
    def id(self):
        """Uniquely identifies this block in the format used by the Course Structure API."""
        return 'i4x://{org}/{course}/{type}/{_uuid}'.format(
            org=self.org, course=self.course, type=self._type, _uuid=self._uuid
        )
class CourseFixture(CourseStructureAPIFixtureMixin):
    """Represents a course as returned by the Course Structure API."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._type = 'course'
        for attr, default in (('org', 'test_org'),
                              ('course', 'test_course'),
                              ('run', 'test_run')):
            setattr(self, attr, kwargs.get(attr, default))
        # The org/course/run triple uniquely identifies the course, so the
        # run doubles as the uuid component of the course id.
        self._uuid = self.run

    def course_structure(self):
        """Return a dict representing this course in the form generated by the Course Structure API."""
        blocks = {}
        for block in self.pre_order():
            blocks[block.id] = block.to_dict()
        return {'root': self.id, 'blocks': blocks}
class ChapterFixture(CourseStructureAPIFixtureMixin):
    """Represents a chapter as returned by the Course Structure API."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._type = 'chapter'  # block type reported in the API payload
class SequentialFixture(CourseStructureAPIFixtureMixin):
    """Represents a sequential as returned by the Course Structure API.

    Sequentials are the only block type here that carry grading metadata:
    `graded` (bool) and `assignment_type` (serialized as 'format').
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.graded = kwargs.get('graded', False)
        self._assignment_type = kwargs.get('assignment_type')
        self._type = 'sequential'  # block type reported in the API payload
class VerticalFixture(CourseStructureAPIFixtureMixin):
    """Represents a vertical as returned by the Course Structure API."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._type = 'vertical'  # block type reported in the API payload
class VideoFixture(CourseStructureAPIFixtureMixin):
    """Represents a video as returned by the Course Structure API."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Block type reported in the API id ('i4x://.../video/...').
        self._type = 'video'
| agpl-3.0 |
ProjectSWGCore/NGECore2 | scripts/mobiles/instances/tuskenking/tusken/tusken_bantha.py | 2 | 1352 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Build the 'heroic_tusken_bantha' mobile template and register it
    with the core spawn service."""
    mobileTemplate = MobileTemplate()

    # Identity and difficulty.
    mobileTemplate.setCreatureName('heroic_tusken_bantha')
    mobileTemplate.setLevel(90)
    mobileTemplate.setDifficulty(Difficulty.ELITE)

    # Spawn placement and behaviour flags.
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(True)
    mobileTemplate.setScale(1)
    mobileTemplate.setSocialGroup("heroic tusken")
    mobileTemplate.setAssistRange(12)
    mobileTemplate.setStalker(True)
    mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)

    # Visual appearance (client .iff model).
    templates = Vector()
    templates.add('object/mobile/shared_bantha_hue.iff')
    mobileTemplate.setTemplates(templates)

    # Single default unarmed weapon dealing kinetic damage.
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)

    # No special attacks; only the default melee attack is used.
    attacks = Vector()
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    mobileTemplate.setAttacks(attacks)

    core.spawnService.addMobileTemplate('heroic_tusken_bantha', mobileTemplate)
return | lgpl-3.0 |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_cmath.py | 84 | 18637 | from test.test_support import run_unittest
from test.test_math import parse_testfile, test_file
import unittest
import cmath, math
from cmath import phase, polar, rect, pi
INF = float('inf')
NAN = float('nan')

# All four combinations of signed zero in the real and imaginary parts.
complex_zeros = [complex(re_, im_) for re_ in [0.0, -0.0] for im_ in [0.0, -0.0]]

# Representative points "at infinity", walking counterclockwise around the
# complex plane, grouped by quadrant.
complex_infinities = [complex(re_, im_) for re_, im_ in [
    (INF, 0.0),    # 1st quadrant
    (INF, 2.3),
    (INF, INF),
    (2.3, INF),
    (0.0, INF),
    (-0.0, INF),   # 2nd quadrant
    (-2.3, INF),
    (-INF, INF),
    (-INF, 2.3),
    (-INF, 0.0),
    (-INF, -0.0),  # 3rd quadrant
    (-INF, -2.3),
    (-INF, -INF),
    (-2.3, -INF),
    (-0.0, -INF),
    (0.0, -INF),   # 4th quadrant
    (2.3, -INF),
    (INF, -INF),
    (INF, -2.3),
    (INF, -0.0)
]]

# Complex numbers with a NaN in exactly one of the two components.
complex_nans = [complex(re_, im_) for re_, im_ in [
    (NAN, -INF),
    (NAN, -2.3),
    (NAN, -0.0),
    (NAN, 0.0),
    (NAN, 2.3),
    (NAN, INF),
    (-INF, NAN),
    (-2.3, NAN),
    (-0.0, NAN),
    (0.0, NAN),
    (2.3, NAN),
    (INF, NAN)
]]
class CMathTests(unittest.TestCase):
    """Exercises the cmath module: special values, user-defined conversions,
    agreement with math, and the shared IEEE-754 test-data file."""

    # list of all functions in cmath
    test_functions = [getattr(cmath, fname) for fname in [
            'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh',
            'cos', 'cosh', 'exp', 'log', 'log10', 'sin', 'sinh',
            'sqrt', 'tan', 'tanh']]
    # test first and second arguments independently for 2-argument log
    test_functions.append(lambda x : cmath.log(x, 1729. + 0j))
    test_functions.append(lambda x : cmath.log(14.-27j, x))

    def setUp(self):
        # Shared test-data file (parsed lazily by test_specific_values).
        self.test_values = open(test_file)

    def tearDown(self):
        self.test_values.close()

    def rAssertAlmostEqual(self, a, b, rel_err = 2e-15, abs_err = 5e-323,
                           msg=None):
        """Fail if the two floating-point numbers are not almost equal.

        Determine whether floating-point values a and b are equal to within
        a (small) rounding error.  The default values for rel_err and
        abs_err are chosen to be suitable for platforms where a float is
        represented by an IEEE 754 double.  They allow an error of between
        9 and 19 ulps.
        """
        # special values testing
        if math.isnan(a):
            if math.isnan(b):
                return
            self.fail(msg or '{!r} should be nan'.format(b))

        if math.isinf(a):
            if a == b:
                return
            self.fail(msg or 'finite result where infinity expected: '
                      'expected {!r}, got {!r}'.format(a, b))

        # if both a and b are zero, check whether they have the same sign
        # (in theory there are examples where it would be legitimate for a
        # and b to have opposite signs; in practice these hardly ever
        # occur).
        if not a and not b:
            if math.copysign(1., a) != math.copysign(1., b):
                self.fail(msg or 'zero has wrong sign: expected {!r}, '
                          'got {!r}'.format(a, b))

        # if a-b overflows, or b is infinite, return False.  Again, in
        # theory there are examples where a is within a few ulps of the
        # max representable float, and then b could legitimately be
        # infinite.  In practice these examples are rare.
        try:
            absolute_error = abs(b-a)
        except OverflowError:
            pass
        else:
            # test passes if either the absolute error or the relative
            # error is sufficiently small.  The defaults amount to an
            # error of between 9 ulps and 19 ulps on an IEEE-754 compliant
            # machine.
            if absolute_error <= max(abs_err, rel_err * abs(a)):
                return
        self.fail(msg or
                  '{!r} and {!r} are not sufficiently close'.format(a, b))

    def test_constants(self):
        """cmath.pi and cmath.e should match their well-known values."""
        e_expected = 2.71828182845904523536
        pi_expected = 3.14159265358979323846
        self.assertAlmostEqual(cmath.pi, pi_expected, places=9,
            msg="cmath.pi is {}; should be {}".format(cmath.pi, pi_expected))
        self.assertAlmostEqual(cmath.e, e_expected, places=9,
            msg="cmath.e is {}; should be {}".format(cmath.e, e_expected))

    def test_user_object(self):
        # Test automatic calling of __complex__ and __float__ by cmath
        # functions

        # some random values to use as test values; we avoid values
        # for which any of the functions in cmath is undefined
        # (i.e. 0., 1., -1., 1j, -1j) or would cause overflow
        cx_arg = 4.419414439 + 1.497100113j
        flt_arg = -6.131677725

        # a variety of non-complex numbers, used to check that
        # non-complex return values from __complex__ give an error
        non_complexes = ["not complex", 1, 5L, 2., None,
                         object(), NotImplemented]

        # Now we introduce a variety of classes whose instances might
        # end up being passed to the cmath functions

        # usual case: new-style class implementing __complex__
        class MyComplex(object):
            def __init__(self, value):
                self.value = value
            def __complex__(self):
                return self.value

        # old-style class implementing __complex__
        class MyComplexOS:
            def __init__(self, value):
                self.value = value
            def __complex__(self):
                return self.value

        # classes for which __complex__ raises an exception
        class SomeException(Exception):
            pass
        class MyComplexException(object):
            def __complex__(self):
                raise SomeException
        class MyComplexExceptionOS:
            def __complex__(self):
                raise SomeException

        # some classes not providing __float__ or __complex__
        class NeitherComplexNorFloat(object):
            pass
        class NeitherComplexNorFloatOS:
            pass
        class MyInt(object):
            def __int__(self): return 2
            def __long__(self): return 2L
            def __index__(self): return 2
        class MyIntOS:
            def __int__(self): return 2
            def __long__(self): return 2L
            def __index__(self): return 2

        # other possible combinations of __float__ and __complex__
        # that should work
        class FloatAndComplex(object):
            def __float__(self):
                return flt_arg
            def __complex__(self):
                return cx_arg
        class FloatAndComplexOS:
            def __float__(self):
                return flt_arg
            def __complex__(self):
                return cx_arg
        class JustFloat(object):
            def __float__(self):
                return flt_arg
        class JustFloatOS:
            def __float__(self):
                return flt_arg

        for f in self.test_functions:
            # usual usage
            self.assertEqual(f(MyComplex(cx_arg)), f(cx_arg))
            self.assertEqual(f(MyComplexOS(cx_arg)), f(cx_arg))
            # other combinations of __float__ and __complex__
            self.assertEqual(f(FloatAndComplex()), f(cx_arg))
            self.assertEqual(f(FloatAndComplexOS()), f(cx_arg))
            self.assertEqual(f(JustFloat()), f(flt_arg))
            self.assertEqual(f(JustFloatOS()), f(flt_arg))
            # TypeError should be raised for classes not providing
            # either __complex__ or __float__, even if they provide
            # __int__, __long__ or __index__.  An old-style class
            # currently raises AttributeError instead of a TypeError;
            # this could be considered a bug.
            self.assertRaises(TypeError, f, NeitherComplexNorFloat())
            self.assertRaises(TypeError, f, MyInt())
            self.assertRaises(Exception, f, NeitherComplexNorFloatOS())
            self.assertRaises(Exception, f, MyIntOS())
            # non-complex return value from __complex__ -> TypeError
            for bad_complex in non_complexes:
                self.assertRaises(TypeError, f, MyComplex(bad_complex))
                self.assertRaises(TypeError, f, MyComplexOS(bad_complex))
            # exceptions in __complex__ should be propagated correctly
            self.assertRaises(SomeException, f, MyComplexException())
            self.assertRaises(SomeException, f, MyComplexExceptionOS())

    def test_input_type(self):
        # ints and longs should be acceptable inputs to all cmath
        # functions, by virtue of providing a __float__ method
        for f in self.test_functions:
            for arg in [2, 2L, 2.]:
                self.assertEqual(f(arg), f(arg.__float__()))

        # but strings should give a TypeError
        for f in self.test_functions:
            for arg in ["a", "long_string", "0", "1j", ""]:
                self.assertRaises(TypeError, f, arg)

    def test_cmath_matches_math(self):
        # check that corresponding cmath and math functions are equal
        # for floats in the appropriate range

        # test_values in (0, 1)
        test_values = [0.01, 0.1, 0.2, 0.5, 0.9, 0.99]

        # test_values for functions defined on [-1., 1.]
        unit_interval = test_values + [-x for x in test_values] + \
            [0., 1., -1.]

        # test_values for log, log10, sqrt
        positive = test_values + [1.] + [1./x for x in test_values]
        nonnegative = [0.] + positive

        # test_values for functions defined on the whole real line
        real_line = [0.] + positive + [-x for x in positive]

        # domain of each one-argument function under test
        test_functions = {
            'acos' : unit_interval,
            'asin' : unit_interval,
            'atan' : real_line,
            'cos' : real_line,
            'cosh' : real_line,
            'exp' : real_line,
            'log' : positive,
            'log10' : positive,
            'sin' : real_line,
            'sinh' : real_line,
            'sqrt' : nonnegative,
            'tan' : real_line,
            'tanh' : real_line}

        for fn, values in test_functions.items():
            float_fn = getattr(math, fn)
            complex_fn = getattr(cmath, fn)
            for v in values:
                # cmath result must agree with math in the real part and
                # have an exactly-zero imaginary part for real input.
                z = complex_fn(v)
                self.rAssertAlmostEqual(float_fn(v), z.real)
                self.assertEqual(0., z.imag)

        # test two-argument version of log with various bases
        for base in [0.5, 2., 10.]:
            for v in positive:
                z = cmath.log(v, base)
                self.rAssertAlmostEqual(math.log(v, base), z.real)
                self.assertEqual(0., z.imag)

    def test_specific_values(self):
        """Run every case from the shared cmath test-data file."""
        # The reference data assumes IEEE 754 doubles; skip otherwise.
        if not float.__getformat__("double").startswith("IEEE"):
            return

        def rect_complex(z):
            """Wrapped version of rect that accepts a complex number instead of
            two float arguments."""
            return cmath.rect(z.real, z.imag)

        def polar_complex(z):
            """Wrapped version of polar that returns a complex number instead of
            two floats."""
            return complex(*polar(z))

        for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
            arg = complex(ar, ai)
            expected = complex(er, ei)
            if fn == 'rect':
                function = rect_complex
            elif fn == 'polar':
                function = polar_complex
            else:
                function = getattr(cmath, fn)
            # Cases flagged as invalid/divide-by-zero must raise ValueError;
            # overflow cases must raise OverflowError.
            if 'divide-by-zero' in flags or 'invalid' in flags:
                try:
                    actual = function(arg)
                except ValueError:
                    continue
                else:
                    self.fail('ValueError not raised in test '
                          '{}: {}(complex({!r}, {!r}))'.format(id, fn, ar, ai))

            if 'overflow' in flags:
                try:
                    actual = function(arg)
                except OverflowError:
                    continue
                else:
                    self.fail('OverflowError not raised in test '
                          '{}: {}(complex({!r}, {!r}))'.format(id, fn, ar, ai))

            actual = function(arg)

            # Some branch cuts leave the sign of a zero component
            # unspecified; the flags tell us which signs to ignore.
            if 'ignore-real-sign' in flags:
                actual = complex(abs(actual.real), actual.imag)
                expected = complex(abs(expected.real), expected.imag)
            if 'ignore-imag-sign' in flags:
                actual = complex(actual.real, abs(actual.imag))
                expected = complex(expected.real, abs(expected.imag))

            # for the real part of the log function, we allow an
            # absolute error of up to 2e-15.
            if fn in ('log', 'log10'):
                real_abs_err = 2e-15
            else:
                real_abs_err = 5e-323

            error_message = (
                '{}: {}(complex({!r}, {!r}))\n'
                'Expected: complex({!r}, {!r})\n'
                'Received: complex({!r}, {!r})\n'
                'Received value insufficiently close to expected value.'
                ).format(id, fn, ar, ai,
                     expected.real, expected.imag,
                     actual.real, actual.imag)
            self.rAssertAlmostEqual(expected.real, actual.real,
                                        abs_err=real_abs_err,
                                        msg=error_message)
            self.rAssertAlmostEqual(expected.imag, actual.imag,
                                        msg=error_message)

    def assertCISEqual(self, a, b):
        """Assert two (r, phi) pairs are equal to within a fixed tolerance."""
        eps = 1E-7
        if abs(a[0] - b[0]) > eps or abs(a[1] - b[1]) > eps:
            self.fail((a ,b))

    def test_polar(self):
        """polar() of the cardinal points of the unit circle."""
        self.assertCISEqual(polar(0), (0., 0.))
        self.assertCISEqual(polar(1.), (1., 0.))
        self.assertCISEqual(polar(-1.), (1., pi))
        self.assertCISEqual(polar(1j), (1., pi/2))
        self.assertCISEqual(polar(-1j), (1., -pi/2))

    def test_phase(self):
        """phase() for ordinary values, signed zeros, infinities and NaNs."""
        self.assertAlmostEqual(phase(0), 0.)
        self.assertAlmostEqual(phase(1.), 0.)
        self.assertAlmostEqual(phase(-1.), pi)
        self.assertAlmostEqual(phase(-1.+1E-300j), pi)
        self.assertAlmostEqual(phase(-1.-1E-300j), -pi)
        self.assertAlmostEqual(phase(1j), pi/2)
        self.assertAlmostEqual(phase(-1j), -pi/2)

        # zeros
        self.assertEqual(phase(complex(0.0, 0.0)), 0.0)
        self.assertEqual(phase(complex(0.0, -0.0)), -0.0)
        self.assertEqual(phase(complex(-0.0, 0.0)), pi)
        self.assertEqual(phase(complex(-0.0, -0.0)), -pi)

        # infinities
        self.assertAlmostEqual(phase(complex(-INF, -0.0)), -pi)
        self.assertAlmostEqual(phase(complex(-INF, -2.3)), -pi)
        self.assertAlmostEqual(phase(complex(-INF, -INF)), -0.75*pi)
        self.assertAlmostEqual(phase(complex(-2.3, -INF)), -pi/2)
        self.assertAlmostEqual(phase(complex(-0.0, -INF)), -pi/2)
        self.assertAlmostEqual(phase(complex(0.0, -INF)), -pi/2)
        self.assertAlmostEqual(phase(complex(2.3, -INF)), -pi/2)
        self.assertAlmostEqual(phase(complex(INF, -INF)), -pi/4)
        self.assertEqual(phase(complex(INF, -2.3)), -0.0)
        self.assertEqual(phase(complex(INF, -0.0)), -0.0)
        self.assertEqual(phase(complex(INF, 0.0)), 0.0)
        self.assertEqual(phase(complex(INF, 2.3)), 0.0)
        self.assertAlmostEqual(phase(complex(INF, INF)), pi/4)
        self.assertAlmostEqual(phase(complex(2.3, INF)), pi/2)
        self.assertAlmostEqual(phase(complex(0.0, INF)), pi/2)
        self.assertAlmostEqual(phase(complex(-0.0, INF)), pi/2)
        self.assertAlmostEqual(phase(complex(-2.3, INF)), pi/2)
        self.assertAlmostEqual(phase(complex(-INF, INF)), 0.75*pi)
        self.assertAlmostEqual(phase(complex(-INF, 2.3)), pi)
        self.assertAlmostEqual(phase(complex(-INF, 0.0)), pi)

        # real or imaginary part NaN
        for z in complex_nans:
            self.assertTrue(math.isnan(phase(z)))

    def test_abs(self):
        """abs() of complex values with zeros, infinities and NaNs."""
        # zeros
        for z in complex_zeros:
            self.assertEqual(abs(z), 0.0)

        # infinities
        for z in complex_infinities:
            self.assertEqual(abs(z), INF)

        # real or imaginary part NaN
        # (an infinite component dominates a NaN in the other component)
        self.assertEqual(abs(complex(NAN, -INF)), INF)
        self.assertTrue(math.isnan(abs(complex(NAN, -2.3))))
        self.assertTrue(math.isnan(abs(complex(NAN, -0.0))))
        self.assertTrue(math.isnan(abs(complex(NAN, 0.0))))
        self.assertTrue(math.isnan(abs(complex(NAN, 2.3))))
        self.assertEqual(abs(complex(NAN, INF)), INF)
        self.assertEqual(abs(complex(-INF, NAN)), INF)
        self.assertTrue(math.isnan(abs(complex(-2.3, NAN))))
        self.assertTrue(math.isnan(abs(complex(-0.0, NAN))))
        self.assertTrue(math.isnan(abs(complex(0.0, NAN))))
        self.assertTrue(math.isnan(abs(complex(2.3, NAN))))
        self.assertEqual(abs(complex(INF, NAN)), INF)
        self.assertTrue(math.isnan(abs(complex(NAN, NAN))))

        # result overflows
        if float.__getformat__("double").startswith("IEEE"):
            self.assertRaises(OverflowError, abs, complex(1.4e308, 1.4e308))

    def assertCEqual(self, a, b):
        """Assert complex `a` equals the (real, imag) pair `b` to a fixed tolerance."""
        eps = 1E-7
        if abs(a.real - b[0]) > eps or abs(a.imag - b[1]) > eps:
            self.fail((a ,b))

    def test_rect(self):
        """rect() for the cardinal angles of the unit circle."""
        self.assertCEqual(rect(0, 0), (0, 0))
        self.assertCEqual(rect(1, 0), (1., 0))
        self.assertCEqual(rect(1, -pi), (-1., 0))
        self.assertCEqual(rect(1, pi/2), (0, 1.))
        self.assertCEqual(rect(1, -pi/2), (0, -1.))

    def test_isnan(self):
        """A complex number is NaN if either component is NaN."""
        self.assertFalse(cmath.isnan(1))
        self.assertFalse(cmath.isnan(1j))
        self.assertFalse(cmath.isnan(INF))
        self.assertTrue(cmath.isnan(NAN))
        self.assertTrue(cmath.isnan(complex(NAN, 0)))
        self.assertTrue(cmath.isnan(complex(0, NAN)))
        self.assertTrue(cmath.isnan(complex(NAN, NAN)))
        self.assertTrue(cmath.isnan(complex(NAN, INF)))
        self.assertTrue(cmath.isnan(complex(INF, NAN)))

    def test_isinf(self):
        """A complex number is infinite if either component is infinite."""
        self.assertFalse(cmath.isinf(1))
        self.assertFalse(cmath.isinf(1j))
        self.assertFalse(cmath.isinf(NAN))
        self.assertTrue(cmath.isinf(INF))
        self.assertTrue(cmath.isinf(complex(INF, 0)))
        self.assertTrue(cmath.isinf(complex(0, INF)))
        self.assertTrue(cmath.isinf(complex(INF, INF)))
        self.assertTrue(cmath.isinf(complex(NAN, INF)))
        self.assertTrue(cmath.isinf(complex(INF, NAN)))
def test_main():
    """Entry point used by regrtest: run the whole CMathTests suite."""
    run_unittest(CMathTests)

if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    test_main()
| gpl-2.0 |
burnash/skype4py | Skype4Py/sms.py | 21 | 7879 | """Short messaging system.
"""
__docformat__ = 'restructuredtext en'
from utils import *
class SmsMessage(Cached):
    """Represents an SMS message.
    """
    # Handles (message Ids) are validated as integers by the Cached base.
    _ValidateHandle = int

    def __repr__(self):
        return Cached.__repr__(self, 'Id')

    def _Alter(self, AlterName, Args=None):
        # Delegate an "ALTER SMS <id> <AlterName>" command to the Skype owner.
        return self._Owner._Alter('SMS', self.Id, AlterName, Args)

    def _Init(self):
        self._MakeOwner()

    def _Property(self, PropName, Set=None, Cache=True):
        # Read (Set is None) or write a single "SMS <id> <PropName>" property.
        return self._Owner._Property('SMS', self.Id, PropName, Set, Cache)

    def Delete(self):
        """Deletes this SMS message.
        """
        self._Owner._DoCommand('DELETE SMS %s' % self.Id)

    def MarkAsSeen(self):
        """Marks this SMS message as seen.
        """
        self._Owner._DoCommand('SET SMS %s SEEN' % self.Id)

    def Send(self):
        """Sends this SMS message.
        """
        self._Alter('SEND')

    def _GetBody(self):
        return self._Property('BODY')

    def _SetBody(self, Value):
        self._Property('BODY', Value)

    Body = property(_GetBody, _SetBody,
    doc="""Text of this SMS message.

    :type: unicode
    """)

    def _GetChunks(self):
        # CHUNKING returns "<count> <chars left>"; the first field is the
        # number of chunks this message was split into.
        return SmsChunkCollection(self, xrange(int(chop(self._Property('CHUNKING', Cache=False))[0])))

    Chunks = property(_GetChunks,
    doc="""Chunks of this SMS message. More than one if this is a multi-part message.

    :type: `SmsChunkCollection`
    """)

    def _GetDatetime(self):
        from datetime import datetime
        return datetime.fromtimestamp(self.Timestamp)

    Datetime = property(_GetDatetime,
    doc="""Timestamp of this SMS message as datetime object.

    :type: datetime.datetime
    """)

    def _GetFailureReason(self):
        return str(self._Property('FAILUREREASON'))

    FailureReason = property(_GetFailureReason,
    doc="""Reason an SMS message failed. Read this if `Status` == `enums.smsMessageStatusFailed`.

    :type: `enums`.smsFailureReason*
    """)

    def _GetId(self):
        return self._Handle

    Id = property(_GetId,
    doc="""Unique SMS message Id.

    :type: int
    """)

    def _GetIsFailedUnseen(self):
        return (self._Property('IS_FAILED_UNSEEN') == 'TRUE')

    IsFailedUnseen = property(_GetIsFailedUnseen,
    doc="""Tells if a failed SMS message was unseen.

    :type: bool
    """)

    def _GetPrice(self):
        return int(self._Property('PRICE'))

    Price = property(_GetPrice,
    doc="""SMS price. Expressed using `PricePrecision`. For a value expressed using `PriceCurrency`, use `PriceValue`.

    :type: int
    :see: `PriceCurrency`, `PricePrecision`, `PriceToText`, `PriceValue`
    """)

    def _GetPriceCurrency(self):
        return self._Property('PRICE_CURRENCY')

    PriceCurrency = property(_GetPriceCurrency,
    doc="""SMS price currency.

    :type: unicode
    :see: `Price`, `PricePrecision`, `PriceToText`, `PriceValue`
    """)

    def _GetPricePrecision(self):
        return int(self._Property('PRICE_PRECISION'))

    PricePrecision = property(_GetPricePrecision,
    doc="""SMS price precision.

    :type: int
    :see: `Price`, `PriceCurrency`, `PriceToText`, `PriceValue`
    """)

    def _GetPriceToText(self):
        return (u'%s %.3f' % (self.PriceCurrency, self.PriceValue)).strip()

    PriceToText = property(_GetPriceToText,
    doc="""SMS price as properly formatted text with currency.

    :type: unicode
    :see: `Price`, `PriceCurrency`, `PricePrecision`, `PriceValue`
    """)

    def _GetPriceValue(self):
        # A negative raw price means "unknown"; report it as 0.0.
        if self.Price < 0:
            return 0.0
        return float(self.Price) / (10 ** self.PricePrecision)

    PriceValue = property(_GetPriceValue,
    doc="""SMS price. Expressed in `PriceCurrency`.

    :type: float
    :see: `Price`, `PriceCurrency`, `PricePrecision`, `PriceToText`
    """)

    def _GetReplyToNumber(self):
        return str(self._Property('REPLY_TO_NUMBER'))

    def _SetReplyToNumber(self, Value):
        self._Property('REPLY_TO_NUMBER', Value)

    ReplyToNumber = property(_GetReplyToNumber, _SetReplyToNumber,
    doc="""Reply-to number for this SMS message.

    :type: str
    """)

    def _SetSeen(self, Value):
        from warnings import warn
        warn('SmsMessage.Seen = x: Use SmsMessage.MarkAsSeen() instead.', DeprecationWarning, stacklevel=2)
        if Value:
            self.MarkAsSeen()
        else:
            raise SkypeError(0, 'Seen can only be set to True')

    Seen = property(fset=_SetSeen,
    doc="""Set the read status of the SMS message. Accepts only True value.

    :type: bool

    :deprecated: Extremely unpythonic, use `MarkAsSeen` instead.
    """)

    def _GetStatus(self):
        return str(self._Property('STATUS'))

    Status = property(_GetStatus,
    doc="""SMS message status.

    :type: `enums`.smsMessageStatus*
    """)

    def _GetTargetNumbers(self):
        # TARGET_NUMBERS is a comma-space separated list of phone numbers.
        return tuple(split(self._Property('TARGET_NUMBERS'), ', '))

    def _SetTargetNumbers(self, Value):
        self._Property('TARGET_NUMBERS', ', '.join(Value))

    TargetNumbers = property(_GetTargetNumbers, _SetTargetNumbers,
    doc="""Target phone numbers.

    :type: tuple of str
    """)

    def _GetTargets(self):
        return SmsTargetCollection(self, split(self._Property('TARGET_NUMBERS'), ', '))

    Targets = property(_GetTargets,
    doc="""Target objects.

    :type: `SmsTargetCollection`
    """)

    def _GetTimestamp(self):
        return float(self._Property('TIMESTAMP'))

    Timestamp = property(_GetTimestamp,
    doc="""Timestamp of this SMS message.

    :type: float
    :see: `Datetime`
    """)

    def _GetType(self):
        return str(self._Property('TYPE'))

    Type = property(_GetType,
    doc="""SMS message type

    :type: `enums`.smsMessageType*
    """)
class SmsMessageCollection(CachedCollection):
    """Collection of `SmsMessage` objects."""
    _CachedType = SmsMessage
class SmsChunk(Cached):
    """Represents a single chunk of a multi-part SMS message.
    """
    # Handles (chunk indexes) are validated as integers by the Cached base.
    _ValidateHandle = int

    def __repr__(self):
        return Cached.__repr__(self, 'Id', 'Message')

    def _GetCharactersLeft(self):
        # CHUNKING is "<chunk count> <chars left>"; only the last chunk
        # has room left, every earlier chunk is full.
        count, left = map(int, chop(self.Message._Property('CHUNKING', Cache=False)))
        if self.Id == count - 1:
            return left
        return 0

    CharactersLeft = property(_GetCharactersLeft,
    doc="""CharactersLeft.

    :type: int
    """)

    def _GetId(self):
        return self._Handle

    Id = property(_GetId,
    doc="""SMS chunk Id.

    :type: int
    """)

    def _GetMessage(self):
        return self._Owner

    Message = property(_GetMessage,
    doc="""SMS message associated with this chunk.

    :type: `SmsMessage`
    """)

    def _GetText(self):
        return self.Message._Property('CHUNK %s' % self.Id)

    Text = property(_GetText,
    doc="""Text (body) of this SMS chunk.

    :type: unicode
    """)
class SmsChunkCollection(CachedCollection):
    """Collection of `SmsChunk` objects."""
    _CachedType = SmsChunk
class SmsTarget(Cached):
    """Represents a single target of a multi-target SMS message.
    """
    # Handles (phone numbers) are validated as strings by the Cached base.
    _ValidateHandle = str

    def __repr__(self):
        return Cached.__repr__(self, 'Number', 'Message')

    def _GetMessage(self):
        return self._Owner

    Message = property(_GetMessage,
    doc="""An SMS message object this target refers to.

    :type: `SmsMessage`
    """)

    def _GetNumber(self):
        return self._Handle

    Number = property(_GetNumber,
    doc="""Target phone number.

    :type: str
    """)

    def _GetStatus(self):
        # TARGET_STATUSES is "<number>=<status>, <number>=<status>, ...";
        # find our number.  Implicitly returns None if it is not listed.
        for t in split(self.Message._Property('TARGET_STATUSES'), ', '):
            number, status = t.split('=')
            if number == self.Number:
                return str(status)

    Status = property(_GetStatus,
    doc="""Status of this target.

    :type: `enums`.smsTargetStatus*
    """)
class SmsTargetCollection(CachedCollection):
    """Collection of `SmsTarget` objects."""
    _CachedType = SmsTarget
| bsd-3-clause |
nturaga/tools-iuc | tools/dexseq/dexseq_helper.py | 20 | 1390 | def validate_input( trans, error_map, param_values, page_param_map ):
"""
Validates the user input, before execution.
"""
factors = param_values['rep_factorName']
factor_name_list = []
factor_duplication = False
level_duplication = False
for factor in factors:
# factor names should be unique
fn = factor['factorName']
if fn in factor_name_list:
factor_duplication = True
break
factor_name_list.append( fn )
level_name_list = list()
for level in ['factorLevel1', 'factorLevel2']:
# level names under one factor should be unique
fl = factor[level]
if fl in level_name_list:
level_duplication = True
level_name_list.append( fl )
if level_duplication:
error_map['rep_factorName'] = [ dict() for t in factors ]
for i in range( len( factors ) ):
error_map['rep_factorName'][i]['FactorLevel1'] = [ {'factorLevel': 'Factor levels for each factor need to be unique'} for t in [factor['factorLevel1'], factor['factorLevel2']] ]
break
if factor_duplication:
error_map['rep_factorName'] = [ dict() for t in factors ]
for i in range( len( factors ) ):
error_map['rep_factorName'][i]['factorName'] = 'Factor names need to be unique.'
| mit |
h2oloopan/easymerge | EasyMerge/tests/scrapy/scrapy/squeue.py | 16 | 1108 | """
Scheduler queues
"""
import marshal, cPickle as pickle
from queuelib import queue
def _serializable_queue(queue_class, serialize, deserialize):
class SerializableQueue(queue_class):
def push(self, obj):
s = serialize(obj)
super(SerializableQueue, self).push(s)
def pop(self):
s = super(SerializableQueue, self).pop()
if s:
return deserialize(s)
return SerializableQueue
def _pickle_serialize(obj):
try:
return pickle.dumps(obj, protocol=2)
except pickle.PicklingError as e:
raise ValueError(str(e))
# Concrete scheduler queue types: each pairs a queuelib backing store
# (disk FIFO/LIFO) with a serializer (pickle or marshal).
PickleFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \
    _pickle_serialize, pickle.loads)
PickleLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \
    _pickle_serialize, pickle.loads)
MarshalFifoDiskQueue = _serializable_queue(queue.FifoDiskQueue, \
    marshal.dumps, marshal.loads)
MarshalLifoDiskQueue = _serializable_queue(queue.LifoDiskQueue, \
    marshal.dumps, marshal.loads)
# In-memory queues need no serialization; re-export queuelib's directly.
FifoMemoryQueue = queue.FifoMemoryQueue
LifoMemoryQueue = queue.LifoMemoryQueue
| mit |
gibil5/openhealth | models/management/lib/management_extra.py | 1 | 4447 | # -*- coding: utf-8 -*-
"""
Management - Extra
SRP
Responsibility of this class:
All methods that are smart but not used.
Created: 28 oct 2020
Last up: 28 oct 2020
"""
from __future__ import print_function
from openerp import models, fields, api
# ------------------------------------------------------------------- Class -----------------------
class ManagementExtra(models.Model):
    """
    Extra: smart but not used.

    Auxiliary computations for openhealth.management records: yearly
    aggregates, P/L min/max flags and coherency checks against the
    Report Sale Product report.
    """
    _inherit = "openhealth.management"

    # Model name used by every search below.  The original code referenced
    # a module-level `_MODEL_MGT` that was never defined anywhere in this
    # file, which raised NameError at runtime; it clearly stands for the
    # inherited model (update_year already used the literal), so define it
    # once here.
    _MGT_MODEL = "openhealth.management"

    # ----------------------------------------------------------- Update Max
    @api.multi
    def update_max(self):
        """
        Flag the months of the current year holding the minimum and the
        maximum `total_amount` (`pl_min` / `pl_max`), clearing both flags
        on every other monthly record first.
        """
        print()
        print('X - Update Max')

        # Clear both flags on all monthly records of this year.
        monthly = self.env[self._MGT_MODEL].search(
            [
                ('owner', 'in', ['month']),
                ('year', 'in', [self.year]),
            ],
            order='date_begin asc',
        )
        for mgt in monthly:
            mgt.pl_max = False
            mgt.pl_min = False

        # Min - month with the smallest total amount.
        mgt = self.env[self._MGT_MODEL].search(
            [
                ('owner', 'in', ['month']),
                ('year', 'in', [self.year]),
                ('month', 'not in', [False]),
            ],
            order='total_amount asc',
            limit=1,
        )
        mgt.pl_min = True

        # Max - month with the largest total amount.
        mgt = self.env[self._MGT_MODEL].search(
            [
                ('owner', 'in', ['month']),
                ('year', 'in', [self.year]),
                ('month', 'not in', [False]),
            ],
            order='total_amount desc',
            limit=1,
        )
        mgt.pl_max = True

    # ------------------------------------------------------ Update Year all
    @api.multi
    def update_year_all(self):
        """
        Re-run update_year() on every monthly record of the current year.
        """
        print()
        print('X - Update Year All')

        mgts = self.env[self._MGT_MODEL].search(
            [
                ('owner', 'in', ['month']),
                ('year', 'in', [self.year]),
            ],
            order='date_begin asc',
        )
        # Debug traces (kept from the original implementation); len() on
        # the recordset replaces the redundant search_count round trip.
        print(mgts)
        print(len(mgts))
        for mgt in mgts:
            print(mgt.name)
            mgt.update_year()

    # ------------------------------------------------------- Validate external
    @api.multi
    def validate_external(self):
        """
        Validates Data Coherency - External.

        Builds (once) a Report Sale Product for the month, updates it, and
        stores both its totals and the deltas against this record's own
        product statistics (`nr_products` / `amo_products`).
        """
        # Lazily create the report the first time this record is validated.
        if self.report_sale_product.name in [False]:
            self.report_sale_product = self.env['openhealth.report.sale.product'].create({
                'name': self.date_begin,
                'management_id': self.id,
            })
        rsp = self.report_sale_product
        rsp.update()

        # Copy report totals and compute deltas against own stats.
        self.rsp_count = rsp.total_qty
        self.rsp_total = rsp.total
        self.rsp_count_delta = self.nr_products - self.rsp_count
        self.rsp_total_delta = self.amo_products - self.rsp_total

    # ----------------------------------------------------------- Update Year
    @api.multi
    def update_year(self):
        """
        Update yearly total amount: sum `total_amount` over all monthly
        records of the year, then derive this month's share
        (`per_amo_total`), guarding against division by zero.
        """
        print()
        print('** Update Year')

        mgts = self.env[self._MGT_MODEL].search([
            ('owner', 'in', ['month']),
            ('year', 'in', [self.year]),
        ])
        total = 0
        for mgt in mgts:
            total = total + mgt.total_amount
        self.total_amount_year = total

        if self.total_amount_year != 0:
            self.per_amo_total = self.total_amount / self.total_amount_year

    # ------------------------------------------------------------- CN Analysis
    def credit_note_analysis(self, order):
        """
        Accumulate credit-note statistics: bump the counter and add the
        order's amount flow to the running total.
        """
        self.nr_credit_notes = self.nr_credit_notes + 1
        self.amo_credit_notes = self.amo_credit_notes + order.x_amount_flow
| agpl-3.0 |
COSMOGRAIL/COSMOULINE | pipe/playground/indi_export_NU.py | 1 | 3207 | #execfile("../config.py")
import os
import shutil
import sys
# to get access to all our modules without installing anything :
sys.path.append("../modules")
from kirbybase import KirbyBase, KBError
import rdbexport
import variousfct
import datetime
print "I am a special version, if you just want to translate an old db into a pkl."
print "I write my files in the current directory."
print "You have to configure me..."

# Hard-coded configuration: target name and path to the KirbyBase database.
configstr = "HS2209"
databasepath = "/Users/mtewes/Desktop/HS2209_database.dat"

############ Building the filenames ##############

# Output files are prefixed with today's date: <YYYY-MM-DD>_<config>.
now = datetime.datetime.now()
datestr = now.strftime("%Y-%m-%d")
filename = "%s_%s" % (datestr, configstr)
configdir = "."
readmefilepath = os.path.join(configdir, filename + "_readme.txt")
pklfilepath = os.path.join(configdir, filename + "_db.pkl")
print "My basename : %s" % (filename)

########### The readme #############

readme = ["This is the automatic readme file for\n%s\n" % pklfilepath]

# We do only one select :
# All records, all fields, sorted by setname then mjd.
db = KirbyBase()
images = db.select(databasepath, ['recno'], ['*'], sortFields=['setname', 'mjd'], returnType='dict')
# A second ordering, purely by date, to report the time span.
mjdsortedimages = sorted(images, key=lambda k: k['mjd'])

readme.append("Target : %s" % configstr)
readme.append("Total : %i images" % (len(images)))
readme.append("Time span : %s -> %s" % (mjdsortedimages[0]["datet"], mjdsortedimages[-1]["datet"]))

# Distinct telescopes and set names present in the database.
telescopes = sorted(list(set([image["telescopename"] for image in images])))
setnames = sorted(list(set([image["setname"] for image in images])))
readme.append("Telescopes : %s" % ",".join(telescopes))
readme.append("Setnames : %s" % ",".join(setnames))
#readme.append("Ref image name : %s " % refimgname)

# Introspect the database schema to list deconvolutions and renorm fields.
fieldnames = db.getFieldNames(databasepath)
fieldtypes = db.getFieldTypes(databasepath)
fielddesc = ["%s %s" % (fieldname, fieldtype) for (fieldname, fieldtype) in zip(fieldnames, fieldtypes)]

# Deconvolution runs are identified by "decfilenum_<name>" columns;
# their readout sources by "out_<...>_flux" / "out_<...>_int" columns.
deconvolutions = [fieldname[11:] for fieldname in fieldnames if fieldname.split("_")[0] == "decfilenum"]
deconvolutionsreadout = [fieldname[4:] for fieldname in fieldnames if (fieldname.split("_")[0] == "out") and ( fieldname.split("_")[-1] == "flux" or fieldname.split("_")[-1] == "int")]
readme.append("\nDeconvolutions :")
readme.extend(deconvolutions)
readme.append("\nDeconvolution sources :")
readme.extend(deconvolutionsreadout)

renorms = [fieldname for fieldname in fieldnames if fieldname[0:6] == "renorm"]
readme.append("\nRenorms :")
readme.extend(renorms)

readmetxt = "\n".join(readme)
print "Here is the readme text : \n\n%s\n\n" % (readmetxt)
print "I will now write the files."
#variousfct.proquest(askquestions)

readme.append("\nThe full list of fields :")
readme.extend(fielddesc)
readme.append("\n\n(end of automatic part)\n")
readmetxt = "\n".join(readme)

if os.path.exists(readmefilepath) or os.path.exists(pklfilepath):
    print "The files exist. I will overwrite them."
    # NOTE(review): `askquestions` is never defined in this script, so this
    # call would raise NameError when the output files already exist —
    # it is presumably a leftover from the config.py this was copied from.
    variousfct.proquest(askquestions)

if os.path.exists(readmefilepath):
    os.remove(readmefilepath)
if os.path.exists(pklfilepath):
    os.remove(pklfilepath)

# Write the readme, then pickle the full record list next to it.
out_file = open(readmefilepath, "w")
out_file.write(readmetxt)
out_file.close()
print "Wrote %s" % readmefilepath

variousfct.writepickle(images, pklfilepath, verbose=True)
| gpl-3.0 |
zerc/django | django/db/models/options.py | 39 | 29860 | from __future__ import unicode_literals
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import connections
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
# Sentinel for Options._get_fields(): traverse only the parents in the proxy
# chain up to the model's concrete model (used when the public get_fields()
# API is called with include_parents=False).
PROXY_PARENTS = object()

# Shared immutable default relation tree for models that have no other
# models' relations pointing at them.
EMPTY_RELATION_TREE = tuple()

IMMUTABLE_WARNING = (
    "The return type of '%s' should never be mutated. If you want to manipulate this list "
    "for your own use, make a copy first."
)

# The Options attributes that a model may override through "class Meta".
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
                 'unique_together', 'permissions', 'get_latest_by',
                 'order_with_respect_to', 'app_label', 'db_tablespace',
                 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
                 'index_together', 'apps', 'default_permissions',
                 'select_on_save', 'default_related_name',
                 'required_db_features', 'required_db_vendor')
def normalize_together(option_together):
    """
    option_together can be either a tuple of tuples, or a single
    tuple of two strings. Normalize it to a tuple of tuples, so that
    calling code can uniformly expect that.

    Invalid values are returned verbatim; the check framework reports
    them later.
    """
    if not option_together:
        return ()
    if not isinstance(option_together, (tuple, list)):
        # Not a valid together-option; hand it back unchanged.
        return option_together
    try:
        head = next(iter(option_together))
        if not isinstance(head, (tuple, list)):
            # A single group was given; wrap it so we always have a
            # collection of groups.
            option_together = (option_together,)
        # Normalize everything to tuples.
        return tuple(tuple(group) for group in option_together)
    except TypeError:
        # Malformed contents (e.g. a non-iterable group): return verbatim.
        return option_together
def make_immutable_fields_list(name, data):
    # Wrap *data* in a list that raises a helpful warning (naming the
    # property it came from) if callers attempt to mutate it.
    warning = IMMUTABLE_WARNING % name
    return ImmutableList(data, warning=warning)
@python_2_unicode_compatible
class Options(object):
    """
    Model metadata container, available to code as ``Model._meta``.

    Holds everything Django knows about a model apart from its field
    values: naming, database table, ordering, permissions, inheritance
    links and cached field lookups. Instances are created by the model
    metaclass and populated via contribute_to_class().
    """
    # Names of cached properties expired together by _expire_cache().
    # Forward properties derive from fields declared on this model;
    # reverse properties derive from relations pointing at this model
    # from other models.
    FORWARD_PROPERTIES = ('fields', 'many_to_many', 'concrete_fields',
                          'local_concrete_fields', '_forward_fields_map')
    REVERSE_PROPERTIES = ('related_objects', 'fields_map', '_relation_tree')

    def __init__(self, meta, app_label=None):
        # Set every option to its default; contribute_to_class() later
        # overlays the values from the model's "class Meta".
        self._get_fields_cache = {}
        self.proxied_children = []
        self.local_fields = []
        self.local_many_to_many = []
        self.virtual_fields = []
        self.model_name = None
        self.verbose_name = None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        self._ordering_clash = False
        self.unique_together = []
        self.index_together = []
        self.select_on_save = False
        self.default_permissions = ('add', 'change', 'delete')
        self.permissions = []
        self.object_name = None
        self.app_label = app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.required_db_features = []
        self.required_db_vendor = None
        self.meta = meta
        self.pk = None
        self.has_auto_field = False
        self.auto_field = None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = OrderedDict()
        self.auto_created = False
        # To handle various inheritance situations, we need to track where
        # managers came from (concrete or abstract base classes). `managers`
        # keeps a list of 3-tuples of the form:
        # (creation_counter, instance, abstract(=True))
        self.managers = []
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
        # A custom app registry to use, if you're making a separate model set.
        self.apps = apps
        self.default_related_name = None

    @property
    def label(self):
        """'app_label.ObjectName' identifier for this model."""
        return '%s.%s' % (self.app_label, self.object_name)

    @property
    def label_lower(self):
        """'app_label.modelname' identifier (lowercased model name)."""
        return '%s.%s' % (self.app_label, self.model_name)

    @property
    def app_config(self):
        # Don't go through get_app_config to avoid triggering imports.
        return self.apps.app_configs.get(self.app_label)

    @property
    def installed(self):
        """True if this model's app is registered in the app registry."""
        return self.app_config is not None

    @property
    def abstract_managers(self):
        """Managers inherited from abstract base classes, with counters/names."""
        return [
            (counter, instance.name, instance) for counter, instance, abstract
            in self.managers if abstract
        ]

    @property
    def concrete_managers(self):
        """Managers declared on concrete classes, with counters/names."""
        return [
            (counter, instance.name, instance) for counter, instance, abstract
            in self.managers if not abstract
        ]

    def contribute_to_class(self, cls, name):
        """
        Attach this Options instance to the model class *cls* as ``_meta``
        and apply the overrides declared on the model's "class Meta".
        """
        from django.db import connection
        from django.db.backends.utils import truncate_name
        cls._meta = self
        self.model = cls
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.verbose_name = camel_case_to_spaces(self.object_name)
        # Store the original user-defined values for each option,
        # for use when serializing the model definition
        self.original_attrs = {}
        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
                elif hasattr(self.meta, attr_name):
                    setattr(self, attr_name, getattr(self.meta, attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
            self.unique_together = normalize_together(self.unique_together)
            self.index_together = normalize_together(self.index_together)
            # verbose_name_plural is a special case because it uses a 's'
            # by default.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = string_concat(self.verbose_name, 's')
            # order_with_respect_to and ordering are mutually exclusive.
            self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
        else:
            self.verbose_name_plural = string_concat(self.verbose_name, 's')
        del self.meta
        # If the db_table wasn't provided, use the app_label + model_name.
        if not self.db_table:
            self.db_table = "%s_%s" % (self.app_label, self.model_name)
            self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())

    def _prepare(self, model):
        """
        Finish model setup once all fields are known: resolve
        order_with_respect_to and guarantee a primary key exists.
        """
        if self.order_with_respect_to:
            # The app registry will not be ready at this point, so we cannot
            # use get_field().
            query = self.order_with_respect_to
            try:
                self.order_with_respect_to = next(
                    f for f in self._get_fields(reverse=False)
                    if f.name == query or f.attname == query
                )
            except StopIteration:
                raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, query))
            self.ordering = ('_order',)
            if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
                model.add_to_class('_order', OrderWrt())
        else:
            self.order_with_respect_to = None
        if self.pk is None:
            if self.parents:
                # Promote the first parent link in lieu of adding yet another
                # field.
                field = next(six.itervalues(self.parents))
                # Look for a local field with the same name as the
                # first parent link. If a local field has already been
                # created, use it instead of promoting the parent
                already_created = [fld for fld in self.local_fields if fld.name == field.name]
                if already_created:
                    field = already_created[0]
                field.primary_key = True
                self.setup_pk(field)
            else:
                auto = AutoField(verbose_name='ID', primary_key=True,
                                 auto_created=True)
                model.add_to_class('id', auto)

    def add_field(self, field, virtual=False):
        """
        Register *field* on this model and expire the affected field caches.
        """
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
        # Move many-to-many related fields from self.fields into
        # self.many_to_many.
        if virtual:
            self.virtual_fields.append(field)
        elif field.is_relation and field.many_to_many:
            self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
        else:
            self.local_fields.insert(bisect(self.local_fields, field), field)
            self.setup_pk(field)
        # If the field being added is a relation to another known field,
        # expire the cache on this field and the forward cache on the field
        # being referenced, because there will be new relationships in the
        # cache. Otherwise, expire the cache of references *to* this field.
        # The mechanism for getting at the related model is slightly odd -
        # ideally, we'd just ask for field.related_model. However, related_model
        # is a cached property, and all the models haven't been loaded yet, so
        # we need to make sure we don't cache a string reference.
        if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
            try:
                field.remote_field.model._meta._expire_cache(forward=False)
            except AttributeError:
                pass
            self._expire_cache()
        else:
            self._expire_cache(reverse=False)

    def setup_pk(self, field):
        # Adopt the first primary-key field seen as this model's pk.
        if not self.pk and field.primary_key:
            self.pk = field
            field.serialize = False

    def setup_proxy(self, target):
        """
        Does the internal setup so that the current model is a proxy for
        "target".
        """
        self.pk = target._meta.pk
        self.proxy_for_model = target
        self.db_table = target._meta.db_table

    def __repr__(self):
        return '<Options for %s>' % self.object_name

    def __str__(self):
        return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))

    def can_migrate(self, connection):
        """
        Return True if the model can/should be migrated on the `connection`.
        `connection` can be either a real connection or a connection alias.
        """
        if self.proxy or self.swapped or not self.managed:
            return False
        if isinstance(connection, six.string_types):
            connection = connections[connection]
        if self.required_db_vendor:
            return self.required_db_vendor == connection.vendor
        if self.required_db_features:
            return all(getattr(connection.features, feat, False)
                       for feat in self.required_db_features)
        return True

    @property
    def verbose_name_raw(self):
        """
        There are a few places where the untranslated verbose name is needed
        (so that we get the same value regardless of currently active
        locale).
        """
        with override(None):
            return force_text(self.verbose_name)

    @property
    def swapped(self):
        """
        Has this model been swapped out for another? If so, return the model
        name of the replacement; otherwise, return None.
        For historical reasons, model name lookups using get_model() are
        case insensitive, so we make sure we are case insensitive here.
        """
        if self.swappable:
            swapped_for = getattr(settings, self.swappable, None)
            if swapped_for:
                try:
                    swapped_label, swapped_object = swapped_for.split('.')
                except ValueError:
                    # setting not in the format app_label.model_name
                    # raising ImproperlyConfigured here causes problems with
                    # test cleanup code - instead it is raised in get_user_model
                    # or as part of validation.
                    return swapped_for
                if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
                    return swapped_for
        return None

    @cached_property
    def fields(self):
        """
        Returns a list of all forward fields on the model and its parents,
        excluding ManyToManyFields.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        # For legacy reasons, the fields property should only contain forward
        # fields that are not virtual or with a m2m cardinality. Therefore we
        # pass these three filters as filters to the generator.
        # The third lambda is a longwinded way of checking f.related_model - we don't
        # use that property directly because related_model is a cached property,
        # and all the models may not have been loaded yet; we don't want to cache
        # the string reference to the related_model.
        is_not_an_m2m_field = lambda f: not (f.is_relation and f.many_to_many)
        is_not_a_generic_relation = lambda f: not (f.is_relation and f.one_to_many)
        is_not_a_generic_foreign_key = lambda f: not (
            f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
        )
        return make_immutable_fields_list(
            "fields",
            (f for f in self._get_fields(reverse=False) if
             is_not_an_m2m_field(f) and is_not_a_generic_relation(f)
             and is_not_a_generic_foreign_key(f))
        )

    @cached_property
    def concrete_fields(self):
        """
        Returns a list of all concrete fields on the model and its parents.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        return make_immutable_fields_list(
            "concrete_fields", (f for f in self.fields if f.concrete)
        )

    @cached_property
    def local_concrete_fields(self):
        """
        Returns a list of all concrete fields on the model.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        return make_immutable_fields_list(
            "local_concrete_fields", (f for f in self.local_fields if f.concrete)
        )

    @cached_property
    def many_to_many(self):
        """
        Returns a list of all many to many fields on the model and its parents.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this list.
        """
        return make_immutable_fields_list(
            "many_to_many",
            (f for f in self._get_fields(reverse=False)
             if f.is_relation and f.many_to_many)
        )

    @cached_property
    def related_objects(self):
        """
        Returns all related objects pointing to the current model. The related
        objects can come from a one-to-one, one-to-many, or many-to-many field
        relation type.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
        return make_immutable_fields_list(
            "related_objects",
            (obj for obj in all_related_fields
             if not obj.hidden or obj.field.many_to_many)
        )

    @cached_property
    def _forward_fields_map(self):
        # Map of field name (and attname) -> forward field instance, used by
        # get_field() for the fast path.
        res = {}
        fields = self._get_fields(reverse=False)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                pass
        return res

    @cached_property
    def fields_map(self):
        # Map of field name (and attname) -> field instance, including
        # reverse and hidden fields; the slow path of get_field().
        res = {}
        fields = self._get_fields(forward=False, include_hidden=True)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                pass
        return res

    def get_field(self, field_name):
        """
        Return a field instance given the name of a forward or reverse field.
        """
        try:
            # In order to avoid premature loading of the relation tree
            # (expensive) we prefer checking if the field is a forward field.
            return self._forward_fields_map[field_name]
        except KeyError:
            # If the app registry is not ready, reverse fields are
            # unavailable, therefore we throw a FieldDoesNotExist exception.
            if not self.apps.models_ready:
                raise FieldDoesNotExist(
                    "%s has no field named %r. The app cache isn't ready yet, "
                    "so if this is an auto-created related field, it won't "
                    "be available yet." % (self.object_name, field_name)
                )
        try:
            # Retrieve field instance by name from cached or just-computed
            # field map.
            return self.fields_map[field_name]
        except KeyError:
            raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))

    def get_base_chain(self, model):
        """
        Return a list of parent classes leading to `model` (ordered from
        closest to most distant ancestor). This has to handle the case where
        `model` is a grandparent or even more distant relation.
        """
        if not self.parents:
            return []
        if model in self.parents:
            return [model]
        for parent in self.parents:
            res = parent._meta.get_base_chain(model)
            if res:
                res.insert(0, parent)
                return res
        return []

    def get_parent_list(self):
        """
        Returns all the ancestors of this model as a list ordered by MRO.
        Useful for determining if something is an ancestor, regardless of lineage.
        """
        result = OrderedSet(self.parents)
        for parent in self.parents:
            for ancestor in parent._meta.get_parent_list():
                result.add(ancestor)
        return list(result)

    def get_ancestor_link(self, ancestor):
        """
        Returns the field on the current model which points to the given
        "ancestor". This is possible an indirect link (a pointer to a parent
        model, which points, eventually, to the ancestor). Used when
        constructing table joins for model inheritance.
        Returns None if the model isn't an ancestor of this one.
        """
        if ancestor in self.parents:
            return self.parents[ancestor]
        for parent in self.parents:
            # Tries to get a link field from the immediate parent
            parent_link = parent._meta.get_ancestor_link(ancestor)
            if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is that parent
                # links
                return self.parents[parent] or parent_link

    def _populate_directed_relation_graph(self):
        """
        This method is used by each model to find its reverse objects. As this
        method is very expensive and is accessed frequently (it looks up every
        field in a model, in every app), it is computed on first access and then
        is set as a property on every model.
        """
        related_objects_graph = defaultdict(list)
        all_models = self.apps.get_models(include_auto_created=True)
        for model in all_models:
            # Abstract model's fields are copied to child models, hence we will
            # see the fields from the child models.
            if model._meta.abstract:
                continue
            fields_with_relations = (
                f for f in model._meta._get_fields(reverse=False, include_parents=False)
                if f.is_relation and f.related_model is not None
            )
            for f in fields_with_relations:
                if not isinstance(f.remote_field.model, six.string_types):
                    related_objects_graph[f.remote_field.model._meta].append(f)
        for model in all_models:
            # Set the relation_tree using the internal __dict__. In this way
            # we avoid calling the cached property. In attribute lookup,
            # __dict__ takes precedence over a data descriptor (such as
            # @cached_property). This means that the _meta._relation_tree is
            # only called if related_objects is not in __dict__.
            related_objects = related_objects_graph[model._meta]
            model._meta.__dict__['_relation_tree'] = related_objects
        # It seems it is possible that self is not in all_models, so guard
        # against that with default for get().
        return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)

    @cached_property
    def _relation_tree(self):
        # Fields on *other* models that point at this model; see
        # _populate_directed_relation_graph() for how it is built.
        return self._populate_directed_relation_graph()

    def _expire_cache(self, forward=True, reverse=True):
        """Drop the cached field properties so they are recomputed on access."""
        # This method is usually called by apps.cache_clear(), when the
        # registry is finalized, or when a new field is added.
        properties_to_expire = []
        if forward:
            properties_to_expire.extend(self.FORWARD_PROPERTIES)
        if reverse and not self.abstract:
            properties_to_expire.extend(self.REVERSE_PROPERTIES)
        for cache_key in properties_to_expire:
            try:
                delattr(self, cache_key)
            except AttributeError:
                pass
        self._get_fields_cache = {}

    def get_fields(self, include_parents=True, include_hidden=False):
        """
        Returns a list of fields associated to the model. By default, includes
        forward and reverse fields, fields derived from inheritance, but not
        hidden fields. The returned fields can be changed using the parameters:
        - include_parents: include fields derived from inheritance
        - include_hidden: include fields that have a related_name that
          starts with a "+"
        """
        if include_parents is False:
            include_parents = PROXY_PARENTS
        return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)

    def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
                    seen_models=None):
        """
        Internal helper function to return fields of the model.
        * If forward=True, then fields defined on this model are returned.
        * If reverse=True, then relations pointing to this model are returned.
        * If include_hidden=True, then fields with is_hidden=True are returned.
        * The include_parents argument toggles if fields from parent models
          should be included. It has three values: True, False, and
          PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
          fields defined for the current model or any of its parents in the
          parent chain to the model's concrete model.
        """
        if include_parents not in (True, False, PROXY_PARENTS):
            raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
        # This helper function is used to allow recursion in ``get_fields()``
        # implementation and to provide a fast way for Django's internals to
        # access specific subsets of fields.
        # We must keep track of which models we have already seen. Otherwise we
        # could include the same field multiple times from different models.
        topmost_call = False
        if seen_models is None:
            seen_models = set()
            topmost_call = True
        seen_models.add(self.model)
        # Creates a cache key composed of all arguments
        cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
        try:
            # In order to avoid list manipulation. Always return a shallow copy
            # of the results.
            return self._get_fields_cache[cache_key]
        except KeyError:
            pass
        fields = []
        # Recursively call _get_fields() on each parent, with the same
        # options provided in this call.
        if include_parents is not False:
            for parent in self.parents:
                # In diamond inheritance it is possible that we see the same
                # model from two different routes. In that case, avoid adding
                # fields from the same parent again.
                if parent in seen_models:
                    continue
                if (parent._meta.concrete_model != self.concrete_model and
                        include_parents == PROXY_PARENTS):
                    continue
                for obj in parent._meta._get_fields(
                        forward=forward, reverse=reverse, include_parents=include_parents,
                        include_hidden=include_hidden, seen_models=seen_models):
                    if hasattr(obj, 'parent_link') and obj.parent_link:
                        continue
                    fields.append(obj)
        if reverse:
            # Tree is computed once and cached until the app cache is expired.
            # It is composed of a list of fields pointing to the current model
            # from other models.
            all_fields = self._relation_tree
            for field in all_fields:
                # If hidden fields should be included or the relation is not
                # intentionally hidden, add to the fields dict.
                if include_hidden or not field.remote_field.hidden:
                    fields.append(field.remote_field)
        if forward:
            fields.extend(
                field for field in chain(self.local_fields, self.local_many_to_many)
            )
            # Virtual fields are recopied to each child model, and they get a
            # different model as field.model in each child. Hence we have to
            # add the virtual fields separately from the topmost call. If we
            # did this recursively similar to local_fields, we would get field
            # instances with field.model != self.model.
            if topmost_call:
                fields.extend(
                    f for f in self.virtual_fields
                )
        # In order to avoid list manipulation. Always
        # return a shallow copy of the results
        fields = make_immutable_fields_list("get_fields()", fields)
        # Store result into cache for later access
        self._get_fields_cache[cache_key] = fields
        return fields
| bsd-3-clause |
KevinGoodsell/sympy | sympy/geometry/curve.py | 5 | 1190 | from sympy.core import sympify
from sympy.geometry.exceptions import GeometryError
from entity import GeometryEntity
class Curve(GeometryEntity):
    """
    A curve in space, parameterized by a list of coordinate functions of a
    single parameter together with ``(parameter, lower, upper)`` limits.

    Example:
    ========
    >>> from sympy import sin, cos, Symbol
    >>> t = Symbol("t")
    >>> C = Curve([sin(t), cos(t)], (t, 0, 2))
    >>> C.functions
    [sin(t), cos(t)]
    >>> C.limits
    (t, 0, 2)
    >>> C.parameter
    t
    """
    def __new__(cls, function, limits):
        """Create a Curve.

        Raises GeometryError if ``function`` cannot be handled (sympifies to
        a falsy value) and ValueError if ``limits`` is not a 3-item sequence.
        """
        fun = sympify(function)
        if not fun:
            # Stray trailing semicolons removed; behavior unchanged.
            raise GeometryError("%s.__new__ don't know how to handle" % cls.__name__)
        if not isinstance(limits, (list, tuple)) or len(limits) != 3:
            raise ValueError("Limits argument has wrong syntax")
        return GeometryEntity.__new__(cls, fun, limits)

    @property
    def functions(self):
        """The functions specifying the curve."""
        # Idiomatic indexing instead of an explicit __getitem__ call.
        return self[0]

    @property
    def parameter(self):
        """The curve function variable."""
        return self[1][0]

    @property
    def limits(self):
        """The limits for the curve: (parameter, lower, upper)."""
        return self[1]
| bsd-3-clause |
Acehaidrey/incubator-airflow | airflow/__main__.py | 11 | 1368 | #!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Main executable module"""
import os
import argcomplete
from airflow.cli import cli_parser
from airflow.configuration import conf
def main():
    """Entry point: export Kerberos credentials if enabled, then run the CLI."""
    if conf.get("core", "security") == "kerberos":
        # Point the Kerberos libraries at the configured cache and keytab.
        krb_env = {
            "KRB5CCNAME": conf.get("kerberos", "ccache"),
            "KRB5_KTNAME": conf.get("kerberos", "keytab"),
        }
        os.environ.update(krb_env)
    parser = cli_parser.get_parser()
    argcomplete.autocomplete(parser)
    args = parser.parse_args()
    args.func(args)
# Support direct execution (and ``python -m airflow``) by dispatching to main().
if __name__ == '__main__':
    main()
| apache-2.0 |
gregdek/ansible | test/runner/lib/csharp_import_analysis.py | 49 | 2452 | """Analyze C# import statements."""
from __future__ import absolute_import, print_function
import os
import re
from lib.util import (
display,
)
def get_csharp_module_utils_imports(powershell_targets, csharp_targets):
    """Return a dictionary of module_utils names mapped to sets of target file paths.
    :type powershell_targets: list[TestTarget] - PowerShell files
    :type csharp_targets: list[TestTarget] - C# files
    :rtype: dict[str, set[str]]
    """
    # NOTE: the original docstring had the two target lists swapped; the
    # calls below show powershell targets use is_pure_csharp=False.
    module_utils = enumerate_module_utils()
    imports_by_target_path = {}
    for target in powershell_targets:
        imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, False)
    for target in csharp_targets:
        imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, True)
    # Invert the mapping: module_util name -> set of files importing it.
    imports = {module_util: set() for module_util in module_utils}
    for target_path, used_utils in imports_by_target_path.items():
        for module_util in used_utils:
            imports[module_util].add(target_path)
    for module_util in sorted(imports):
        if not imports[module_util]:
            display.warning('No imports found which use the "%s" module_util.' % module_util)
    return imports
def enumerate_module_utils():
    """Return a list of available module_utils imports.
    :rtype: set[str]
    """
    csharp_dir = 'lib/ansible/module_utils/csharp'
    names = set()
    # A module_util's import name is its file name without the .cs extension.
    for file_name in os.listdir(csharp_dir):
        base, ext = os.path.splitext(file_name)
        if ext == '.cs':
            names.add(base)
    return names
def extract_csharp_module_utils_imports(path, module_utils, is_pure_csharp):
    """Return the set of module_utils imports found in the specified source file.
    :type path: str
    :type module_utils: set[str]
    :type is_pure_csharp: bool
    :rtype: set[str]
    """
    # C# source declares the dependency with a real ``using`` statement;
    # PowerShell declares it via an ``#AnsibleRequires -CSharpUtil`` comment.
    if is_pure_csharp:
        pattern = re.compile(r'(?i)^using\s(Ansible\..+);$')
    else:
        pattern = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+(Ansible\..+)')
    found = set()
    with open(path, 'r') as module_file:
        for line_number, line in enumerate(module_file, 1):
            match = re.search(pattern, line)
            if not match:
                continue
            import_name = match.group(1)
            if import_name in module_utils:
                found.add(import_name)
            else:
                display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
    return found
| gpl-3.0 |
AlericInglewood/3p-google-breakpad | src/tools/gyp/gyptest.py | 20 | 7664 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner:
    """
    Executor class for commands, including "commands" implemented by
    Python functions.
    """
    verbose = True
    active = True

    def __init__(self, dictionary=None):
        # A fresh dict per instance avoids the shared-mutable-default pitfall
        # of the previous ``dictionary={}`` signature; passing None (or
        # nothing) behaves exactly as the old empty-dict default did.
        self.subst_dictionary(dictionary if dictionary is not None else {})

    def subst_dictionary(self, dictionary):
        """Set the default dictionary used by subst()."""
        self._subst_dictionary = dictionary

    def subst(self, string, dictionary=None):
        """
        Substitutes (via the format operator) the values in the specified
        dictionary into the specified command.
        The command can be an (action, string) tuple. In all cases, we
        perform substitution on strings and don't worry if something isn't
        a string. (It's probably a Python function to be executed.)
        """
        if dictionary is None:
            dictionary = self._subst_dictionary
        if dictionary:
            try:
                string = string % dictionary
            except TypeError:
                pass
        return string

    def display(self, command, stdout=None, stderr=None):
        """Print the command about to be run (no-op when verbose is off)."""
        if not self.verbose:
            return
        if isinstance(command, tuple):
            # BUG FIX: this branch was followed by a separate ``if`` for the
            # list case, so tuple commands fell into its ``else`` and crashed
            # in subst()/endswith(). It must be an if/elif/else chain.
            func = command[0]
            args = command[1:]
            s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
        elif isinstance(command, list):
            # TODO: quote arguments containing spaces
            # TODO: handle meta characters?
            s = ' '.join(command)
        else:
            s = self.subst(command)
        if not s.endswith('\n'):
            s += '\n'
        sys.stdout.write(s)
        sys.stdout.flush()

    def execute(self, command, stdout=None, stderr=None):
        """
        Executes a single command.
        """
        if not self.active:
            return 0
        if isinstance(command, str):
            command = self.subst(command)
            # Requires the (previously missing) module-level ``import shlex``.
            cmdargs = shlex.split(command)
            if cmdargs[0] == 'cd':
                # 'cd' has to be emulated in-process; a subprocess can't
                # change our working directory.
                command = (os.chdir,) + tuple(cmdargs[1:])
        if isinstance(command, tuple):
            func = command[0]
            args = command[1:]
            return func(*args)
        else:
            if stdout is sys.stdout:
                # Same as passing sys.stdout, except python2.4 doesn't fail on it.
                subout = None
            else:
                # Open pipe for anything else so Popen works on python2.4.
                subout = subprocess.PIPE
            if stderr is sys.stderr:
                # Same as passing sys.stderr, except python2.4 doesn't fail on it.
                suberr = None
            elif stderr is None:
                # Merge with stdout if stderr isn't specified.
                suberr = subprocess.STDOUT
            else:
                # Open pipe for anything else so Popen works on python2.4.
                suberr = subprocess.PIPE
            p = subprocess.Popen(command,
                                 shell=(sys.platform == 'win32'),
                                 stdout=subout,
                                 stderr=suberr)
            p.wait()
            if stdout is None:
                self.stdout = p.stdout.read()
            elif stdout is not sys.stdout:
                stdout.write(p.stdout.read())
            if stderr not in (None, sys.stderr):
                stderr.write(p.stderr.read())
            return p.returncode

    def run(self, command, display=None, stdout=None, stderr=None):
        """
        Runs a single command, displaying it first.
        """
        if display is None:
            display = command
        self.display(display)
        return self.execute(command, stdout, stderr)
class Unbuffered:
    """File-object wrapper that flushes after every write."""
    def __init__(self, fp):
        self.fp = fp
    def write(self, arg):
        # Write through, then flush immediately so output is never buffered.
        stream = self.fp
        stream.write(arg)
        stream.flush()
    def __getattr__(self, attr):
        # Delegate every other attribute to the wrapped file object.
        return getattr(self.fp, attr)
# Replace the standard streams with auto-flushing wrappers so test output
# appears immediately, even when redirected to a pipe or file.
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def find_all_gyptest_files(directory):
    """Return the sorted paths of all gyptest*.py files under *directory*,
    skipping .svn administrative subdirectories."""
    found = []
    for root, dirs, files in os.walk(directory):
        if '.svn' in dirs:
            # Prune the walk so Subversion metadata is never descended into.
            dirs.remove('.svn')
        found.extend(os.path.join(root, name) for name in files
                     if name.startswith('gyptest') and name.endswith('.py'))
    return sorted(found)
def main(argv=None):
  """Drive the gyp test suite; returns 1 if any test failed, else 0."""
  if argv is None:
    argv = sys.argv

  usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
  parser = optparse.OptionParser(usage=usage)
  parser.add_option("-a", "--all", action="store_true",
            help="run all tests")
  parser.add_option("-C", "--chdir", action="store", default=None,
            help="chdir to the specified directory")
  parser.add_option("-f", "--format", action="store", default='',
            help="run tests with the specified formats")
  parser.add_option("-G", '--gyp_option', action="append", default=[],
            help="Add -G options to the gyp command line")
  parser.add_option("-l", "--list", action="store_true",
            help="list available tests and exit")
  parser.add_option("-n", "--no-exec", action="store_true",
            help="no execute, just print the command line")
  parser.add_option("--passed", action="store_true",
            help="report passed tests")
  parser.add_option("--path", action="append", default=[],
            help="additional $PATH directory")
  parser.add_option("-q", "--quiet", action="store_true",
            help="quiet, don't print test command lines")
  opts, args = parser.parse_args(argv[1:])

  if opts.chdir:
    os.chdir(opts.chdir)

  if opts.path:
    # Extend $PATH for the child test processes.
    os.environ['PATH'] += ':' + ':'.join(opts.path)

  if not args:
    if not opts.all:
      sys.stderr.write('Specify -a to get all tests.\n')
      return 1
    args = ['test']

  # Expand directory arguments into the gyptest*.py files they contain.
  tests = []
  for arg in args:
    if os.path.isdir(arg):
      tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
    else:
      tests.append(arg)

  if opts.list:
    for test in tests:
      print test
    sys.exit(0)

  CommandRunner.verbose = not opts.quiet
  CommandRunner.active = not opts.no_exec
  cr = CommandRunner()

  os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
  if not opts.quiet:
    sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])

  passed = []
  failed = []
  no_result = []

  if opts.format:
    format_list = opts.format.split(',')
  else:
    # TODO: not duplicate this mapping from pylib/gyp/__init__.py
    format_list = {
      'freebsd7': ['make'],
      'freebsd8': ['make'],
      'cygwin': ['msvs'],
      'win32': ['msvs', 'ninja'],
      'linux2': ['make', 'ninja'],
      'linux3': ['make', 'ninja'],
      'darwin': ['make', 'ninja', 'xcode'],
    }[sys.platform]

  for format in format_list:
    os.environ['TESTGYP_FORMAT'] = format
    if not opts.quiet:
      sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)

    gyp_options = []
    for option in opts.gyp_option:
      gyp_options += ['-G', option]
    if gyp_options and not opts.quiet:
      sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)

    # Each test runs once per format; exit status 2 means "no result".
    for test in tests:
      status = cr.run([sys.executable, test] + gyp_options,
                      stdout=sys.stdout,
                      stderr=sys.stderr)
      if status == 2:
        no_result.append(test)
      elif status:
        failed.append(test)
      else:
        passed.append(test)

  if not opts.quiet:
    def report(description, tests):
      # Print a summary section for one result bucket, if non-empty.
      if tests:
        if len(tests) == 1:
          sys.stdout.write("\n%s the following test:\n" % description)
        else:
          fmt = "\n%s the following %d tests:\n"
          sys.stdout.write(fmt % (description, len(tests)))
        sys.stdout.write("\t" + "\n\t".join(tests) + "\n")

    if opts.passed:
      report("Passed", passed)
    report("Failed", failed)
    report("No result from", no_result)

  if failed:
    return 1
  else:
    return 0
# Script entry point; the process exit status is 1 when any test failed.
if __name__ == "__main__":
  sys.exit(main())
| bsd-3-clause |
azureplus/hue | desktop/core/ext-py/PyYAML-3.09/lib/yaml/reader.py | 114 | 8139 | # This module contains abstractions for the input stream. You don't have to
# looks further, there are no pretty code.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position to `length` characters.
# reader.index - the number of the current character.
# reader.line, stream.column - the line and the column of the current character.
__all__ = ['Reader', 'ReaderError']
from error import YAMLError, Mark
import codecs, re
# Unfortunately, codec functions in Python 2.3 does not support the `finish`
# arguments, so we have to write our own wrappers.
try:
    # Python 2.3's codec functions don't accept the `finish` argument;
    # probe for support and fall back to hand-written wrappers if needed.
    codecs.utf_8_decode('', 'strict', False)
    from codecs import utf_8_decode, utf_16_le_decode, utf_16_be_decode

except TypeError:

    def utf_16_le_decode(data, errors, finish=False):
        """Decode UTF-16-LE, dropping a trailing odd byte (half a code
        unit) unless this is the final chunk of the stream."""
        if not finish and len(data) % 2 == 1:
            data = data[:-1]
        return codecs.utf_16_le_decode(data, errors)

    def utf_16_be_decode(data, errors, finish=False):
        """Decode UTF-16-BE, dropping a trailing odd byte (half a code
        unit) unless this is the final chunk of the stream."""
        if not finish and len(data) % 2 == 1:
            data = data[:-1]
        return codecs.utf_16_be_decode(data, errors)

    def utf_8_decode(data, errors, finish=False):
        """Decode UTF-8, trimming an incomplete trailing multibyte
        sequence unless this is the final chunk of the stream."""
        if not finish:
            # We are trying to remove a possible incomplete multibyte character
            # from the suffix of the data.
            # The first byte of a multi-byte sequence is in the range 0xc0 to 0xfd.
            # All further bytes are in the range 0x80 to 0xbf.
            # UTF-8 encoded UCS characters may be up to six bytes long.
            count = 0
            while count < 5 and count < len(data)   \
                    and '\x80' <= data[-count-1] <= '\xBF':
                # BUG FIX: this used to *decrement* count, which walked the
                # probe index forward off the end of the data instead of
                # counting the trailing continuation bytes.
                count += 1
            if count < 5 and count < len(data)  \
                    and '\xC0' <= data[-count-1] <= '\xFD':
                data = data[:-count-1]
        return codecs.utf_8_decode(data, errors)
class ReaderError(YAMLError):
    """Raised when the input stream cannot be decoded or contains a
    character that is not allowed in a YAML document."""

    def __init__(self, name, position, character, encoding, reason):
        self.name = name
        self.position = position
        self.character = character
        self.encoding = encoding
        self.reason = reason

    def __str__(self):
        # A `str` character is a raw byte that failed to decode; anything
        # else is a code point rejected by the printable check.
        if not isinstance(self.character, str):
            return "unacceptable character #x%04x: %s\n" \
                    "  in \"%s\", position %d" \
                    % (self.character, self.reason,
                            self.name, self.position)
        return "'%s' codec can't decode byte #x%02x: %s\n" \
                "  in \"%s\", position %d" \
                % (self.encoding, ord(self.character), self.reason,
                        self.name, self.position)
class Reader(object):
    # Reader:
    # - determines the data encoding and converts it to unicode,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.

    # Reader accepts
    #  - a `str` object,
    #  - a `unicode` object,
    #  - a file-like object with its `read` method returning `str`,
    #  - a file-like object with its `read` method returning `unicode`.

    # Yeah, it's ugly and slow.

    def __init__(self, stream):
        self.name = None
        self.stream = None
        self.stream_pointer = 0
        self.eof = True
        self.buffer = u''          # decoded characters; ends with u'\0' at EOF
        self.pointer = 0           # index of the current character in buffer
        self.raw_buffer = None     # undecoded input not yet converted
        self.raw_decode = None     # incremental decode function for the encoding
        self.encoding = None
        self.index = 0             # absolute character index in the document
        self.line = 0
        self.column = 0
        if isinstance(stream, unicode):
            # Already decoded: validate and terminate with NUL.
            self.name = "<unicode string>"
            self.check_printable(stream)
            self.buffer = stream+u'\0'
        elif isinstance(stream, str):
            self.name = "<string>"
            self.raw_buffer = stream
            self.determine_encoding()
        else:
            # File-like object; bytes are pulled lazily by update_raw().
            self.stream = stream
            self.name = getattr(stream, 'name', "<file>")
            self.eof = False
            self.raw_buffer = ''
            self.determine_encoding()

    def peek(self, index=0):
        """Return the character `index` positions ahead without consuming it."""
        try:
            return self.buffer[self.pointer+index]
        except IndexError:
            # Buffer exhausted; decode more input and retry.
            self.update(index+1)
            return self.buffer[self.pointer+index]

    def prefix(self, length=1):
        """Return the next `length` characters without consuming them."""
        if self.pointer+length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer:self.pointer+length]

    def forward(self, length=1):
        """Consume `length` characters, updating index, line and column."""
        if self.pointer+length+1 >= len(self.buffer):
            self.update(length+1)
        while length:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            # Line breaks: NL, NEL, LS, PS, or a lone '\r' that is not part
            # of a '\r\n' pair.
            if ch in u'\n\x85\u2028\u2029' \
                    or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
                self.line += 1
                self.column = 0
            elif ch != u'\uFEFF':
                # A BOM (zero-width no-break space) does not advance the column.
                self.column += 1
            length -= 1

    def get_mark(self):
        """Return a Mark for the current position; only in-memory input
        includes a buffer snippet for error snippets."""
        if self.stream is None:
            return Mark(self.name, self.index, self.line, self.column,
                    self.buffer, self.pointer)
        else:
            return Mark(self.name, self.index, self.line, self.column,
                    None, None)

    def determine_encoding(self):
        """Select utf-16-le/utf-16-be from a BOM (default utf-8), then
        decode an initial chunk."""
        # Need at least two raw bytes to recognize a UTF-16 BOM.
        while not self.eof and len(self.raw_buffer) < 2:
            self.update_raw()
        if not isinstance(self.raw_buffer, unicode):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = utf_16_le_decode
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = utf_16_be_decode
                self.encoding = 'utf-16-be'
            else:
                # No BOM present: fall back to UTF-8.
                self.raw_decode = utf_8_decode
                self.encoding = 'utf-8'
        self.update(1)

    # Matches any character that must not appear in a YAML document.
    NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
    def check_printable(self, data):
        """Raise ReaderError if `data` contains a non-printable character."""
        match = self.NON_PRINTABLE.search(data)
        if match:
            character = match.group()
            # Absolute position = characters consumed so far + offset of the
            # match within the not-yet-consumed tail of the buffer.
            position = self.index+(len(self.buffer)-self.pointer)+match.start()
            raise ReaderError(self.name, position, ord(character),
                    'unicode', "special characters are not allowed")

    def update(self, length):
        """Ensure at least `length` decoded characters are available past
        the current pointer, or the stream is exhausted."""
        if self.raw_buffer is None:
            return
        # Drop already-consumed characters to keep the buffer small.
        self.buffer = self.buffer[self.pointer:]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    data, converted = self.raw_decode(self.raw_buffer,
                            'strict', self.eof)
                except UnicodeDecodeError, exc:
                    character = exc.object[exc.start]
                    if self.stream is not None:
                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character,
                            exc.encoding, exc.reason)
            else:
                # Input was already unicode; no decoding necessary.
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                # The NUL terminator lets the scanner detect end-of-stream.
                self.buffer += u'\0'
                self.raw_buffer = None
                break

    def update_raw(self, size=1024):
        """Read another chunk of raw input from the stream into raw_buffer."""
        data = self.stream.read(size)
        if data:
            self.raw_buffer += data
            self.stream_pointer += len(data)
        else:
            self.eof = True
#try:
# import psyco
# psyco.bind(Reader)
#except ImportError:
# pass
| apache-2.0 |
imosquera/spinnaker | pylib/spinnaker/yaml_util.py | 2 | 8208 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import yaml
def yml_or_yaml_path(basedir, basename):
  """Resolve <basedir>/<basename> to its .yml or .yaml variant.

  Args:
    basedir [string]: Directory expected to contain the file.
    basename [string]: File name without its extension.

  Returns:
    The .yaml path if only that variant exists, otherwise the .yml path
    (which is also the default when neither file exists).

  Raises:
    ValueError: if both the .yml and .yaml variants exist.
  """
  prefix = os.path.join(basedir, basename)
  yml_path = prefix + ".yml"
  yaml_path = prefix + ".yaml"
  have_yml = os.path.exists(yml_path)
  have_yaml = os.path.exists(yaml_path)
  if have_yml and have_yaml:
    raise ValueError('Both {0} and {1} exist.'.format(yml_path, yaml_path))
  return yaml_path if have_yaml else yml_path
class YamlBindings(object):
  """Implements a map from yaml using variable references similar to spring."""

  @property
  def map(self):
    # The merged dictionary of all bindings imported so far.
    return self.__map

  def __init__(self):
    self.__map = {}

  def __getitem__(self, field):
    """Look up a dotted field name, resolving ${...} references.

    Raises:
      KeyError: if the field is not bound.
    """
    return self.__get_field_value(field, [], original=field)

  def get(self, field, default=None):
    """Like __getitem__, but returns |default| instead of raising KeyError."""
    try:
      return self.__get_field_value(field, [], original=field)
    except KeyError:
      return default

  def import_dict(self, d):
    """Merge a (possibly nested) dictionary into the current bindings.

    A None argument is silently ignored.
    """
    # BUG FIX: this previously tested `dict is not None` -- the builtin
    # type, which is always true -- so passing d=None crashed on d.items()
    # instead of being skipped as the guard clearly intended.
    if d is not None:
      for name,value in d.items():
        self.__update_field(name, value, self.__map)

  def import_string(self, s):
    """Merge bindings parsed from a YAML document string."""
    self.import_dict(yaml.load(s, Loader=yaml.Loader))

  def import_path(self, path):
    """Merge bindings parsed from the YAML file at |path|."""
    with open(path, 'r') as f:
      self.import_dict(yaml.load(f, Loader=yaml.Loader))

  def __update_field(self, name, value, container):
    # Merge dictionaries recursively; a non-dict value, or a dict replacing
    # a non-dict, overwrites the previous binding outright.
    if not isinstance(value, dict) or not name in container:
      container[name] = value
      return
    container_value = container[name]
    if not isinstance(container_value, dict):
      container[name] = value
      return
    for child_name, child_value in value.items():
      self.__update_field(child_name, child_value, container_value)

  def __get_node(self, field):
    # Walk a dotted path ('a.b.c') down the nested dictionaries.
    path = field.split('.')
    node = self.__map
    for part in path:
      if not isinstance(node, dict) or not part in node:
        raise KeyError(field)
      # NOTE(review): this list branch is unreachable -- a list fails the
      # isinstance(node, dict) check above and raises first.  Left as-is.
      if isinstance(node, list):
        node = node[0][part]
      else:
        node = node[part]
    return node

  def __typed_value(self, value_text):
    """Convert the text of a value into the YAML value.

    This is used for type conversion for default values.
    Not particularly efficient, but there doesnt seem to be a direct API.
    """
    return yaml.load('x: {0}'.format(value_text), Loader=yaml.Loader)['x']

  def __get_field_value(self, field, saw, original):
    # Environment variables take precedence over the imported bindings.
    value = os.environ.get(field, None)
    if value is None:
      value = self.__get_node(field)
    else:
      value = self.__typed_value(value)

    # Only string values starting with '$' can hold ${...} references.
    if not isinstance(value, basestring) or not value.startswith('$'):
      return value

    if field in saw:
      raise ValueError('Cycle looking up variable ' + original)
    saw = saw + [field]

    return self.__resolve_value(value, saw, original)

  def __resolve_value(self, value, saw, original):
    # Matches ${key} or ${key:default}.
    expression_re = re.compile('\${([\._a-zA-Z0-9]+)(:.+?)?}')
    exact_match = expression_re.match(value)
    if exact_match and exact_match.group(0) == value:
      # The whole value is a single reference: return the bound value
      # (preserving its type) rather than a string substitution.
      try:
        got = self.__get_field_value(exact_match.group(1), saw, original)
        return got
      except KeyError:
        if exact_match.group(2):
          return self.__typed_value(exact_match.group(2)[1:])
        else:
          return value

    result = []
    offset = 0

    # Look for fragments of ${key} or ${key:default} then resolve them.
    text = value
    for match in expression_re.finditer(text):
      result.append(text[offset:match.start()])
      try:
        got = self.__get_field_value(str(match.group(1)), saw, original)
        result.append(str(got))
      except KeyError:
        if match.group(2):
          result.append(str(match.group(2)[1:]))
        else:
          result.append(match.group(0))
      offset = match.end()  # skip trailing '}'
    result.append(text[offset:])
    return ''.join(result)

  def replace(self, text):
    """Resolve every ${...} reference in |text| against the bindings."""
    return self.__resolve_value(text, [], text)

  def __get_flat_keys(self, container):
    # Flatten nested dictionary keys into dotted-path strings.
    flat_keys = []
    for key,value in container.items():
      if isinstance(value, dict):
        flat_keys.extend([key + '.' + subkey for subkey in self.__get_flat_keys(value)])
      else:
        flat_keys.append(key)
    return flat_keys

  @staticmethod
  def update_yml_source(path, update_dict):
    """Update the yaml source at the path according to the update dict.

    All the previous bindings not in the update dict remain unchanged.
    The yaml file at the path is re-written with the new bindings.

    Args:
      path [string]: Path to a yaml source file.
      update_dict [dict]: Nested dictionary corresponding to
          nested yaml properties, keyed by strings.
    """
    bindings = YamlBindings()
    bindings.import_dict(update_dict)
    # Name mangling resolves these to _YamlBindings__* even inside a
    # staticmethod, because we are lexically within the class body.
    updated_keys = bindings.__get_flat_keys(bindings.__map)
    source = ''  # declare so this is in scope for both 'with' blocks
    with open(path, 'r') as source_file:
      source = source_file.read()

    for prop in updated_keys:
      source = bindings.transform_yaml_source(source, prop)

    with open(path, 'w') as source_file:
      source_file.write(source)

  def transform_yaml_source(self, source, key):
    """Transform the given yaml source so its value of key matches the binding.

    Has no effect if key is not among the bindings.
    But raises a KeyError if it is in the bindings but not in the source.

    Args:
      source [string]: A YAML document
      key [string]: A key into the bindings.

    Returns:
      Transformed source with value of key replaced to match the bindings.
    """
    try:
      value = self[key]
    except KeyError:
      return source

    parts = key.split('.')
    offset = 0
    s = source
    # Find each path component in turn, narrowing the search window so the
    # final match is located within its parent section.
    for attr in parts:
      match = re.search('^ *{attr}:(.*)'.format(attr=attr), s, re.MULTILINE)
      if not match:
        raise ValueError(
            'Could not find {key}. Failed on {attr} at {offset}'
            .format(key=key, attr=attr, offset=offset))
      offset += match.start(0)
      s = source[offset:]

    # Rebase offset to the start of the window the final match was found in.
    offset -= match.start(0)
    value_start = match.start(1) + offset
    value_end = match.end(0) + offset

    if isinstance(value, basestring) and re.search('{[^}]*{', value):
      # Quote strings with nested {} yaml flows
      value = '"{0}"'.format(value)

    # yaml doesn't understand capital letter boolean values.
    if isinstance(value, bool):
      value = str(value).lower()

    return ''.join([
        source[0:value_start],
        ' {value}'.format(value=value),
        source[value_end:]
    ])
def load_bindings(installed_config_dir, user_config_dir, only_if_local=False):
  """Load YamlBindings from the installed and local spinnaker configs.

  Later imports override earlier ones, so precedence (lowest first) is:
  installed spinnaker, installed spinnaker-local, user spinnaker-local.

  Args:
    installed_config_dir [string]: Directory holding the installed configs.
    user_config_dir [string]: Directory holding the user's local overrides.
    only_if_local [bool]: When True, return None unless at least one
        spinnaker-local override file exists.

  Returns:
    A populated YamlBindings, or None when only_if_local suppresses it.
  """
  # Candidate override files, in increasing precedence order.
  override_candidates = [
      yml_or_yaml_path(installed_config_dir, 'spinnaker-local'),
      yml_or_yaml_path(user_config_dir, 'spinnaker-local'),
  ]
  overrides = [p for p in override_candidates if os.path.exists(p)]

  if only_if_local and not overrides:
    return None

  bindings = YamlBindings()
  bindings.import_path(yml_or_yaml_path(installed_config_dir, 'spinnaker'))
  for override_path in overrides:
    bindings.import_path(override_path)
  return bindings
| apache-2.0 |
sontek/rethinkdb | external/v8_3.30.33.16/tools/presubmit.py | 35 | 14090 | #!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# hashlib replaced the md5 module in Python 2.5; fall back to md5 on
# older interpreters so md5er() produces a hash object either way.
try:
  import hashlib
  md5er = hashlib.md5
except ImportError, e:
  import md5
  md5er = md5.new
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
import pickle
import re
import sys
import subprocess
import multiprocessing
from subprocess import PIPE
# Disabled LINT rules and reason.
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
# Lint categories passed to cpplint via --filter; everything not listed
# here is disabled by the leading '-,' built in CppLintProcessor.
ENABLED_LINT_RULES = """
build/class
build/deprecated
build/endif_comment
build/forward_decl
build/include_alpha
build/include_order
build/printf_format
build/storage_class
legal/copyright
readability/boost
readability/braces
readability/casting
readability/constructors
readability/fn_size
readability/function
readability/multiline_comment
readability/multiline_string
readability/streams
readability/todo
readability/utf8
runtime/arrays
runtime/casting
runtime/deprecated_fn
runtime/explicit
runtime/int
runtime/memset
runtime/mutex
runtime/nonconf
runtime/printf
runtime/printf_format
runtime/rtti
runtime/sizeof
runtime/string
runtime/virtual
runtime/vlog
whitespace/blank_line
whitespace/braces
whitespace/comma
whitespace/comments
whitespace/ending_newline
whitespace/indent
whitespace/labels
whitespace/line_length
whitespace/newline
whitespace/operators
whitespace/parens
whitespace/tab
whitespace/todo
""".split()


# TODO(bmeurer): Fix and re-enable readability/check
# Matches cpplint diagnostics ("file:line:" / "file(line)") and its
# final "Done processing" summary line.
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
def CppLintWorker(command):
try:
process = subprocess.Popen(command, stderr=subprocess.PIPE)
process.wait()
out_lines = ""
error_count = -1
while True:
out_line = process.stderr.readline()
if out_line == '' and process.poll() != None:
if error_count == -1:
print "Failed to process %s" % command.pop()
return 1
break
m = LINT_OUTPUT_PATTERN.match(out_line)
if m:
out_lines += out_line
error_count += 1
sys.stdout.write(out_lines)
return error_count
except KeyboardInterrupt:
process.kill()
except:
print('Error running cpplint.py. Please make sure you have depot_tools' +
' in your $PATH. Lint check skipped.')
process.kill()
class FileContentsCache(object):
  """Pickle-backed cache of file md5 sums, used to skip re-linting files
  that have not changed since the last clean presubmit run."""

  def __init__(self, sums_file_name):
    self.sums = {}                      # file path -> md5 digest
    self.sums_file_name = sums_file_name

  def Load(self):
    """Best-effort load; a missing or corrupt cache leaves sums empty."""
    try:
      sums_file = None
      try:
        sums_file = open(self.sums_file_name, 'r')
        self.sums = pickle.load(sums_file)
      except Exception:
        # Cannot parse pickle for any reason. Not much we can do about it.
        pass
    finally:
      if sums_file:
        sums_file.close()

  def Save(self):
    """Best-effort save; on failure the cache file is removed so a bad
    cache can never suppress future lint runs."""
    sums_file = None
    try:
      sums_file = open(self.sums_file_name, 'w')
      pickle.dump(self.sums, sums_file)
    except Exception:
      # Failed to write pickle. Try to clean-up behind us.
      if sums_file:
        sums_file.close()
        sums_file = None
      try:
        os.unlink(self.sums_file_name)
      except OSError:
        pass
    finally:
      # BUG FIX: previously closed unconditionally; when open() itself
      # failed this raised a NameError that masked the real error.
      if sums_file:
        sums_file.close()

  def FilterUnchangedFiles(self, files):
    """Return the subset of |files| that are new or whose contents
    changed since the last Save(), updating the cached sums as it goes."""
    changed_or_new = []
    for file in files:
      # BUG FIX: bind handle before the try so a failed open() doesn't
      # trigger a NameError in the finally clause.
      handle = None
      try:
        handle = open(file, "r")
        file_sum = md5er(handle.read()).digest()
        if not file in self.sums or self.sums[file] != file_sum:
          changed_or_new.append(file)
          self.sums[file] = file_sum
      finally:
        if handle:
          handle.close()
    return changed_or_new

  def RemoveFile(self, file):
    """Forget the cached sum for |file| (e.g. after it failed lint)."""
    if file in self.sums:
      self.sums.pop(file)
class SourceFileProcessor(object):
  """
  Base class that walks a directory tree, collects the files a subclass
  deems relevant (via IsRelevant) and hands them to ProcessFiles.
  """

  def Run(self, path):
    """Gather all relevant files under path and process them.

    Returns True when ProcessFiles reports success, False otherwise.
    """
    collected = []
    for subdir in self.GetPathsToSearch():
      collected += self.FindFilesIn(join(path, subdir))
    return bool(self.ProcessFiles(collected, path))

  def IgnoreDir(self, name):
    """Directories to skip: hidden dirs and bundled third-party suites."""
    if name.startswith('.'):
      return True
    return name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken',
                    'octane', 'sunspider')

  def IgnoreFile(self, name):
    """Files to skip: anything hidden (leading dot)."""
    return name.startswith('.')

  def FindFilesIn(self, path):
    """Walk path and return every non-ignored file that IsRelevant."""
    found = []
    for root, dirs, files in os.walk(path):
      # Prune ignored directories in place so os.walk doesn't descend.
      dirs[:] = [d for d in dirs if not self.IgnoreDir(d)]
      found.extend(join(root, name) for name in files
                   if not self.IgnoreFile(name) and self.IsRelevant(name))
    return found
class CppLintProcessor(SourceFileProcessor):
"""
Lint files to check that they follow the google code style.
"""
def IsRelevant(self, name):
return name.endswith('.cc') or name.endswith('.h')
def IgnoreDir(self, name):
return (super(CppLintProcessor, self).IgnoreDir(name)
or (name == 'third_party'))
IGNORE_LINT = ['flag-definitions.h']
def IgnoreFile(self, name):
return (super(CppLintProcessor, self).IgnoreFile(name)
or (name in CppLintProcessor.IGNORE_LINT))
def GetPathsToSearch(self):
return ['src', 'include', 'samples', join('test', 'cctest'),
join('test', 'unittests')]
def GetCpplintScript(self, prio_path):
for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
cpplint = os.path.join(path, "cpplint.py")
if os.path.isfile(cpplint):
return cpplint
return None
def ProcessFiles(self, files, path):
good_files_cache = FileContentsCache('.cpplint-cache')
good_files_cache.Load()
files = good_files_cache.FilterUnchangedFiles(files)
if len(files) == 0:
print 'No changes in files detected. Skipping cpplint check.'
return True
filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
command = [sys.executable, 'cpplint.py', '--filter', filt]
cpplint = self.GetCpplintScript(join(path, "tools"))
if cpplint is None:
print('Could not find cpplint.py. Make sure '
'depot_tools is installed and in the path.')
sys.exit(1)
command = [sys.executable, cpplint, '--filter', filt]
commands = join([command + [file] for file in files])
count = multiprocessing.cpu_count()
pool = multiprocessing.Pool(count)
try:
results = pool.map_async(CppLintWorker, commands).get(999999)
except KeyboardInterrupt:
print "\nCaught KeyboardInterrupt, terminating workers."
sys.exit(1)
for i in range(len(files)):
if results[i] > 0:
good_files_cache.RemoveFile(files[i])
total_errors = sum(results)
print "Total errors found: %d" % total_errors
good_files_cache.Save()
return total_errors == 0
# Expected V8 copyright line; the year may be a range such as "2010-2012".
COPYRIGHT_HEADER_PATTERN = re.compile(
    r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
class SourceProcessor(SourceFileProcessor):
  """
  Check that all files include a copyright notice and no trailing whitespaces.
  """

  # File extensions subject to the source hygiene checks.
  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
                         '.status', '.gyp', '.gypi']

  # Overwriting the one in the parent class.
  def FindFilesIn(self, path):
    # In a git checkout, ask git for the tracked files (fast and exact);
    # otherwise fall back to walking the directory tree.
    if os.path.exists(path+'/.git'):
      output = subprocess.Popen('git ls-files --full-name',
                                stdout=PIPE, cwd=path, shell=True)
      result = []
      for file in output.stdout.read().split():
        # Skip the file if any component of its path is an ignored dir.
        for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
          if self.IgnoreDir(dir_part):
            break
        else:
          if (self.IsRelevant(file) and os.path.exists(file)
              and not self.IgnoreFile(file)):
            result.append(join(path, file))
      if output.wait() == 0:
        return result
    return super(SourceProcessor, self).FindFilesIn(path)

  def IsRelevant(self, name):
    for ext in SourceProcessor.RELEVANT_EXTENSIONS:
      if name.endswith(ext):
        return True
    return False

  def GetPathsToSearch(self):
    return ['.']

  def IgnoreDir(self, name):
    return (super(SourceProcessor, self).IgnoreDir(name) or
            name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources'))

  # Files exempt from the copyright header check (imported / generated).
  IGNORE_COPYRIGHTS = ['cpplint.py',
                       'daemon.py',
                       'earley-boyer.js',
                       'raytrace.js',
                       'crypto.js',
                       'libraries.cc',
                       'libraries-empty.cc',
                       'jsmin.py',
                       'regexp-pcre.js',
                       'gnuplot-4.6.3-emscripten.js']
  IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']

  def EndOfDeclaration(self, line):
    # A line consisting of just a closing brace ends a declaration.
    return line == "}" or line == "};"

  def StartOfDeclaration(self, line):
    # A comment or a line opening a function body starts a declaration.
    return line.find("//") == 0 or \
           line.find("/*") == 0 or \
           line.find(") {") != -1

  def ProcessContents(self, name, contents):
    """Run all textual checks on one file; returns False on any violation."""
    result = True
    base = basename(name)
    if not base in SourceProcessor.IGNORE_TABS:
      if '\t' in contents:
        print "%s contains tabs" % name
        result = False
    if not base in SourceProcessor.IGNORE_COPYRIGHTS:
      if not COPYRIGHT_HEADER_PATTERN.search(contents):
        print "%s is missing a correct copyright header." % name
        result = False
    if ' \n' in contents or contents.endswith(' '):
      line = 0
      lines = []
      # Splitting on ' \n' yields one part per offending line; the final
      # split fragment is only a real violation if the file ends in ' '.
      parts = contents.split(' \n')
      if not contents.endswith(' '):
        parts.pop()
      for part in parts:
        line += part.count('\n') + 1
        lines.append(str(line))
      linenumbers = ', '.join(lines)
      if len(lines) > 1:
        print "%s has trailing whitespaces in lines %s." % (name, linenumbers)
      else:
        print "%s has trailing whitespaces in line %s." % (name, linenumbers)
      result = False
    if not contents.endswith('\n') or contents.endswith('\n\n'):
      print "%s does not end with a single new line." % name
      result = False
    # Check two empty lines between declarations.
    if name.endswith(".cc"):
      line = 0
      lines = []
      parts = contents.split('\n')
      while line < len(parts) - 2:
        if self.EndOfDeclaration(parts[line]):
          # Flag a declaration starting on the next line, or after only
          # a single blank line (two are required).
          if self.StartOfDeclaration(parts[line + 1]):
            lines.append(str(line + 1))
            line += 1
          elif parts[line + 1] == "" and \
               self.StartOfDeclaration(parts[line + 2]):
            lines.append(str(line + 1))
            line += 2
        line += 1
      if len(lines) >= 1:
        linenumbers = ', '.join(lines)
        if len(lines) > 1:
          print "%s does not have two empty lines between declarations " \
                "in lines %s." % (name, linenumbers)
        else:
          print "%s does not have two empty lines between declarations " \
                "in line %s." % (name, linenumbers)
        result = False
    return result

  def ProcessFiles(self, files, path):
    """Check every file; returns True only when no file had violations."""
    success = True
    violations = 0
    for file in files:
      try:
        handle = open(file)
        contents = handle.read()
        if not self.ProcessContents(file, contents):
          success = False
          violations += 1
      finally:
        handle.close()
    print "Total violating files: %s" % violations
    return success
def CheckRuntimeVsNativesNameClashes(workspace):
  """Run tools/check-name-clashes.py; True iff it exits cleanly."""
  script = join(workspace, "tools", "check-name-clashes.py")
  return subprocess.call([sys.executable, script]) == 0
def CheckExternalReferenceRegistration(workspace):
  """Run tools/external-reference-check.py; True iff it exits cleanly."""
  script = join(workspace, "tools", "external-reference-check.py")
  return subprocess.call([sys.executable, script]) == 0
def GetOptions():
  """Build the presubmit option parser (currently only --no-lint)."""
  parser = optparse.OptionParser()
  parser.add_option('--no-lint', help="Do not run cpplint", default=False,
                    action="store_true")
  return parser
def Main():
  """Run every presubmit check; returns 0 on success, 1 on any failure."""
  # The workspace root is the parent of the directory holding this script.
  workspace = abspath(join(dirname(sys.argv[0]), '..'))
  parser = GetOptions()
  (options, args) = parser.parse_args()
  success = True
  print "Running C++ lint check..."
  if not options.no_lint:
    success = CppLintProcessor().Run(workspace) and success
  print "Running copyright header, trailing whitespaces and " \
        "two empty lines between declarations check..."
  success = SourceProcessor().Run(workspace) and success
  success = CheckRuntimeVsNativesNameClashes(workspace) and success
  success = CheckExternalReferenceRegistration(workspace) and success
  if success:
    return 0
  else:
    return 1
# Script entry point; the process exit status mirrors Main()'s result.
if __name__ == '__main__':
  sys.exit(Main())
| agpl-3.0 |
noba3/KoTos | addons/plugin.video.mega/resources/lib/platform_libraries/Linux/arm/Crypto/SelfTest/Random/OSRNG/test_generic.py | 131 | 1746 | # -*- coding: utf-8 -*-
#
# SelfTest/Util/test_generic.py: Self-test for the OSRNG.new() function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Random.OSRNG"""
__revision__ = "$Id$"
import unittest
class SimpleTest(unittest.TestCase):
def runTest(self):
"""Crypto.Random.OSRNG.new()"""
# Import the OSRNG module and try to use it
import Crypto.Random.OSRNG
randobj = Crypto.Random.OSRNG.new()
x = randobj.read(16)
y = randobj.read(16)
self.assertNotEqual(x, y)
def get_tests(config=None):
    """Return the list of self-test cases for this module.

    Args:
        config: optional configuration dictionary; accepted for signature
            compatibility with the other self-test modules, but unused.
    """
    # The old default was a mutable `{}` shared across calls; since the
    # argument is never read, None is the safer idiomatic default.
    return [SimpleTest()]
# Allow running this self-test module directly with the unittest runner.
if __name__ == '__main__':
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-2.0 |
cyx1231st/nova | nova/api/openstack/compute/legacy_v2/contrib/extended_availability_zone.py | 26 | 2398 | # Copyright 2013 Netease, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Availability Zone Status API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import availability_zones as avail_zone
# Soft-extension policy check: when it denies, the server response is
# simply returned without the extra attribute instead of erroring out.
authorize = extensions.soft_extension_authorizer('compute',
                                                 'extended_availability_zone')
class ExtendedAZController(wsgi.Controller):
    """Adds an OS-EXT-AZ:availability_zone field to server responses."""

    def _extend_server(self, context, server, instance):
        # The attribute key is namespaced by the extension alias (OS-EXT-AZ).
        attr_name = "%s:availability_zone" % Extended_availability_zone.alias
        zone = avail_zone.get_instance_availability_zone(context, instance)
        server[attr_name] = zone or ''

    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Extend a single-server (GET /servers/<id>) response."""
        context = req.environ['nova.context']
        if not authorize(context):
            return
        server = resp_obj.obj['server']
        db_instance = req.get_db_instance(server['id'])
        self._extend_server(context, server, db_instance)

    @wsgi.extends
    def detail(self, req, resp_obj):
        """Extend a server-list (GET /servers/detail) response."""
        context = req.environ['nova.context']
        if not authorize(context):
            return
        for server in list(resp_obj.obj['servers']):
            db_instance = req.get_db_instance(server['id'])
            self._extend_server(context, server, db_instance)
class Extended_availability_zone(extensions.ExtensionDescriptor):
    """Extended Availability Zone support."""

    # Metadata consumed by the nova extension framework.
    name = "ExtendedAvailabilityZone"
    alias = "OS-EXT-AZ"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "extended_availability_zone/api/v2")
    updated = "2013-01-30T00:00:00Z"

    def get_controller_extensions(self):
        """Register ExtendedAZController against the ``servers`` resource."""
        ext = extensions.ControllerExtension(
            self, 'servers', ExtendedAZController())
        return [ext]
| apache-2.0 |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/sparse/linalg/dsolve/setup.py | 1 | 1726 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import glob
import sys
from os.path import join, dirname
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for ``dsolve``.

    Compiles the bundled SuperLU sources into a static helper library
    and links the ``_superlu`` extension module against it.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info
    from scipy._build_utils import get_sgemv_fix
    from scipy._build_utils import numpy_nodepr_api

    config = Configuration('dsolve', parent_package, top_path)
    config.add_data_dir('tests')

    # LAPACK/BLAS is mandatory; notfound_action=2 raises when absent.
    lapack_opt = get_info('lapack_opt', notfound_action=2)

    # SuperLU's timer is not portable to Windows; vendor BLAS is always used.
    if sys.platform == 'win32':
        superlu_defs = [('NO_TIMER', 1), ('USE_VENDOR_BLAS', 1)]
    else:
        superlu_defs = [('USE_VENDOR_BLAS', 1)]

    superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')
    c_sources = list(glob.glob(join(superlu_src, '*.c')))
    c_headers = list(glob.glob(join(superlu_src, '*.h')))

    config.add_library('superlu_src',
                       sources=c_sources,
                       macros=superlu_defs,
                       include_dirs=[superlu_src],
                       )

    # The extension wrapper sources, plus the single-precision GEMV
    # workaround when the detected BLAS needs it.
    ext_sources = ['_superlumodule.c',
                   '_superlu_utils.c',
                   '_superluobject.c'] + get_sgemv_fix(lapack_opt)

    config.add_extension('_superlu',
                         sources=ext_sources,
                         libraries=['superlu_src'],
                         depends=(c_sources + c_headers),
                         extra_info=lapack_opt,
                         **numpy_nodepr_api
                         )
    return config
if __name__ == '__main__':
    from numpy.distutils.core import setup
    # Allows building this subpackage standalone from its own directory.
    setup(**configuration(top_path='').todict())
| mit |
bracket/handsome | setup.py | 2 | 1216 | """
handsome: Rasterizer for 2D and 3D image primitives based off of NumPy
"""
from setuptools import setup, Extension
import os
import re
# Matches a module-level ``__version__ = '...'`` (or "...") assignment.
version_re = re.compile(r"^__version__ = ['\"](?P<version>[^'\"]*)['\"]", re.M)


def find_version(*file_paths):
    """Extract the ``__version__`` string from a python source file.

    The path components are joined relative to this file's directory.
    Raises RuntimeError when no version assignment is found.
    """
    path = os.path.join(os.path.dirname(__file__), *file_paths)
    with open(path) as version_file:
        match = version_re.search(version_file.read())
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group('version')
# Absolute path of the directory containing this setup script.
HERE = os.path.abspath(os.path.dirname(__file__))


def fmt_here(string):
    """Substitute ``{HERE}`` in *string* with this script's directory."""
    return string.format(HERE=HERE)
setup(
    name='handsome',
    version=find_version('handsome/__init__.py'),
    author='Stephen [Bracket] McCray',
    author_email='mcbracket@gmail.com',
    packages=['handsome'],
    classifiers=[
        # BUG FIX: a missing trailing comma after the development-status
        # entry caused implicit string concatenation with the next line,
        # yielding the single invalid classifier
        # 'Development Status :: 4 - BetaProgramming Language :: Python :: 2'.
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
| bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.