repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
darron/dd-agent | checks/check_status.py | 22 | 31901 | """
This module contains classes which are used to occasionally persist the status
of checks.
"""
# stdlib
from collections import defaultdict
import cPickle as pickle
import datetime
import logging
import os
import platform
import sys
import tempfile
import time
# 3p
import ntplib
import yaml
# project
import config
from config import _is_affirmative, _windows_commondata_path, get_config
from util import plural
from utils.jmx import JMXFiles
from utils.ntp import get_ntp_args
from utils.pidfile import PidFile
from utils.platform import Platform
from utils.profile import pretty_statistics
# Check/emitter status values used throughout this module.
STATUS_OK = 'OK'
STATUS_ERROR = 'ERROR'
STATUS_WARNING = 'WARNING'
# Maximum tolerated clock drift (in seconds) before the NTP offset is
# highlighted as a problem in the status output.
NTP_OFFSET_THRESHOLD = 60
log = logging.getLogger(__name__)
class Stylizer(object):
    """Wraps text in ANSI escape sequences for colored terminal output."""

    # ANSI SGR codes, keyed by style name.
    STYLES = {
        'bold': 1,
        'grey': 30,
        'red': 31,
        'green': 32,
        'yellow': 33,
        'blue': 34,
        'magenta': 35,
        'cyan': 36,
        'white': 37,
    }

    HEADER = '\033[1m'
    UNDERLINE = '\033[2m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    RESET = '\033[0m'

    # Styling is opt-in: it is only turned on when stdout is a tty.
    ENABLED = False

    @classmethod
    def stylize(cls, text, *styles):
        """Return `text` wrapped in the escape codes for `styles`."""
        if not cls.ENABLED:
            return text
        # don't bother about escaping, not that complicated.
        escape = '\033[%dm%s'
        for style_name in styles or []:
            text = escape % (cls.STYLES[style_name], text)
        return text + escape % (0, '')  # back to the default style
# Module-level convenience shorthand for Stylizer.stylize().
def style(*args):
    """Forward (text, *styles) to Stylizer.stylize()."""
    return Stylizer.stylize(*args)
def logger_info():
    """Return a comma-separated description of the root logger's handlers,
    or a placeholder string when none are configured."""
    # `logging.handlers` is a submodule: it is NOT imported as a side effect
    # of `import logging`. Import it explicitly so the SysLogHandler
    # isinstance check below cannot raise AttributeError.
    import logging.handlers
    loggers = []
    root_logger = logging.getLogger()
    if len(root_logger.handlers) > 0:
        for handler in root_logger.handlers:
            if isinstance(handler, logging.StreamHandler):
                try:
                    loggers.append(handler.stream.name)
                except AttributeError:
                    # Streams like StringIO have no `name` attribute.
                    loggers.append("unnamed stream")
            if isinstance(handler, logging.handlers.SysLogHandler):
                # `address` is either a "/dev/log"-style path or a
                # (host, port) tuple.
                if isinstance(handler.address, basestring):
                    loggers.append('syslog:%s' % handler.address)
                else:
                    loggers.append('syslog:(%s, %s)' % handler.address)
    else:
        loggers.append("No loggers configured")
    return ', '.join(loggers)
def get_ntp_info():
    """Query the configured NTP server.

    Returns a (offset_seconds, styles) tuple where `styles` highlights the
    value in red/bold when the clock drift exceeds NTP_OFFSET_THRESHOLD.
    """
    response = ntplib.NTPClient().request(**get_ntp_args())
    ntp_offset = response.offset
    if abs(ntp_offset) > NTP_OFFSET_THRESHOLD:
        ntp_styles = ['red', 'bold']
    else:
        ntp_styles = []
    return ntp_offset, ntp_styles
class AgentStatus(object):
    """
    A small class used to load and save status messages to the filesystem.
    """

    # Human-readable component name; subclasses override this.
    NAME = None

    def __init__(self):
        self.created_at = datetime.datetime.now()
        self.created_by_pid = os.getpid()

    def has_error(self):
        """Subclasses report whether the component is in an error state."""
        raise NotImplementedError

    def persist(self):
        """Pickle this status object to disk (best effort; errors are logged)."""
        try:
            path = self._get_pickle_path()
            log.debug("Persisting status to %s" % path)
            # Binary mode: pickle streams must not go through newline
            # translation (text mode corrupts them on Windows).
            f = open(path, 'wb')
            try:
                pickle.dump(self, f)
            finally:
                f.close()
        except Exception:
            log.exception("Error persisting status")

    def created_seconds_ago(self):
        """Return the age of this status in whole seconds."""
        td = datetime.datetime.now() - self.created_at
        # td.seconds ignores td.days (it wraps around every 24h); use the
        # full duration so old statuses are correctly reported as stale.
        return int(td.total_seconds())

    def render(self):
        """Render the full status (header plus subclass body) as text."""
        indent = " "
        lines = self._header_lines(indent) + [
            indent + l for l in self.body_lines()
        ] + ["", ""]
        return "\n".join(lines)

    @classmethod
    def _title_lines(cls):
        """Banner lines with the component name and agent version."""
        name_line = "%s (v %s)" % (cls.NAME, config.get_version())
        lines = [
            "=" * len(name_line),
            "%s" % name_line,
            "=" * len(name_line),
            "",
        ]
        return lines

    def _header_lines(self, indent):
        # Don't indent the header
        lines = self._title_lines()
        if self.created_seconds_ago() > 120:
            styles = ['red', 'bold']
        else:
            styles = []
        # We color it in red if the status is too old
        fields = [
            (
                style("Status date", *styles),
                style("%s (%ss ago)" % (
                    self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
                    self.created_seconds_ago()), *styles
                )
            )
        ]
        fields += [
            ("Pid", self.created_by_pid),
            ("Platform", platform.platform()),
            ("Python Version", platform.python_version()),
            ("Logs", logger_info()),
        ]
        for key, value in fields:
            l = indent + "%s: %s" % (key, value)
            lines.append(l)
        return lines + [""]

    def to_dict(self):
        """Return a jsonable summary; subclasses extend this dict."""
        return {
            'pid': self.created_by_pid,
            'status_date': "%s (%ss ago)" % (
                self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
                self.created_seconds_ago()
            ),
        }

    @classmethod
    def _not_running_message(cls):
        """Banner printed when no persisted status could be loaded."""
        lines = cls._title_lines() + [
            style(" %s is not running." % cls.NAME, 'red'),
            style(""" You can get more details in the logs:
 %s""" % logger_info(), 'red'),
            "",
            ""
        ]
        return "\n".join(lines)

    @classmethod
    def remove_latest_status(cls):
        """Delete the persisted status file; a missing file is not an error."""
        log.debug("Removing latest status")
        try:
            os.remove(cls._get_pickle_path())
        except OSError:
            pass

    @classmethod
    def load_latest_status(cls):
        """Unpickle and return the persisted status, or None if absent."""
        try:
            # Binary mode to mirror persist().
            f = open(cls._get_pickle_path(), 'rb')
            try:
                return pickle.load(f)
            finally:
                f.close()
        except IOError:
            return None

    @classmethod
    def print_latest_status(cls, verbose=False):
        """Print the latest persisted status and return a shell exit code
        (-1 not running, 0 OK, 1 error)."""
        cls.verbose = verbose
        Stylizer.ENABLED = False
        try:
            if sys.stdout.isatty():
                Stylizer.ENABLED = True
        except Exception:
            # Don't worry if we can't enable the
            # stylizer.
            pass
        message = cls._not_running_message()
        exit_code = -1
        module_status = cls.load_latest_status()
        if module_status:
            message = module_status.render()
            exit_code = 0
            if module_status.has_error():
                exit_code = 1
        sys.stdout.write(message)
        return exit_code

    @classmethod
    def _get_pickle_path(cls):
        """Path of the per-class pickle file holding the latest status."""
        # Prefer the agent's run/data dir; fall back to the system temp dir.
        if Platform.is_win32():
            path = os.path.join(_windows_commondata_path(), 'Datadog')
        elif os.path.isdir(PidFile.get_dir()):
            path = PidFile.get_dir()
        else:
            path = tempfile.gettempdir()
        return os.path.join(path, cls.__name__ + '.pickle')
class InstanceStatus(object):
    """Outcome of a single check instance run (status, error, warnings...)."""

    def __init__(self, instance_id, status, error=None, tb=None, warnings=None, metric_count=None,
                 instance_check_stats=None):
        self.instance_id = instance_id
        self.status = status
        # Store a printable representation of the error, never the exception
        # object itself.
        self.error = repr(error) if error is not None else None
        self.traceback = tb
        self.warnings = warnings
        self.metric_count = metric_count
        self.instance_check_stats = instance_check_stats

    def has_error(self):
        """True when the instance finished in the ERROR state."""
        return self.status == STATUS_ERROR

    def has_warnings(self):
        """True when the instance finished in the WARNING state."""
        return self.status == STATUS_WARNING
class CheckStatus(object):
    """Aggregated status of one check: its instance statuses plus counters."""

    def __init__(self, check_name, instance_statuses, metric_count=None,
                 event_count=None, service_check_count=None, service_metadata=None,
                 init_failed_error=None, init_failed_traceback=None,
                 library_versions=None, source_type_name=None,
                 check_stats=None):
        self.name = check_name
        self.source_type_name = source_type_name
        self.instance_statuses = instance_statuses
        self.metric_count = metric_count or 0
        self.event_count = event_count or 0
        self.service_check_count = service_check_count or 0
        self.init_failed_error = init_failed_error
        self.init_failed_traceback = init_failed_traceback
        self.library_versions = library_versions
        self.check_stats = check_stats
        # The default used to be a shared mutable list ([]): mutating one
        # instance's metadata leaked into every other CheckStatus. Use None
        # as the default and build a fresh list per instance instead.
        self.service_metadata = service_metadata if service_metadata is not None else []

    @property
    def status(self):
        """ERROR if init failed or any instance errored, OK otherwise."""
        if self.init_failed_error:
            return STATUS_ERROR
        for instance_status in self.instance_statuses:
            if instance_status.status == STATUS_ERROR:
                return STATUS_ERROR
        return STATUS_OK

    def has_error(self):
        return self.status == STATUS_ERROR
class EmitterStatus(object):
    """Status of a single emitter; an emitter is OK unless it recorded an error."""

    def __init__(self, name, error=None):
        self.name = name
        # Keep a printable representation of the error, if any.
        self.error = repr(error) if error else None

    @property
    def status(self):
        """ERROR when an error was recorded, OK otherwise."""
        return STATUS_ERROR if self.error else STATUS_OK

    def has_error(self):
        return self.status != STATUS_OK
class CollectorStatus(AgentStatus):
    """Status of the collector process: check runs, emitters, host metadata."""

    NAME = 'Collector'

    def __init__(self, check_statuses=None, emitter_statuses=None, metadata=None):
        AgentStatus.__init__(self)
        self.check_statuses = check_statuses or []
        self.emitter_statuses = emitter_statuses or []
        self.host_metadata = metadata or []

    @property
    def status(self):
        # ERROR as soon as any check reports an error, OK otherwise.
        for check_status in self.check_statuses:
            if check_status.status == STATUS_ERROR:
                return STATUS_ERROR
        return STATUS_OK

    def has_error(self):
        return self.status != STATUS_OK

    @staticmethod
    def check_status_lines(cs):
        """Render one CheckStatus as a list of text lines (always verbose).

        NOTE(review): body_lines() below duplicates most of this logic with
        `self.verbose` gating the tracebacks — keep the two in sync.
        """
        check_lines = [
            ' ' + cs.name,
            ' ' + '-' * len(cs.name)
        ]
        if cs.init_failed_error:
            check_lines.append(" - initialize check class [%s]: %s" %
                               (style(STATUS_ERROR, 'red'),
                                repr(cs.init_failed_error)))
            if cs.init_failed_traceback:
                check_lines.extend(' ' + line for line in
                                   cs.init_failed_traceback.split('\n'))
        else:
            for s in cs.instance_statuses:
                # Pick the line color from the most severe condition.
                c = 'green'
                if s.has_warnings():
                    c = 'yellow'
                if s.has_error():
                    c = 'red'
                line = " - instance #%s [%s]" % (
                    s.instance_id, style(s.status, c))
                if s.has_error():
                    line += u": %s" % s.error
                if s.metric_count is not None:
                    line += " collected %s metrics" % s.metric_count
                if s.instance_check_stats is not None:
                    line += " Last run duration: %s" % s.instance_check_stats.get('run_time')
                check_lines.append(line)
                if s.has_warnings():
                    for warning in s.warnings:
                        warn = warning.split('\n')
                        if not len(warn):
                            continue
                        check_lines.append(u" %s: %s" %
                                           (style("Warning", 'yellow'), warn[0]))
                        check_lines.extend(u" %s" % l for l in
                                           warn[1:])
                if s.traceback is not None:
                    check_lines.extend(' ' + line for line in
                                       s.traceback.split('\n'))
        check_lines += [
            " - Collected %s metric%s, %s event%s & %s service check%s" % (
                cs.metric_count, plural(cs.metric_count),
                cs.event_count, plural(cs.event_count),
                cs.service_check_count, plural(cs.service_check_count)),
        ]
        if cs.check_stats is not None:
            check_lines += [
                " - Stats: %s" % pretty_statistics(cs.check_stats)
            ]
        if cs.library_versions is not None:
            check_lines += [
                " - Dependencies:"]
            for library, version in cs.library_versions.iteritems():
                check_lines += [" - %s: %s" % (library, version)]
        check_lines += [""]
        return check_lines

    @staticmethod
    def render_check_status(cs):
        """Render one CheckStatus as an indented, newline-joined string."""
        indent = " "
        lines = [
            indent + l for l in CollectorStatus.check_status_lines(cs)
        ] + ["", ""]
        return "\n".join(lines)

    def body_lines(self):
        """Build the text body of the collector status report.

        NOTE(review): `self.verbose` is set as a *class* attribute by
        AgentStatus.print_latest_status(); rendering outside that entry
        point would raise AttributeError — confirm with callers.
        """
        # Metadata whitelist
        metadata_whitelist = [
            'hostname',
            'fqdn',
            'ipv4',
            'instance-id'
        ]
        lines = [
            'Clocks',
            '======',
            ''
        ]
        try:
            ntp_offset, ntp_styles = get_ntp_info()
            lines.append(' ' + style('NTP offset', *ntp_styles) + ': ' + style('%s s' % round(ntp_offset, 4), *ntp_styles))
        except Exception, e:
            # NTP lookup does network I/O and can fail for many reasons;
            # degrade to an informational line.
            lines.append(' NTP offset: Unknown (%s)' % str(e))
        lines.append(' System UTC time: ' + datetime.datetime.utcnow().__str__())
        lines.append('')
        # Paths to checks.d/conf.d
        lines += [
            'Paths',
            '=====',
            ''
        ]
        osname = config.get_os()
        try:
            confd_path = config.get_confd_path(osname)
        except config.PathNotFound:
            confd_path = 'Not found'
        try:
            checksd_path = config.get_checksd_path(osname)
        except config.PathNotFound:
            checksd_path = 'Not found'
        lines.append(' conf.d: ' + confd_path)
        lines.append(' checks.d: ' + checksd_path)
        lines.append('')
        # Hostnames
        lines += [
            'Hostnames',
            '=========',
            ''
        ]
        if not self.host_metadata:
            lines.append(" No host information available yet.")
        else:
            # Only show whitelisted metadata keys (substring match).
            for key, host in self.host_metadata.iteritems():
                for whitelist_item in metadata_whitelist:
                    if whitelist_item in key:
                        lines.append(" " + key + ": " + host)
                        break
        lines.append('')
        # Checks.d Status
        lines += [
            'Checks',
            '======',
            ''
        ]
        # JMX checks report through status files; merge them in.
        check_statuses = self.check_statuses + get_jmx_status()
        if not check_statuses:
            lines.append(" No checks have run yet.")
        else:
            for cs in check_statuses:
                check_lines = [
                    ' ' + cs.name,
                    ' ' + '-' * len(cs.name)
                ]
                if cs.init_failed_error:
                    check_lines.append(" - initialize check class [%s]: %s" %
                                       (style(STATUS_ERROR, 'red'),
                                        repr(cs.init_failed_error)))
                    if self.verbose and cs.init_failed_traceback:
                        check_lines.extend(' ' + line for line in
                                           cs.init_failed_traceback.split('\n'))
                else:
                    for s in cs.instance_statuses:
                        # Pick the line color from the most severe condition.
                        c = 'green'
                        if s.has_warnings():
                            c = 'yellow'
                        if s.has_error():
                            c = 'red'
                        line = " - instance #%s [%s]" % (
                            s.instance_id, style(s.status, c))
                        if s.has_error():
                            line += u": %s" % s.error
                        if s.metric_count is not None:
                            line += " collected %s metrics" % s.metric_count
                        if s.instance_check_stats is not None:
                            line += " Last run duration: %s" % s.instance_check_stats.get('run_time')
                        check_lines.append(line)
                        if s.has_warnings():
                            for warning in s.warnings:
                                warn = warning.split('\n')
                                if not len(warn):
                                    continue
                                check_lines.append(u" %s: %s" %
                                                   (style("Warning", 'yellow'), warn[0]))
                                check_lines.extend(u" %s" % l for l in
                                                   warn[1:])
                        if self.verbose and s.traceback is not None:
                            check_lines.extend(' ' + line for line in
                                               s.traceback.split('\n'))
                check_lines += [
                    " - Collected %s metric%s, %s event%s & %s service check%s" % (
                        cs.metric_count, plural(cs.metric_count),
                        cs.event_count, plural(cs.event_count),
                        cs.service_check_count, plural(cs.service_check_count)),
                ]
                if cs.check_stats is not None:
                    check_lines += [
                        " - Stats: %s" % pretty_statistics(cs.check_stats)
                    ]
                if cs.library_versions is not None:
                    check_lines += [
                        " - Dependencies:"]
                    for library, version in cs.library_versions.iteritems():
                        check_lines += [
                            " - %s: %s" % (library, version)]
                check_lines += [""]
                lines += check_lines
        # Metadata status
        metadata_enabled = _is_affirmative(get_config().get('display_service_metadata', False))
        if metadata_enabled:
            lines += [
                "",
                "Service metadata",
                "================",
                ""
            ]
            if not check_statuses:
                lines.append(" No checks have run yet.")
            else:
                meta_lines = []
                for cs in check_statuses:
                    # Check title
                    check_line = [
                        ' ' + cs.name,
                        ' ' + '-' * len(cs.name)
                    ]
                    instance_lines = []
                    for i, meta in enumerate(cs.service_metadata):
                        if not meta:
                            continue
                        instance_lines += [" - instance #%s:" % i]
                        for k, v in meta.iteritems():
                            instance_lines += [" - %s: %s" % (k, v)]
                    if instance_lines:
                        check_line += instance_lines
                        meta_lines += check_line
                if meta_lines:
                    lines += meta_lines
                else:
                    lines.append(" No metadata were collected.")
        # Emitter status
        lines += [
            "",
            "Emitters",
            "========",
            ""
        ]
        if not self.emitter_statuses:
            lines.append(" No emitters have run yet.")
        else:
            for es in self.emitter_statuses:
                c = 'green'
                if es.has_error():
                    c = 'red'
                line = " - %s [%s]" % (es.name, style(es.status, c))
                if es.status != STATUS_OK:
                    line += ": %s" % es.error
                lines.append(line)
        return lines

    def to_dict(self):
        """Return a jsonable dict summary of the collector status."""
        status_info = AgentStatus.to_dict(self)
        # Hostnames
        status_info['hostnames'] = {}
        metadata_whitelist = [
            'hostname',
            'fqdn',
            'ipv4',
            'instance-id'
        ]
        if self.host_metadata:
            for key, host in self.host_metadata.iteritems():
                for whitelist_item in metadata_whitelist:
                    if whitelist_item in key:
                        status_info['hostnames'][key] = host
                        break
        # Checks.d Status
        status_info['checks'] = {}
        check_statuses = self.check_statuses + get_jmx_status()
        for cs in check_statuses:
            status_info['checks'][cs.name] = {'instances': {}}
            if cs.init_failed_error:
                status_info['checks'][cs.name]['init_failed'] = True
                status_info['checks'][cs.name]['traceback'] = \
                    cs.init_failed_traceback or cs.init_failed_error
            else:
                # NOTE(review): this re-assignment resets the dict created
                # just above; redundant but harmless.
                status_info['checks'][cs.name] = {'instances': {}}
                status_info['checks'][cs.name]['init_failed'] = False
                for s in cs.instance_statuses:
                    status_info['checks'][cs.name]['instances'][s.instance_id] = {
                        'status': s.status,
                        'has_error': s.has_error(),
                        'has_warnings': s.has_warnings(),
                    }
                    if s.has_error():
                        status_info['checks'][cs.name]['instances'][s.instance_id]['error'] = s.error
                    if s.has_warnings():
                        status_info['checks'][cs.name]['instances'][s.instance_id]['warnings'] = s.warnings
                status_info['checks'][cs.name]['metric_count'] = cs.metric_count
                status_info['checks'][cs.name]['event_count'] = cs.event_count
                status_info['checks'][cs.name]['service_check_count'] = cs.service_check_count
        # Emitter status
        status_info['emitter'] = []
        for es in self.emitter_statuses:
            check_status = {
                'name': es.name,
                'status': es.status,
                'has_error': es.has_error(),
            }
            if es.has_error():
                check_status['error'] = es.error
            status_info['emitter'].append(check_status)
        osname = config.get_os()
        try:
            status_info['confd_path'] = config.get_confd_path(osname)
        except config.PathNotFound:
            status_info['confd_path'] = 'Not found'
        try:
            status_info['checksd_path'] = config.get_checksd_path(osname)
        except config.PathNotFound:
            status_info['checksd_path'] = 'Not found'
        # Clocks
        try:
            ntp_offset, ntp_style = get_ntp_info()
            warn_ntp = len(ntp_style) > 0
            status_info["ntp_offset"] = round(ntp_offset, 4)
        except Exception as e:
            ntp_offset = "Unknown (%s)" % str(e)
            warn_ntp = True
            status_info["ntp_offset"] = ntp_offset
        status_info["ntp_warning"] = warn_ntp
        status_info["utc_time"] = datetime.datetime.utcnow().__str__()
        return status_info
class DogstatsdStatus(AgentStatus):
    """Status of the dogstatsd server: packet/metric/event counters."""

    NAME = 'Dogstatsd'

    def __init__(self, flush_count=0, packet_count=0, packets_per_second=0,
                 metric_count=0, event_count=0):
        AgentStatus.__init__(self)
        self.flush_count = flush_count
        self.packet_count = packet_count
        self.packets_per_second = packets_per_second
        self.metric_count = metric_count
        self.event_count = event_count

    def has_error(self):
        # Dogstatsd is considered broken only when it has done nothing at all.
        return not (self.flush_count or self.packet_count or self.metric_count)

    def body_lines(self):
        """One "Label: value" line per counter."""
        counters = [
            ("Flush count", self.flush_count),
            ("Packet Count", self.packet_count),
            ("Packets per second", self.packets_per_second),
            ("Metric count", self.metric_count),
            ("Event count", self.event_count),
        ]
        return ["%s: %s" % (label, value) for label, value in counters]

    def to_dict(self):
        status_info = AgentStatus.to_dict(self)
        for attr in ('flush_count', 'packet_count', 'packets_per_second',
                     'metric_count', 'event_count'):
            status_info[attr] = getattr(self, attr)
        return status_info
class ForwarderStatus(AgentStatus):
    """Status of the forwarder: transaction queue counters plus proxy settings."""

    NAME = 'Forwarder'

    def __init__(self, queue_length=0, queue_size=0, flush_count=0, transactions_received=0,
                 transactions_flushed=0):
        AgentStatus.__init__(self)
        self.queue_length = queue_length
        self.queue_size = queue_size
        self.flush_count = flush_count
        self.transactions_received = transactions_received
        self.transactions_flushed = transactions_flushed
        self.proxy_data = get_config(parse_args=False).get('proxy_settings')
        self.hidden_username = None
        self.hidden_password = None
        if self.proxy_data and self.proxy_data.get('user'):
            username = self.proxy_data.get('user')
            # Mask credentials so they can be shown in the status output:
            # keep only the tail of the username visible.
            if len(username) <= 7:
                visible_from = len(username) / 2
            else:
                visible_from = len(username) - 4
            self.hidden_username = '*' * 5 + username[visible_from:]
            self.hidden_password = '*' * 10

    def body_lines(self):
        """Queue counters, plus proxy details when a proxy is configured."""
        lines = [
            "Queue Size: %s bytes" % self.queue_size,
            "Queue Length: %s" % self.queue_length,
            "Flush Count: %s" % self.flush_count,
            "Transactions received: %s" % self.transactions_received,
            "Transactions flushed: %s" % self.transactions_flushed,
            ""
        ]
        if self.proxy_data:
            lines.extend([
                "Proxy",
                "=====",
                "",
                " Host: %s" % self.proxy_data.get('host'),
                " Port: %s" % self.proxy_data.get('port')
            ])
            if self.proxy_data.get('user'):
                lines.extend([
                    " Username: %s" % self.hidden_username,
                    " Password: %s" % self.hidden_password
                ])
        return lines

    def has_error(self):
        # The forwarder is unhealthy until it has flushed at least once.
        return self.flush_count == 0

    def to_dict(self):
        status_info = AgentStatus.to_dict(self)
        for attr in ('flush_count', 'queue_length', 'queue_size',
                     'proxy_data', 'hidden_username', 'hidden_password'):
            status_info[attr] = getattr(self, attr)
        return status_info
def get_jmx_instance_status(instance_name, status, message, metric_count):
    """Build an InstanceStatus from one parsed JMXFetch status entry.

    `status` is expected to be one of STATUS_ERROR/STATUS_WARNING/STATUS_OK.
    Any other value is reported as an error instead of raising
    UnboundLocalError (the previous behavior when `instance_status` was
    never assigned).
    """
    if status == STATUS_ERROR:
        return InstanceStatus(instance_name, STATUS_ERROR, error=message,
                              metric_count=metric_count)
    if status == STATUS_WARNING:
        return InstanceStatus(instance_name, STATUS_WARNING, warnings=[message],
                              metric_count=metric_count)
    if status == STATUS_OK:
        return InstanceStatus(instance_name, STATUS_OK, metric_count=metric_count)
    # Defensive fallback: surface an unexpected status value as an error.
    return InstanceStatus(instance_name, STATUS_ERROR,
                          error="Unknown status: %s (message: %s)" % (status, message),
                          metric_count=metric_count)
def get_jmx_status():
    """Read the 2 JMXFetch status files (yaml, in the temp directory) and
    return a list of CheckStatus objects.

    There are 2 files:
        - One generated by the Agent itself, for jmx checks that can't be
          initialized because there are missing stuff.
          Its format is as following:
            ###
            invalid_checks:
                  jmx: !!python/object/apply:jmxfetch.InvalidJMXConfiguration [You need to have at
                      least one instance defined in the YAML file for this check]
            timestamp: 1391040927.136523
            ###
        - One generated by jmxfetch that return information about the collection of metrics
          its format is as following:
            ###
            timestamp: 1391037347435
            checks:
              failed_checks:
                jmx:
                - {message: Unable to create instance. Please check your yaml file, status: ERROR}
              initialized_checks:
                tomcat:
                - {message: null, status: OK, metric_count: 7, instance_name: jmx-remihakim.fr-3000}
            ###
    """
    check_statuses = []
    java_status_path = JMXFiles.get_status_file_path()
    python_status_path = JMXFiles.get_python_status_file_path()
    if not os.path.exists(java_status_path) and not os.path.exists(python_status_path):
        log.debug("There is no jmx_status file at: %s or at: %s" % (java_status_path, python_status_path))
        return []
    check_data = defaultdict(lambda: defaultdict(list))
    try:
        if os.path.exists(java_status_path):
            # SECURITY NOTE: yaml.load (not safe_load) is deliberate -- the
            # agent's own status file embeds python object tags (see the
            # docstring). Both files are generated locally by the agent, so
            # the input is trusted. Do not point this at external data.
            # `open` with an explicit close replaces the old `file(...)`
            # call, which leaked the file handle.
            f = open(java_status_path)
            try:
                java_jmx_stats = yaml.load(f)
            finally:
                f.close()
            status_age = time.time() - java_jmx_stats.get('timestamp')/1000  # JMX timestamp is saved in milliseconds
            jmx_checks = java_jmx_stats.get('checks', {})
            if status_age > 60:
                check_statuses.append(
                    CheckStatus("jmx", [
                        InstanceStatus(
                            0,
                            STATUS_ERROR,
                            error="JMXfetch didn't return any metrics during the last minute"
                        )
                    ])
                )
            else:
                # failed_checks and initialized_checks entries have exactly
                # the same shape, so process both sections with one loop
                # (failed first, preserving the original ordering).
                for section in ('failed_checks', 'initialized_checks'):
                    for check_name, instances in jmx_checks.get(section, {}).iteritems():
                        for info in instances:
                            message = info.get('message', None)
                            metric_count = info.get('metric_count', 0)
                            service_check_count = info.get('service_check_count', 0)
                            status = info.get('status')
                            instance_name = info.get('instance_name', None)
                            check_data[check_name]['statuses'].append(
                                get_jmx_instance_status(instance_name, status, message, metric_count))
                            check_data[check_name]['metric_count'].append(metric_count)
                            check_data[check_name]['service_check_count'].append(service_check_count)
                for check_name, data in check_data.iteritems():
                    check_status = CheckStatus(check_name, data['statuses'],
                                               metric_count=sum(data['metric_count']),
                                               service_check_count=sum(data['service_check_count']))
                    check_statuses.append(check_status)
        if os.path.exists(python_status_path):
            f = open(python_status_path)
            try:
                python_jmx_stats = yaml.load(f)
            finally:
                f.close()
            jmx_checks = python_jmx_stats.get('invalid_checks', {})
            for check_name, excep in jmx_checks.iteritems():
                check_statuses.append(CheckStatus(check_name, [], init_failed_error=excep))
        return check_statuses
    except Exception:
        log.exception("Couldn't load latest jmx status")
        return []
| bsd-3-clause |
40223142/CDA0629 | static/Brython3.1.0-20150301-090019/Lib/test/re_tests.py | 879 | 31796 | #!/usr/bin/env python3
# -*- mode: python -*-
# Re test suite and benchmark suite v1.5
# The 3 possible outcomes for each pattern
# Outcome codes shared by the benchmark and test suites below:
# SUCCEED == 0, FAIL == 1, SYNTAX_ERROR == 2.
[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)

# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
benchmarks = [
    # test common prefix
    ('Python|Perl', 'Perl'),    # Alternation
    ('(Python|Perl)', 'Perl'),    # Grouped alternation
    ('Python|Perl|Tcl', 'Perl'),    # Alternation
    ('(Python|Perl|Tcl)', 'Perl'),    # Grouped alternation
    ('(Python)\\1', 'PythonPython'),    # Backreference
    ('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'),    # Disable the fastmap optimization
    ('([a-z][a-z0-9]*,)+', 'a5,b7,c9,'),    # A few sets
    ('Python', 'Python'),    # Simple text literal
    ('.*Python', 'Python'),    # Bad text literal
    ('.*Python.*', 'Python'),    # Worse text literal
    ('.*(Python)', 'Python'),    # Bad text literal with grouping
]
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g99" contain the contents of each group, or the
# string 'None' if the group wasn't given a value, or the
# string 'Error' if the group index was out of range;
# also "groups", the return value of m.group() (a tuple).
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
tests = [
# Test ?P< and ?P= extensions
('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier
('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit
('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
# Same tests, for the ?P= form
('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group
('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
# Test octal escapes
('\\1', 'a', SYNTAX_ERROR), # Backreference
('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# Test \0 is handled everywhere
(r'\0', '\0', SUCCEED, 'found', '\0'),
(r'[\0a]', '\0', SUCCEED, 'found', '\0'),
(r'[a\0]', '\0', SUCCEED, 'found', '\0'),
(r'[^a\0]', '\0', FAIL),
# Test various letter escapes
(r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
(r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
# NOTE: not an error under PCRE/PRE:
# (r'\u', '', SYNTAX_ERROR), # A Perl escape
(r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ffffffffffffff', '\377', FAIL, 'found', chr(255)),
(r'\x00f', '\017', FAIL, 'found', chr(15)),
(r'\x00fe', '\376', FAIL, 'found', chr(254)),
# (r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
# (r'\x00f', '\017', SUCCEED, 'found', chr(15)),
# (r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
(r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
# Test that . only matches \n in DOTALL mode
('a.b', 'acb', SUCCEED, 'found', 'acb'),
('a.b', 'a\nb', FAIL),
('a.*b', 'acc\nccb', FAIL),
('a.{4,5}b', 'acc\nccb', FAIL),
('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
(')', '', SYNTAX_ERROR), # Unmatched right bracket
('', '', SUCCEED, 'found', ''), # Empty pattern
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found+"-"', '-'),
('$', 'abc', SUCCEED, 'found+"-"', '-'),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
# NOTE: not an error under PCRE/PRE:
# ('a[b-]', 'a-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('x\\b', 'xyz', FAIL),
('x\\B', 'xyz', SUCCEED, '"-"', '-'),
('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
('z\\B', 'xyz', FAIL),
('\\Bx', 'xyz', FAIL),
('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
('\\Ba\\B', '-a', FAIL, '"-"', '-'),
('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
('\\By\\B', 'xy', FAIL),
('\\By\\B', 'yz', FAIL),
('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
('^(a+).\\1$', 'aaaa', FAIL),
('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
('([abc]*)x', 'abc', FAIL),
('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
# Test symbolic groups
('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
# Test octal escapes/memory references
('\\1', 'a', SYNTAX_ERROR),
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# All tests from Perl
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab{1,}bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{4,5}bc', 'abbbbc', FAIL),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found', ''),
('$', 'abc', SUCCEED, 'found', ''),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-a]', '-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('*a', '-', SYNTAX_ERROR),
('(*)b', '-', SYNTAX_ERROR),
('$b', 'b', FAIL),
('a\\', '-', SYNTAX_ERROR),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a**', '-', SYNTAX_ERROR),
('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
# Python does not have the same rules for \\41 so this is a syntax error
# ('((((((((((a))))))))))\\41', 'aa', FAIL),
# ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'XBC', FAIL),
('(?i)abc', 'AXC', FAIL),
('(?i)abc', 'ABX', FAIL),
('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab+bc', 'ABC', FAIL),
('(?i)ab+bc', 'ABQ', FAIL),
('(?i)ab{1,}bc', 'ABQ', FAIL),
('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab??bc', 'ABBBBC', FAIL),
('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABCC', FAIL),
('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'AABC', FAIL),
('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
('(?i)^', 'ABC', SUCCEED, 'found', ''),
('(?i)$', 'ABC', SUCCEED, 'found', ''),
('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
('(?i)a.*c', 'AXYZD', FAIL),
('(?i)a[bc]d', 'ABC', FAIL),
('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
('(?i)a[b-d]e', 'ABD', FAIL),
('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-a]', '-', SYNTAX_ERROR),
('(?i)a[]b', '-', SYNTAX_ERROR),
('(?i)a[', '-', SYNTAX_ERROR),
('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
('(?i)a[^bc]d', 'ABD', FAIL),
('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)a[^-b]c', 'A-C', FAIL),
('(?i)a[^]b]c', 'A]C', FAIL),
('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)*a', '-', SYNTAX_ERROR),
('(?i)(*)b', '-', SYNTAX_ERROR),
('(?i)$b', 'B', FAIL),
('(?i)a\\', '-', SYNTAX_ERROR),
('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
('(?i)abc)', '-', SYNTAX_ERROR),
('(?i)(abc', '-', SYNTAX_ERROR),
('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a**', '-', SYNTAX_ERROR),
('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
('(?i))(', '-', SYNTAX_ERROR),
('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
('(?i)abc', '', FAIL),
('(?i)a*', '', SUCCEED, 'found', ''),
('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
('(?i)^(ab|cd)e', 'ABCDE', FAIL),
('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
#('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
#('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
('(?i)multiple words of text', 'UH-UH', FAIL),
('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
('(?i)[k]', 'AB', FAIL),
# ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
# ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
# lookbehind: split by : but not if it is escaped by -.
('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', SUCCEED, 'g1', 'bc-:de' ),
# escaping with \ as we know it
('(?<!\\\):(.*?)(?<!\\\):', 'a:bc\\:de:f', SUCCEED, 'g1', 'bc\\:de' ),
# terminating with ' and escaping with ? as in edifact
("(?<!\\?)'(.*?)(?<!\\?)'", "a'bc?'de'f", SUCCEED, 'g1', "bc?'de" ),
# Comments using the (?#...) syntax
('w(?# comment', 'w', SYNTAX_ERROR),
('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
# Check odd placement of embedded pattern modifiers
# not an error under PCRE/PRE:
('w(?i)', 'W', SUCCEED, 'found', 'W'),
# ('w(?i)', 'W', SYNTAX_ERROR),
# Comments using the x embedded pattern modifier
("""(?x)w# comment 1
x y
# comment 2
z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
# using the m embedded pattern modifier
('^abc', """jkl
abc
xyz""", FAIL),
('(?m)^abc', """jkl
abc
xyz""", SUCCEED, 'found', 'abc'),
('(?m)abc$', """jkl
xyzabc
123""", SUCCEED, 'found', 'abc'),
# using the s embedded pattern modifier
('a.b', 'a\nb', FAIL),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
# test \w, etc. both inside and outside character classes
('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
# not an error under PCRE/PRE:
# ('[\\d-x]', '-', SYNTAX_ERROR),
(r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ff', '\377', FAIL),
# (r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
(r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
(r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
(r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
#
# post-1.5.2 additions
# xmllib problem
(r'(([a-z]+):)?([a-z]+)$', 'smil', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-smil'),
# bug 110866: reference to undefined group
(r'((.)\1+)', '', SYNTAX_ERROR),
# bug 111869: search (PRE/PCRE fails on this one, SRE doesn't)
(r'.*d', 'abc\nabd', SUCCEED, 'found', 'abd'),
# bug 112468: various expected syntax errors
(r'(', '', SYNTAX_ERROR),
(r'[\41]', '!', SUCCEED, 'found', '!'),
# bug 114033: nothing to repeat
(r'(x?)?', 'x', SUCCEED, 'found', 'x'),
# bug 115040: rescan if flags are modified inside pattern
(r' (?x)foo ', 'foo', SUCCEED, 'found', 'foo'),
# bug 115618: negative lookahead
(r'(?<!abc)(d.f)', 'abcdefdof', SUCCEED, 'found', 'dof'),
# bug 116251: character class bug
(r'[\w-]+', 'laser_beam', SUCCEED, 'found', 'laser_beam'),
# bug 123769+127259: non-greedy backtracking bug
(r'.*?\S *:', 'xx:', SUCCEED, 'found', 'xx:'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
# bug 127259: \Z shouldn't depend on multiline mode
(r'(?ms).*?x\s*\Z(.*)','xx\nx\n', SUCCEED, 'g1', ''),
# bug 128899: uppercase literals under the ignorecase flag
(r'(?i)M+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)m+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[M]+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[m]+', 'MMM', SUCCEED, 'found', 'MMM'),
# bug 130748: ^* should be an error (nothing to repeat)
(r'^*', '', SYNTAX_ERROR),
# bug 133283: minimizing repeat problem
(r'"(?:\\"|[^"])*?"', r'"\""', SUCCEED, 'found', r'"\""'),
# bug 477728: minimizing repeat problem
(r'^.*?$', 'one\ntwo\nthree\n', FAIL),
# bug 483789: minimizing repeat problem
(r'a[^>]*?b', 'a>b', FAIL),
# bug 490573: minimizing repeat problem
(r'^a*?$', 'foo', FAIL),
# bug 470582: nested groups problem
(r'^((a)c)?(ab)$', 'ab', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-ab'),
# another minimizing repeat problem (capturing groups in assertions)
('^([ab]*?)(?=(b)?)c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?!(b))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?<!(a))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
]
# Unicode target character for the word-boundary cases below.
u = '\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'
# Unicode-aware \b / \w cases appended to the main test table.
tests.extend([
# bug 410271: \b broken under locales
(r'\b.\b', 'a', SUCCEED, 'found', 'a'),
(r'(?u)\b.\b', u, SUCCEED, 'found', u),
(r'(?u)\w', u, SUCCEED, 'found', u),
])
| gpl-3.0 |
mrquim/repository.mrquim | repo/service.xbmc.versioncheck/lib/versions.py | 48 | 6995 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from lib.common import log
def compare_version(version_installed, versionlist):
    """Compare the installed build against the available release channels.

    Parameters
    ----------
    version_installed : dict
        Describes the running build; read keys: 'major', 'minor', 'tag'
        (e.g. 'stable', 'releasecandidate', 'beta', 'alpha', 'prealpha')
        and optionally 'tagversion'.
    versionlist : dict
        Parsed version feed; versionlist['releases'][channel] is a list
        whose first entry is the newest build of that channel.

    Returns
    -------
    (oldversion, version_installed, version_available, version_stable)
        oldversion is False when up to date, the string 'stable' when a
        newer stable build exists, or True when a newer build of the
        running (non-stable) channel exists.
    """
    # Create separate version lists
    versionlist_stable = versionlist['releases']['stable']
    versionlist_rc = versionlist['releases']['releasecandidate']
    versionlist_beta = versionlist['releases']['beta']
    versionlist_alpha = versionlist['releases']['alpha']
    versionlist_prealpha = versionlist['releases']['prealpha']

    log('Version installed %s' %version_installed)

    ### Check to upgrade to newest available stable version
    # check on smaller major version. Smaller version than available always notify
    oldversion = False
    version_available = ''
    # check if installed major version is smaller than available major stable
    # here we don't care if running non stable
    if version_installed['major'] < int(versionlist_stable[0]['major']):
        version_available = versionlist_stable[0]
        oldversion = 'stable'
        log('Version available %s' %versionlist_stable[0])
        log('You are running an older version')

    # check if installed major version is equal than available major stable
    # however also check on minor version and still don't care about non stable
    elif version_installed['major'] == int(versionlist_stable[0]['major']):
        if version_installed['minor'] < int(versionlist_stable[0]['minor']):
            version_available = versionlist_stable[0]
            oldversion = 'stable'
            log('Version available %s' %versionlist_stable[0])
            log('You are running an older minor version')
        # check for <= minor !stable
        elif version_installed['tag'] != 'stable' and version_installed['minor'] <= int(versionlist_stable[0]['minor']):
            version_available = versionlist_stable[0]
            oldversion = True
            log('Version available %s' %versionlist_stable[0])
            log('You are running an older non stable minor version')
        else:
            log('Version available %s' %versionlist_stable[0])
            log('There is no newer stable available')

    # Already skipped a possible newer stable build. Let's continue with non stable builds.
    # Check also 'oldversion' hasn't been set to 'stable' or true by previous checks because if so,
    # those part need to be skipped

    # check for RC builds
    if not oldversion and version_installed['tag'] in ['releasecandidate']:
        # check if you are using a RC build lower than current available RC
        # then check if you are using a beta/alpha lower than current available RC
        # 14.0rc3 is newer than: 14.0rc1, 14.0b9, 14.0a15
        if version_installed['major'] <= int(versionlist_rc[0]['major']):
            if version_installed['minor'] <= int(versionlist_rc[0]['minor']):
                if version_installed.get('tagversion','') < versionlist_rc[0]['tagversion']:
                    version_available = versionlist_rc[0]
                    oldversion = True
                    log('Version available %s' %versionlist_rc[0])
                    log('You are running an older RC version')
    # now check if installed !=rc
    elif not oldversion and version_installed['tag'] in ['beta','alpha','prealpha']:
        if version_installed['major'] <= int(versionlist_rc[0]['major']):
            if version_installed['minor'] <= int(versionlist_beta[0]['minor']):
                version_available = versionlist_rc[0]
                oldversion = True
                log('Version available %s' %versionlist_rc[0])
                log('You are running an older non RC version')

    # check for beta builds
    if not oldversion and version_installed['tag'] == 'beta':
        # 14.0b3 is newer than: 14.0b1, 14.0a15
        if version_installed['major'] <= int(versionlist_beta[0]['major']):
            if version_installed['minor'] <= int(versionlist_beta[0]['minor']):
                if version_installed.get('tagversion','') < versionlist_beta[0]['tagversion']:
                    version_available = versionlist_beta[0]
                    oldversion = True
                    log('Version available %s' %versionlist_beta[0])
                    log('You are running an older beta version')
    # now check if installed !=beta
    elif not oldversion and version_installed['tag'] in ['alpha','prealpha']:
        if version_installed['major'] <= int(versionlist_beta[0]['major']):
            if version_installed['minor'] <= int(versionlist_beta[0]['minor']):
                version_available = versionlist_beta[0]
                oldversion = True
                log('Version available %s' %versionlist_beta[0])
                log('You are running an older non beta version')

    # check for alpha builds and older
    if not oldversion and version_installed['tag'] == 'alpha':
        # 14.0a3 is newer than: 14.0a1 or pre-alpha
        if version_installed['major'] <= int(versionlist_alpha[0]['major']):
            if version_installed['minor'] <= int(versionlist_alpha[0]['minor']):
                if version_installed.get('tagversion','') < versionlist_alpha[0]['tagversion']:
                    version_available = versionlist_alpha[0]
                    oldversion = True
                    log('Version available %s' %versionlist_alpha[0])
                    log('You are running an older alpha version')
    # now check if installed !=alpha
    elif not oldversion and version_installed['tag'] in ['prealpha']:
        if version_installed['major'] <= int(versionlist_alpha[0]['major']):
            if version_installed['minor'] <= int(versionlist_alpha[0]['minor']):
                version_available = versionlist_alpha[0]
                oldversion = True
                log('Version available %s' %versionlist_alpha[0])
                log('You are running an older non alpha version')

    version_stable = versionlist_stable[0]
    return oldversion, version_installed, version_available, version_stable
edermartioli/mcal | spectralclass.py | 1 | 12874 | # -*- coding: utf-8 -*-
"""
Spectral Classes
---------------------------
Created on January 31 2017
@author: Eder Martioli
Laboratorio Nacional de Astrofisica, Brazil
"""
import os
import numpy as np
from astropy.io import fits
import mcallib
from scipy import constants
import gzip
########## SPECTRUM CLASS ############
class Spectrum :
'Common base class for a spectrum'
def __init__(self, Filename, FFT=False):
"""
Create a Spectrum object.
Parameters
----------
filename : string
File to read the spectrum from.
Examples
--------
>>> spc = Spectrum("spectrumfile.spc.gz")
>>> spc = Spectrum("spectrumfile.fits")
>>> spc = Spectrum("spectrumfile.txt")
"""
self.sourceRV = 0.0
self.sourceSNR = 0.0
self.filepath = Filename
basename = mcallib.getbasename(self.filepath)
self.eqw_output = basename[1] + '/' + basename[0] + '_ew.npy'
self.id = basename[0]
self.filename = os.path.basename(self.filepath)
try :
if self.filepath.endswith(".fits") :
self.wl,self.flux=self.loadSpectrumFromFITS(self.filepath)
elif self.filepath.endswith(".spc.gz") :
self.wl,self.flux=self.loadSpectrumFromSPC(self.filepath)
elif self.filepath.endswith(".txt") :
self.wl,self.flux=self.loadSpectrumFromTXT(self.filepath)
elif self.filepath.endswith("iu.s.gz") or self.filepath.endswith("iu.s") or \
self.filepath.endswith("in.s.gz") or self.filepath.endswith("in.s") or \
self.filepath.endswith("pu.s.gz") or self.filepath.endswith("pu.s") or \
self.filepath.endswith("pn.s.gz") or self.filepath.endswith("pn.s") :
self.wl,self.flux=self.loadSpectrumFromLEfile(self.filepath)
else :
print "Error: file type not supported for input spectrum: ",self.filepath
exit()
except :
print "Error: could not open file: ",self.filepath
exit()
if FFT :
# Throw away last point, so it doesn't hang (at least not on test data)
self.wl = self.wl[0:-2]; self.flux = self.flux[0:-2]
self.flux = mcallib.fft_filter(self.flux)
#--- Function to load spectrum from .spc.gz (OPERA) file
def loadSpectrumFromSPC(self,spcfilename):
wl,rvcorr,xcorr,rawflux,rawvar,normflux,calflux = np.loadtxt(spcfilename, unpack=True, comments='#', skiprows=11, usecols=(4,6,7,8,9,10,11), delimiter=' ')
threshold = 10.0
snr = rawflux / np.sqrt(rawvar)
mask = np.where(snr > threshold)
indices = (wl[mask]).argsort()
sortedwl = (wl[mask])[indices]
#sortedrvcorrwl = (wl[mask] + rvcorr[mask])[indices]
#sortednormflux = (normflux[mask])[indices]
sortedcalflux = (calflux[mask])[indices]
cleanwl = np.linspace(np.min(sortedwl), np.max(sortedwl), len(sortedwl))
#cleannormflux = np.interp(cleanwl, sortedwl, sortednormflux)
#cleanrvcorrnormflux = np.interp(cleanwl, sortedrvcorrwl, sortednormflux)
cleancalflux = np.interp(cleanwl, sortedwl, sortedcalflux)
cleanwl = cleanwl * 10.0
return cleanwl, cleancalflux
#------------
#--- Function to load spectrum from .fits file
def loadSpectrumFromFITS(self,fitsfilename):
wl,flux = [],[]
hdu = fits.open(fitsfilename)
try :
if hdu[0].header['INSTRUME'] == 'ESPaDOnS' :
self.instrument = 'ESPaDOnS'
self.object = hdu[0].header['OBJECT']
odonumber = self.id[0:-1]
self.sourceRV,self.sourceSNR = mcallib.getSourceRadialVelocity(odonumber=odonumber,targetName=self.object)
if hdu[0].header['INSTMODE'] == 'Polarimetry, R=65,000' :
# data[0],data[1] for normalized spectrum
# data[6],data[7] for unnormalized spectrum
wltmp = hdu[0].data[0]*(1.0 - self.sourceRV*1000.0/constants.c)
indices = wltmp.argsort()
wl = 10.0*wltmp[indices]
flux = (hdu[0].data[1])[indices]
elif hdu[0].header['INSTMODE'] == 'Spectroscopy, star+sky, R=65,000':
# data[0],data[1] for normalized spectrum
# data[7],data[8] for unnormalized spectrum
wltmp = hdu[0].data[0]*(1.0 - self.sourceRV*1000.0/constants.c)
indices = wltmp.argsort()
wl = 10.0*wltmp[indices]
flux = (hdu[0].data[1])[indices]
elif hdu[0].header['INSTRUME'] == 'NARVAL-POL' :
self.instrument = 'NARVAL'
self.object = hdu[0].header['OBJECT']
odonumber = self.id[0:-1]
self.sourceRV,self.sourceSNR = mcallib.getSourceRadialVelocity(odonumber=odonumber,targetName=self.object)
wltmp = hdu[0].data[0]*(1.0 - self.sourceRV*1000.0/constants.c)
indices = wltmp.argsort()
wl = 10.0*wltmp[indices]
flux = (hdu[0].data[1])[indices]
except :
self.object = (self.id).rsplit('_')[0]
self.instrument = 'HARPS'
length = hdu[0].header['NAXIS1']
start = hdu[0].header['CRVAL1']
step = hdu[0].header['CDELT1']
wl = np.linspace(start, start+length*step, length)
flux = hdu[0].data
return wl, flux
#------------
#--- Function to load spectrum from .txt file
def loadSpectrumFromTXT(self,txtfilename):
x,y = np.loadtxt(txtfilename, unpack=True, comments='#',usecols=(0,1),skiprows=0, delimiter=' ')
return x,y
#------------
#--- Function to load spectrum from LE file
def loadSpectrumFromLEfile(self,filename):
self.instrument = 'ESPaDOnS'
self.sourceRV = 0.0
wl,flux = [],[]
if filename.endswith('.gz'):
file_obj = gzip.open(filename, 'r')
else:
file_obj = open(filename, 'r')
if(os.path.exists(filename)) :
nl = 0
for line in file_obj :
if nl == 0:
cols = line.split("'")
self.object = cols[1].replace(" ", "")
odonumber = self.id[0:-2]
self.sourceRV,self.sourceSNR = mcallib.getSourceRadialVelocity(odonumber=odonumber,targetName=self.object)
elif nl > 1:
line = line.replace(" ", " ")
cols = line.split(" ")
wl.append(float(cols[1]))
flux.append(float(cols[2]))
nl += 1
wl = 10.0*np.array(wl)
wl = wl*(1.0 - self.sourceRV*1000.0/constants.c)
flux = np.array(flux)
indices = wl.argsort()
return wl[indices],flux[indices]
#------------
#--- Function to load/calculate Equivalent Widths
def equivalentWidths(self, inputlinelist='lines.rdb', output=True, override=False, verbose=False) :
try :
if os.path.exists(self.eqw_output) and override is False :
if verbose: print "Loading EWs from existing file: ",self.eqw_output
self.eqwidths = np.load(self.eqw_output)
else :
if verbose: print "Calculating EWs from spectrum using list of lines: ",inputlinelist
self.eqwidths = mcallib.measureEquivalentWidths(self.wl, self.flux, inputlinelist, self.eqw_output)
# Below it calculates the median equivalent width
median_eqwidth = np.median(np.nan_to_num(self.eqwidths))
# Below it replaces all NaNs by the median EW
self.eqwidths[np.where(np.isnan(self.eqwidths))] = median_eqwidth
except :
print "Error: could not calculate Eq Widths. Input line list:",inputlinelist
exit()
#------------
#--- Function to calculate Teff and [Fe/H]
def calculateTeffAndFeH(self, calibmatrix, verbose=False) :
try :
if verbose: print "Calculating Teff and [Fe/H] using calibration: ", calibmatrix
(self.FeH,self.eFeH),(self.Teff,self.eTeff) = mcallib.mcal(self.eqwidths, calibmatrix)
except:
print "Error: could not calculate Teff and [Fe/H]. Calibration matrix:",calibmatrix
exit()
#------------
#--- Resample spectrum
def resampling(self, wlsampling, wl0, wlf):
    """Resample the spectrum onto a uniform wavelength grid.

    The grid spans [wl0, wlf] with int((wlf - wl0) / wlsampling) points;
    flux is obtained by linear interpolation of the current spectrum.
    """
    n_samples = int((wlf - wl0) / wlsampling)
    grid = np.linspace(wl0, wlf, n_samples)
    # interpolate against the *old* grid before overwriting it
    self.flux = np.interp(grid, self.wl, self.flux)
    self.wl = grid
#------------
#--- bin spectrum
def binning(self, rvsampling_kps, wl0=0.0, wlf=0.0, median=False) :
    """Rebin the spectrum onto a constant radial-velocity grid.

    Bin edges grow geometrically: each edge is the previous one times
    (1 + rvsampling_kps*1000/constants.c), i.e. bins of fixed velocity
    width rvsampling_kps (km/s).  Each output point is the mean wavelength
    of the samples falling in a bin, with the mean (or median, if
    median=True) of their fluxes.  Empty bins are dropped.
    wl0/wlf default to the current spectrum limits when left at 0.0.
    """
    if wl0 == 0.0:
        wl0 = self.wl[0]
    if wlf == 0.0:
        wlf = self.wl[-1]
    bins = []
    wl = wl0
    # geometric progression of bin edges: constant Delta-v, not Delta-lambda
    # (constants.c is presumably the speed of light in m/s -- TODO confirm)
    while wl <= wlf :
        bins.append(wl)
        wl *= (1.0 + rvsampling_kps*1000/constants.c)
    bins = np.array(bins)
    # digitized[j] = index of the bin containing self.wl[j]
    digitized = np.digitize(self.wl, bins)
    wl_new = []
    flux_new = []
    for i in range(1, len(bins)):
        if len(self.wl[digitized == i]) :
            try:
                wl_new.append(self.wl[digitized == i].mean())
                if median :
                    flux_new.append(np.median(self.flux[digitized == i]))
                else :
                    flux_new.append(self.flux[digitized == i].mean())
            except :
                # bare except kept as-is: skips a bin on any failure
                continue
    self.wl = np.array(wl_new)
    self.flux = np.array(flux_new)
#--------------------------
#--- Print spectrum information
def info(self) :
print "**************************"
print "Info for spectrum: ",self.filename, " Object:", self.object
print "Instrument:",self.instrument
if self.instrument == 'ESPaDOnS' :
print "Source RV =",self.sourceRV,"km/s"
print "wl0 =",self.wl[0],"A -- wlf =",self.wl[-1],"A"
sampling = (self.wl[-1] - self.wl[0])/float(len(self.wl))
print "sampling =",sampling," A/pixel"
print "<F> =",self.flux.mean(),"+-",self.flux.std()
eqwbad = np.count_nonzero(np.isnan(self.eqwidths))
eqwgood = np.count_nonzero(~np.isnan(self.eqwidths))
print "Using", np.count_nonzero(~np.isnan(self.eqwidths)), "of",len(self.eqwidths),"lines in list for EWs"
print "**************************\n"
#------------
#--- Print spectrum data
def printdata(self) :
for i in range(len(self.wl)) :
print self.wl[i],self.flux[i]
#------------
#--- Calculate H-alpha activity
def halphaActivity(self, verbose=False) :
if verbose: print 'Calculating Halpha activity index...'
self.halpha = mcallib.calculateHalphaActivity(self.wl,self.flux)
if verbose :
if self.halpha >= 0.25 :
print 'WARNING: the star may be too active to use this calibration. Halpha =',self.halpha,''
else :
print 'No significant Halpha emission'
#------------
#--- Calculate Teff and [Fe/H] correction based on SNR
def TeffAndFeHCorr(self, verbose=False) :
    """Apply an SNR-dependent empirical correction to Teff and [Fe/H].

    For SNR >= 100 the values pass through unchanged; below 100 a cubic
    polynomial in SNR (empirical calibration -- coefficients presumably
    fitted offline, TODO confirm their provenance) is subtracted.
    Returns (Teff_corr, Fe_H_corr) and also stores them on self.
    """
    if verbose: print 'Calculating Teff and [Fe/H] correction ...'
    # Switch according to SNR; resolution is fixed at 65000
    SNR = self.sourceSNR
    # Earlier linear calibrations, kept for reference:
    # self.Fe_H_corr=1.2614*self.FeH-0.0997
    # self.Teff_corr=0.8286*self.Teff+957
    self.Fe_H_corr=self.FeH
    self.Teff_corr=self.Teff
    # Earlier per-SNR-band linear calibrations, kept for reference:
    # if SNR < 30:
    #     self.Fe_H_corr=1.3743*self.FeH-0.1880
    #     self.Teff_corr=0.8567*self.Teff+713
    # elif 30 <= SNR < 50:
    #     self.Fe_H_corr=1.3072*self.FeH-0.1315
    #     self.Teff_corr=0.8257*self.Teff+915
    # elif 50 <= SNR < 70:
    #     self.Fe_H_corr=1.2849*self.FeH-0.1149
    #     self.Teff_corr=0.8354*self.Teff+917
    # elif 70 <= SNR < 90:
    #     self.Fe_H_corr=1.2739*self.FeH-0.1075
    #     self.Teff_corr=0.8277*self.Teff+953
    if SNR < 100:
        # cubic-in-SNR offsets subtracted from the raw estimates
        self.Fe_H_corr=self.FeH-(6.1793e-6*np.power(SNR,3)-1.18732e-3*np.square(SNR)+0.0683364*SNR-1.047238)
        self.Teff_corr=self.Teff-(-0.011468*np.power(SNR,3)+2.50713*np.square(SNR)-188.512*SNR+5242.187)
    return self.Teff_corr, self.Fe_H_corr
#------------
| gpl-3.0 |
Innovahn/odoo.old | addons/analytic_user_function/analytic_user_function.py | 163 | 7769 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class analytic_user_funct_grid(osv.osv):
    """Per-user, per-analytic-account service price grid.

    Each row says: when `user_id` logs time on `account_id`, bill the
    service `product_id` at `price` per unit.
    """
    _name="analytic.user.funct.grid"
    _description= "Price per User"
    _rec_name="user_id"
    _columns={
        'user_id': fields.many2one("res.users", "User", required=True,),
        'product_id': fields.many2one("product.product", "Service", required=True,),
        'account_id': fields.many2one("account.analytic.account", "Analytic Account", required=True,),
        # uom_id mirrors the product's unit of measure (read-only related field)
        'uom_id': fields.related("product_id", "uom_id", relation="product.uom", string="Unit of Measure", type="many2one", readonly=True),
        'price': fields.float('Price', digits_compute=dp.get_precision('Product Price'), help="Price per hour for this user.", required=True),
    }

    def onchange_user_product_id(self, cr, uid, ids, user_id, product_id, context=None):
        """Onchange helper: when the user (or product) changes, default the
        product from the user's employee record and fill price/uom from the
        product.  Returns an onchange dict ({'value': {...}} or {})."""
        if not user_id:
            return {}
        emp_obj = self.pool.get('hr.employee')
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', user_id)], context=context)
        if not emp_id:
            # no employee linked to this user: nothing to default
            return {}
        value = {}
        prod = False
        if product_id:
            prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        emp = emp_obj.browse(cr, uid, emp_id[0], context=context)
        if emp.product_id and not product_id:
            # no product chosen yet: take the employee's default service
            value['product_id'] = emp.product_id.id
            prod = emp.product_id
        if prod:
            value['price'] = prod.list_price
            value['uom_id'] = prod.uom_id.id
        return {'value': value}
class account_analytic_account(osv.osv):
    """Extend analytic accounts with the per-user price grid (one2many to
    analytic.user.funct.grid; copied along when the account is duplicated)."""
    _inherit = "account.analytic.account"
    _columns = {
        'user_product_ids': fields.one2many('analytic.user.funct.grid', 'account_id', 'Users/Products Rel.', copy=True),
    }
class hr_analytic_timesheet(osv.osv):
    """Override timesheet onchanges so that, when a (user, analytic account)
    pair has an entry in analytic.user.funct.grid, the line's product, UoM,
    amount and expense account come from that grid instead of the defaults."""
    _inherit = "hr.analytic.timesheet"

    # Look in account, if no value for the user => look in parent until there is no more parent to look
    # Take the first found... if nothing found => return False
    def _get_related_user_account_recursiv(self, cr, uid, user_id, account_id):
        """Return the grid row ids for (user_id, account_id), walking up the
        analytic-account parent chain; False when no ancestor has one."""
        temp=self.pool.get('analytic.user.funct.grid').search(cr, uid, [('user_id', '=', user_id),('account_id', '=', account_id) ])
        account=self.pool.get('account.analytic.account').browse(cr, uid, account_id)
        if temp:
            return temp
        else:
            if account.parent_id:
                return self._get_related_user_account_recursiv(cr, uid, user_id, account.parent_id.id)
            else:
                return False

    def on_change_account_id(self, cr, uid, ids, account_id, user_id=False, unit_amount=0):
        """Onchange for the analytic account: apply the user/account price
        grid on top of the standard onchange values when a grid row exists."""
        res = {}
        if not (account_id):
            #avoid a useless call to super
            return res
        if not (user_id):
            return super(hr_analytic_timesheet, self).on_change_account_id(cr, uid, ids, account_id)
        #get the browse record related to user_id and account_id
        temp = self._get_related_user_account_recursiv(cr, uid, user_id, account_id)
        if not temp:
            #if there isn't any record for this user_id and account_id
            return super(hr_analytic_timesheet, self).on_change_account_id(cr, uid, ids, account_id)
        else:
            #get the old values from super and add the value from the new relation analytic_user_funct_grid
            r = self.pool.get('analytic.user.funct.grid').browse(cr, uid, temp)[0]
            res.setdefault('value',{})
            res['value']= super(hr_analytic_timesheet, self).on_change_account_id(cr, uid, ids, account_id)['value']
            res['value']['product_id'] = r.product_id.id
            res['value']['product_uom_id'] = r.product_id.uom_id.id
            #the change of product has to impact the amount, uom and general_account_id
            a = r.product_id.property_account_expense.id
            if not a:
                a = r.product_id.categ_id.property_account_expense_categ.id
            if not a:
                raise osv.except_osv(_('Error!'),
                        _('There is no expense account defined ' \
                                'for this product: "%s" (id:%d)') % \
                                (r.product_id.name, r.product_id.id,))
            # Compute based on pricetype
            if unit_amount:
                amount_unit = self.on_change_unit_amount(cr, uid, ids,
                    r.product_id.id, unit_amount, False, r.product_id.uom_id.id)['value']['amount']
                amount = unit_amount * amount_unit
                # timesheet cost lines are negative amounts
                res ['value']['amount']= - round(amount, 2)
            res ['value']['general_account_id']= a
        return res

    def on_change_user_id(self, cr, uid, ids, user_id, account_id, unit_amount=0):
        """Onchange for the user: same grid lookup as on_change_account_id,
        layered on top of the standard user onchange result."""
        res = super(hr_analytic_timesheet, self).on_change_user_id(cr, uid, ids, user_id)
        if account_id:
            #get the browse record related to user_id and account_id
            temp = self._get_related_user_account_recursiv(cr, uid, user_id, account_id)
            if temp:
                #add the value from the new relation analytic_user_funct_grid
                r = self.pool.get('analytic.user.funct.grid').browse(cr, uid, temp)[0]
                res['value']['product_id'] = r.product_id.id
                #the change of product has to impact the amount, uom and general_account_id
                a = r.product_id.property_account_expense.id
                if not a:
                    a = r.product_id.categ_id.property_account_expense_categ.id
                if not a:
                    raise osv.except_osv(_('Error!'),
                            _('There is no expense account defined ' \
                                    'for this product: "%s" (id:%d)') % \
                                    (r.product_id.name, r.product_id.id,))
                # Compute based on pricetype
                if unit_amount:
                    amount_unit = self.on_change_unit_amount(cr, uid, ids,
                        r.product_id.id, unit_amount, False, r.product_id.uom_id.id)['value']['amount']
                    amount = unit_amount * amount_unit
                    res ['value']['amount']= - round(amount, 2)
                res ['value']['general_account_id']= a
        return res
class account_analytic_line(osv.osv):
    """Override invoice pricing: a user-specific price from the account's
    grid takes precedence over the standard product price."""
    _inherit = "account.analytic.line"

    def _get_invoice_price(self, cr, uid, account, product_id, user_id, qty, context=None):
        """Return the grid price for user_id on this account when one
        exists, otherwise delegate to the standard implementation.

        Fix vs. the original: the signature used a mutable default
        (context={}), which is shared across calls; None is now the default.
        """
        if context is None:
            context = {}
        for grid in account.user_product_ids:
            if grid.user_id.id == user_id:
                return grid.price
        return super(account_analytic_line, self)._get_invoice_price(cr, uid, account, product_id, user_id, qty, context)
| agpl-3.0 |
jdavidagudelo/django-social-auth-corrected | social_auth/backends/contrib/disqus.py | 1 | 2438 | try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import BaseOAuth2, OAuthBackend
from social_auth.utils import dsa_urlopen, backend_setting
from urllib import urlencode
DISQUS_SERVER = 'disqus.com'
DISQUS_AUTHORIZATION_URL = 'https://disqus.com/api/oauth/2.0/authorize/'
DISQUS_ACCESS_TOKEN_URL = 'https://disqus.com/api/oauth/2.0/access_token/'
DISQUS_CHECK_AUTH = 'https://disqus.com/api/3.0/users/details.json'
class DisqusBackend(OAuthBackend):
    """Disqus OAuth authentication backend."""
    name = 'disqus'
    EXTRA_DATA = [
        ('avatar', 'avatar'),
        ('connections', 'connections'),
        ('user_id', 'user_id'),
        ('email', 'email'),
        ('email_hash', 'emailHash'),
        ('expires', 'expires'),
        ('location', 'location'),
        ('meta', 'response'),
        ('name', 'name'),
        ('username', 'username'),
    ]

    def get_user_id(self, details, response):
        """The stable Disqus identifier lives in the nested 'response' payload."""
        return response['response']['id']

    def get_user_details(self, response):
        """Return user details from Disqus account"""
        inner = response.get('response', {})
        details = {
            'username': inner.get('username', ''),
            'user_id': response.get('user_id', ''),
            'email': inner.get('email', ''),
            'name': inner.get('name', ''),
        }
        return details

    def extra_data(self, user, uid, response, details):
        """Merge the nested 'response' payload into the top level before
        extracting EXTRA_DATA."""
        merged = dict(response, **response.get('response', {}))
        return super(DisqusBackend, self).extra_data(user, uid, merged,
                                                     details)
class DisqusAuth(BaseOAuth2):
    """Disqus OAuth mechanism"""
    AUTHORIZATION_URL = DISQUS_AUTHORIZATION_URL
    ACCESS_TOKEN_URL = DISQUS_ACCESS_TOKEN_URL
    AUTH_BACKEND = DisqusBackend
    SETTINGS_KEY_NAME = 'DISQUS_CLIENT_ID'
    SETTINGS_SECRET_NAME = 'DISQUS_CLIENT_SECRET'

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        query = urlencode({
            'access_token': access_token,
            'api_secret': backend_setting(self, self.SETTINGS_SECRET_NAME),
        })
        try:
            # None on a malformed JSON body; network errors propagate
            return simplejson.load(dsa_urlopen(DISQUS_CHECK_AUTH + '?' + query))
        except ValueError:
            return None
# Backend definition
# Maps the backend name used in settings/URLs to its auth class.
BACKENDS = {
    'disqus': DisqusAuth,
}
| bsd-3-clause |
gjhiggins/vcoincore | test/functional/p2p_invalid_locator.py | 25 | 1957 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid locators.
"""
from test_framework.messages import msg_getheaders, msg_getblocks, MAX_LOCATOR_SZ
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
class InvalidLocatorTest(BitcoinTestFramework):
    """Check the node's handling of over- and at-limit block locators.

    Sending MAX_LOCATOR_SZ + 1 hashes in a getheaders/getblocks locator must
    get the peer disconnected; exactly MAX_LOCATOR_SZ hashes must be answered
    normally (headers or block announcement for the tip).
    """
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = False

    def run_test(self):
        node = self.nodes[0]  # convenience reference to the node
        node.generatetoaddress(1, node.get_deterministic_priv_key().address)  # Get node out of IBD
        self.log.info('Test max locator size')
        block_count = node.getblockcount()
        for msg in [msg_getheaders(), msg_getblocks()]:
            self.log.info('Wait for disconnect when sending {} hashes in locator'.format(MAX_LOCATOR_SZ + 1))
            node.add_p2p_connection(P2PInterface())
            # locator of MAX_LOCATOR_SZ + 1 most-recent block hashes, newest first
            msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ + 1), -1)]
            node.p2p.send_message(msg)
            # oversized locator is a protocol violation -> peer gets dropped
            node.p2p.wait_for_disconnect()
            node.disconnect_p2ps()

            self.log.info('Wait for response when sending {} hashes in locator'.format(MAX_LOCATOR_SZ))
            node.add_p2p_connection(P2PInterface())
            msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ), -1)]
            node.p2p.send_message(msg)
            # at the limit the request is serviced: headers for getheaders,
            # an inv/block for getblocks
            if type(msg) == msg_getheaders:
                node.p2p.wait_for_header(int(node.getbestblockhash(), 16))
            else:
                node.p2p.wait_for_block(int(node.getbestblockhash(), 16))
if __name__ == '__main__':
    # Entry point: run the functional test when executed directly.
    InvalidLocatorTest().main()
| mit |
spreg-git/pysal | pysal/esda/smoothing.py | 3 | 53684 | """
Apply smoothing to rate computation
[Longer Description]
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
import pysal
from pysal.weights import comb, Kernel
from pysal.cg import Point, Ray, LineSegment
from pysal.cg import get_angle_between, get_points_dist, get_segment_point_dist
from pysal.cg import get_point_at_angle_and_dist, convex_hull
from pysal.common import np, KDTree
from pysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def flatten(l, unique=True):
    """flatten a list of lists

    Parameters
    ----------
    l : list
        of lists (any sequence elements that support iteration)
    unique : boolean
        whether or not only unique items are wanted (default=True);
        note the unique result has no guaranteed order (set-based)

    Returns
    -------
    list
        of single items

    Examples
    --------
    >>> l = [[1, 2], [3, 4, ], [5, 6]]
    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]

    Notes
    -----
    Fixes vs. the original: ``reduce(lambda x, y: x + y, l)`` raised
    TypeError on an empty ``l`` and was O(n^2) in the number of elements;
    ``itertools.chain`` handles the empty case and is linear.
    """
    from itertools import chain
    flat = list(chain.from_iterable(l))
    if unique:
        return list(set(flat))
    return flat
def weighted_median(d, w):
    """A utility function to find a median of d based on w

    Parameters
    ----------
    d : array
        (n, 1), variable for which median will be found
    w : array
        (n, 1), variable on which d's medain will be decided

    Notes
    -----
    d and w are arranged in the same order.
    Fix vs. the original: ``np.array(zip(w, d), ...)`` relied on Python 2's
    eager zip; wrapping in ``list()`` is identical on Python 2 and also
    works on Python 3, where zip is lazy.

    Returns
    -------
    float
        median of d

    Examples
    --------
    >>> d = np.array([5,4,3,1,2])
    >>> w = np.array([10, 22, 9, 2, 5])
    >>> weighted_median(d, w)
    4
    """
    dtype = [('w', '%s' % w.dtype), ('v', '%s' % d.dtype)]
    d_w = np.array(list(zip(w, d)), dtype=dtype)
    d_w.sort(order='v')
    # cumulative weight after sorting by value
    reordered_w = d_w['w'].cumsum()
    cumsum_threshold = reordered_w[-1] * 1.0 / 2
    # first index whose cumulative weight reaches half the total
    median_inx = (reordered_w >= cumsum_threshold).nonzero()[0][0]
    if reordered_w[median_inx] == cumsum_threshold and len(d) - 1 > median_inx:
        # exact half-split: average the two straddling values
        return np.sort(d)[median_inx:median_inx + 2].mean()
    return np.sort(d)[median_inx]
def sum_by_n(d, w, n):
    """A utility function to summarize a data array into n values
    after weighting the array with another weight array w

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups
        t = c*n (c is a constant)

    Returns
    -------
    : array
        (n, 1), an array with summarized values

    Examples
    --------
    >>> d = np.array([10, 9, 20, 30])
    >>> w = np.array([0.5, 0.1, 0.3, 0.8])
    >>> n = 2
    >>> sum_by_n(d, w, n)
    array([  5.9,  30. ])
    """
    t = len(d)
    # // keeps the group size an int on Python 3 as well; on Python 2 it is
    # identical to the original `/` for int operands (range() needs an int)
    h = t // n
    d = d * w
    return np.array([sum(d[i: i + h]) for i in range(0, t, h)])
def crude_age_standardization(e, b, n):
    """A utility function to compute rate through crude age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units

    Notes
    -----
    e and b are arranged in the same order.
    Fix vs. the original: ``repeat(len(e) / n)`` relied on Python 2 integer
    division; ``//`` is identical there and keeps the repeat count an int
    on Python 3.

    Returns
    -------
    : array
        (n, 1), age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> n = 2
    >>> crude_age_standardization(e, b, n)
    array([ 0.2375    ,  0.26666667])
    """
    r = e * 1.0 / b
    b_by_n = sum_by_n(b, 1.0, n)
    # each age group's share of its unit's total population
    age_weight = b * 1.0 / b_by_n.repeat(len(e) // n)
    return sum_by_n(r, age_weight, n)
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s : array
        (n*h, 1), standard population for each age group across n spatial units
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order.
    Fixes vs. the original: (1) the zero-count branch evaluated
    ``0.5 * chi2(1 - 0.5 * alpha)``, which multiplies a frozen distribution
    object and raises TypeError; (2) group sizes used Python-2-only integer
    ``/``; (3) the unused local ``summed_b`` is removed.

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits
        age standardized rates and confidence intervals

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])
    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> [i[0] for i in direct_age_standardization(e, b, s, n)]
    [0.023744019138755977, 0.026650717703349279]
    """
    # weight for each age group: standard-population share scaled by 1/b
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # gamma-approximation parameters for the confidence interval
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    h = len(b) // n  # age groups per unit; // keeps this an int on Python 3
    k = [age_weight[i:i + h].max() for i in range(0, len(b), h)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # exact Poisson upper bound for a zero count:
            # 0.5 * chi-square quantile with 2 degrees of freedom
            # NOTE(review): the original `0.5 * chi2(1 - 0.5 * alpha)` was a
            # runtime TypeError; confirm this replacement against the
            # reference (Fay & Feuer 1997).
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """A utility function to compute rate through indirect age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order.
    Fix vs. the original: the result is wrapped in ``list(...)`` so a list
    is returned on Python 3 too (zip is lazy there); identical on Python 2.

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits
        age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, n)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    s_r_all = sum(s_e * 1.0) / sum(s_b * 1.0)  # overall standard rate
    adjusted_r = s_r_all * smr
    e_by_n = sum_by_n(e, 1.0, n)
    # confidence interval built on log(SMR), normal approximation
    log_smr = np.log(smr)
    log_smr_sd = 1.0 / np.sqrt(e_by_n)
    norm_thres = norm.ppf(1 - 0.5 * alpha)
    log_smr_lower = log_smr - norm_thres * log_smr_sd
    log_smr_upper = log_smr + norm_thres * log_smr_sd
    smr_lower = np.exp(log_smr_lower) * s_r_all
    smr_upper = np.exp(log_smr_upper) * s_r_all
    res = list(zip(adjusted_r, smr_lower, smr_upper))
    return res
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """Compute the standardized mortality ratio (SMR): observed events per
    unit divided by the events expected if the unit's population experienced
    the standard population's age-specific rates.

    Parameters
    ----------
    e : array
        (n*h, 1), events per age group across n spatial units
    b : array
        (n*h, 1), population at risk per age group across n spatial units
    s_e : array
        (n*h, 1), standard-population events per age group
    s_b : array
        (n*h, 1), standard-population sizes per age group
    n : integer
        the number of spatial units (all arrays aligned, h groups per unit)

    Returns
    -------
    array
        (n,), one SMR per spatial unit

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> standardized_mortality_ratio(e, b, s_e, s_b, 2)
    array([ 2.48691099,  2.73684211])
    """
    standard_rates = s_e * 1.0 / s_b
    group_size = len(e) // n
    starts = range(0, len(e), group_size)
    # observed events per unit
    observed = np.array([e[i:i + group_size].sum() * 1.0 for i in starts])
    # expected events per unit under the standard age-specific rates
    scaled = b * standard_rates
    expected = np.array([scaled[i:i + group_size].sum() for i in starts])
    return observed / expected
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities: for each spatial unit, the Poisson tail
    probability of observing a count at least as extreme as the one seen,
    given the overall rate applied to the unit's population.

    Parameters
    ----------
    e : array(n*h, 1)
        events per age group across n spatial units
    b : array(n*h, 1)
        population at risk per age group across n spatial units
    n : integer
        the number of spatial units
    threshold : float
        p-values at or above this value are reported as 0.0

    Returns
    -------
    : array (nx1)

    References
    ----------
    [1] M. Choynowski. 1959. Maps based on probabilities. Journal of the
    American Statistical Association, 54, 385-388.

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> print choynowski(e, b, 2)
    [ 0.30437751  0.29367033]
    """
    group_size = len(e) // n
    starts = range(0, len(e), group_size)
    events = np.array([e[i:i + group_size].sum() * 1.0 for i in starts])
    pops = np.array([b[i:i + group_size].sum() * 1.0 for i in starts])
    # expected counts under the global rate
    expected = (events.sum() / pops.sum()) * pops
    probs = []
    for observed, mean in zip(events, expected):
        if observed <= mean:
            # lower tail: P(X <= observed)
            probs.append(poisson.cdf(observed, mean))
        else:
            # upper tail: P(X >= observed)
            probs.append(1 - poisson.cdf(observed - 1, mean))
    if threshold:
        probs = [p if p < threshold else 0.0 for p in probs]
    return np.array(probs)
def assuncao_rate(e, b):
    """Standardize raw rates using the mean and standard deviation of the
    Empirical Bayes rate estimates; used for the EB-corrected Moran's I.

    Parameters
    ----------
    e : array(n, 1)
        events measured at n spatial units
    b : array(n, 1)
        population at risk at the same n spatial units (aligned with e)

    Returns
    -------
    : array (nx1)

    References
    ----------
    [1] Assuncao R. M. and Reis E. A., 1999, A new proposal to adjust Moran's I
    for population density. Statistics in Medicine, 18, 2147-2162.

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> print assuncao_rate(e, b)[:4]
    [ 1.04319254 -0.04117865 -0.56539054 -1.73762547]
    """
    raw = e * 1.0 / b
    total_e = sum(e)
    total_b = sum(b)
    # EB prior mean: the global rate
    prior_mean = total_e * 1.0 / total_b
    # population-weighted variance of the raw rates around the prior mean
    s2 = sum(b * ((raw - prior_mean) ** 2)) / total_b
    prior_var = s2 - prior_mean / (total_b / len(e))
    eb_var = prior_var + prior_mean / b
    return (raw - prior_mean) / np.sqrt(eb_var)
class Excess_Risk:
    """Excess Risk: observed events divided by the events expected under the
    overall (global) rate.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        excess risk values (1.0 means exactly the global rate)

    Examples
    --------
    >>> stl = pysal.open(pysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> er = Excess_Risk(stl_e, stl_b)
    >>> er.r[:10]
    array([ 0.20665681,  0.43613787,  0.42078261,  0.22066928,  0.57981596,
            0.35301709,  0.56407549,  0.17020994,  0.3052372 ,  0.25821905])
    """
    def __init__(self, e, b):
        # expected events if every unit had the global rate
        overall_rate = e.sum() * 1.0 / b.sum()
        expected = b * overall_rate
        self.r = e * 1.0 / expected
class Empirical_Bayes:
    """Aspatial Empirical Bayes smoothing: each raw rate is shrunk toward
    the global rate, with shrinkage weight determined by the method-of-
    moments estimate of the prior variance.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    >>> stl = pysal.open(pysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> eb = Empirical_Bayes(stl_e, stl_b)
    >>> eb.r[:10]
    array([  2.36718950e-05,   4.54539167e-05,   4.78114019e-05,
             2.76907146e-05,   6.58989323e-05,   3.66494122e-05,
             5.79952721e-05,   2.03064590e-05,   3.31152999e-05,
             3.02748380e-05])
    """
    def __init__(self, e, b):
        total_e = e.sum() * 1.0
        total_b = b.sum() * 1.0
        prior_mean = total_e / total_b
        raw = e * 1.0 / b
        deviation = raw - prior_mean
        # method-of-moments prior variance estimate
        weighted_var = (b * deviation * deviation).sum() * 1.0 / total_b
        prior_var = weighted_var - prior_mean * 1.0 / b.mean()
        # shrinkage weight toward the raw rate (elementwise in b)
        weight = prior_var / (prior_var + prior_mean / b)
        self.r = weight * raw + (1.0 - weight) * prior_mean
class Spatial_Empirical_Bayes:
    """Spatial Empirical Bayes rate smoothing.

    Like aspatial Empirical Bayes, but the prior for each unit is
    estimated from its spatial neighborhood (the unit plus its
    neighbors in ``w``) rather than from the whole study area.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
        must have its id_order set

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial Empirical Bayes smoothing
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        # spatially lagged mean rate serves as each unit's local prior
        r_mean = Spatial_Rate(e, b, w).r
        rate = e * 1.0 / b
        r_var_left = np.ones(len(e)) * 1.
        ngh_num = np.ones(len(e))
        # population at risk of each unit's neighborhood (neighbors + itself)
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            # BUG FIX: wrap in list() -- on Python 3 dict.keys() returns a
            # view that cannot be concatenated with a list (identical on
            # Python 2, where keys() is already a list)
            ngh = list(w[idv].keys()) + [idv]
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # population-weighted squared deviation of neighborhood rates
            # from the local prior mean
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        r_var_right = r_mean / (bi / ngh_num)
        r_var = r_var_left - r_var_right
        # clamp negative variance estimates to zero (no shrinkage reversal)
        r_var[r_var < 0] = 0.0
        # shrink each crude rate toward its neighborhood prior
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate:
    """Spatial rate smoothing.

    The smoothed rate of a unit is the ratio of total events to total
    population at risk over the unit and its neighbors in ``w``.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
        must have its id_order set

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        else:
            # binary transform so the lag is a plain neighborhood sum
            w.transform = 'b'
            e_lag, b_lag = slag(w, e), slag(w, b)
            self.r = (e + e_lag) / (b + b_lag)
            # restore the weights' original transform
            w.transform = 'o'
class Kernel_Smoother:
    """Kernel rate smoothing.

    The smoothed rate at each unit is the kernel-weighted sum of events
    divided by the kernel-weighted sum of populations at risk.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance
        must have its id_order set

    Attributes
    ----------
    r : array (n, 1)
        rate values from kernel smoothing
    """
    def __init__(self, e, b, w):
        # BUG FIX: the original raised `Error(...)`, an undefined name, so a
        # wrong weights type produced a NameError instead of a useful
        # message; ValueError matches the other smoothers. isinstance()
        # (rather than an exact type comparison) also accepts Kernel
        # subclasses.
        if not isinstance(w, Kernel):
            raise ValueError('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        else:
            w_e, w_b = slag(w, e), slag(w, b)
            self.r = w_e / w_b
class Age_Adjusted_Smoother:
    """Age-adjusted rate smoothing.

    Events and populations for each age group are first smoothed with
    simple binary spatial weights; the smoothed age-specific counts are
    then combined by direct age standardization.

    Parameters
    ----------
    e : array (n*h, 1)
        event variable for each of h age groups across n spatial units
    b : array (n*h, 1)
        population at risk for each age group across n spatial units
    w : spatial weights instance
    s : array (n*h, 1)
        standard population for each age group across n spatial units
    alpha : float
        significance level passed to direct_age_standardization

    Attributes
    ----------
    r : array (n, 1)
        age-adjusted smoothed rate values

    Notes
    -----
    Weights used to smooth age-specific events and populations are
    simple binary weights.
    """
    def __init__(self, e, b, w, s, alpha=0.05):
        t = len(e)
        # BUG FIX: number of age groups must stay an integer -- plain `/`
        # yields a float under Python 3 and breaks range() below; `//` is
        # identical to `/` for ints on Python 2.
        h = t // w.n
        w.transform = 'b'
        e_n, b_n = [], []
        # spatially lag each age group separately (groups are interleaved
        # in e and b with stride h)
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        # keep only the point estimates, dropping the confidence bounds
        self.r = np.array([i[0] for i in r])
        w.transform = 'o'
class Disk_Smoother:
    """Locally weighted averages (disk smoothing).

    Each unit's rate is replaced by the weighted average of the crude
    rates over its neighborhood as defined by ``w``.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix
        must have its id_order set

    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        else:
            crude = e * 1.0 / b
            # total weight assigned to each unit's neighborhood
            totals = [sum(w.weights[uid]) for uid in w.id_order]
            self.r = slag(w, crude) / np.array(totals)
class Spatial_Median_Rate:
    """Spatial median rate smoothing.

    Each unit's rate is replaced by the median of the crude rates over
    the unit and its neighbors, optionally weighted by an auxiliary
    variable and optionally iterated.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
        must have its id_order set
    aw : array (n, 1) or None
        auxiliary weight variable measured across n spatial units
    iteration : integer
        number of smoothing passes to apply

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial median rate smoothing
    w : spatial weights instance
    aw : array (n, 1) or None
        auxiliary weights as given
    """
    def __init__(self, e, b, w, aw=None, iteration=1):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        self.r = e * 1.0 / b
        self.aw, self.w = aw, w
        # each pass replaces every rate with its neighborhood median
        remaining = iteration
        while remaining:
            self.__search_median()
            remaining -= 1

    def __search_median(self):
        rates, aux, w = self.r, self.aw, self.w
        smoothed = []
        if aux is None:
            # unweighted: plain median over the unit and its neighbors
            for i, key in enumerate(w.id_order):
                disk = np.append(rates[i], rates[w.neighbor_offsets[key]])
                smoothed.append(np.median(disk))
        else:
            # weighted: median of neighborhood rates using auxiliary weights
            for i, key in enumerate(w.id_order):
                members = [i] + list(w.neighbor_offsets[key])
                smoothed.append(weighted_median(rates[members], aux[members]))
        self.r = np.array(smoothed)
class Spatial_Filtering:
    """Spatial filtering.

    Rates are computed at the points of a regular grid laid over the
    bounding box, using either a fixed-radius moving window (``r``) or
    an adaptive window that grows until it contains ``pop`` people.

    Parameters
    ----------
    bbox : list of two [x, y] pairs
        lower-left and upper-right corners bounding the n spatial units
    data : array (n, 2)
        x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
        number of grid cells on the x axis
    y_grid : integer
        number of grid cells on the y axis
    r : float or None
        fixed radius of the moving window
    pop : integer or None
        population threshold used to grow adaptive moving windows

    Attributes
    ----------
    grid : list of (x, y) tuples (x_grid*y_grid of them)
        coordinates of the grid points
    r : array (x_grid*y_grid, 1)
        rate values for the grid points

    Notes
    -----
    No tool is provided to find an optimal value for r or pop.
    """
    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:x_range / x_grid,
                        bbox[0][1]:bbox[1][1]:y_range / y_grid]
        # BUG FIX: materialize with list() -- on Python 3 zip() returns a
        # one-shot iterator, which would be exhausted by the first KDTree
        # query and leave self.grid empty (on Python 2 this is identical)
        self.grid = list(zip(x.ravel(), y.ravel()))
        self.r = []
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        if r is not None:
            # fixed-radius window: pooled rate over all points within r
            pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
            for i in pnts_in_disk:
                # fresh name instead of rebinding the parameter `r`
                rate = e[i].sum() * 1.0 / b[i].sum()
                self.r.append(rate)
        if pop is not None:
            # adaptive window: take nearest points while cumulative
            # population stays within the threshold
            half_nearest_pnts = data_tree.query(self.grid, k=len(e))[1]
            for i in half_nearest_pnts:
                e_n, b_n = e[i].cumsum(), b[i].cumsum()
                b_n_filter = b_n <= pop
                e_n_f, b_n_f = e_n[b_n_filter], b_n[b_n_filter]
                if len(e_n_f) == 0:
                    # fall back to the single nearest point when even it
                    # exceeds the population threshold
                    e_n_f = e_n[[0]]
                    b_n_f = b_n[[0]]
                self.r.append(e_n_f[-1] * 1.0 / b_n_f[-1])
        self.r = np.array(self.r)
class Headbanging_Triples:
    """Generate a pseudo spatial weights instance that contains
    headbanging triples.

    A triple for an observation is a pair of its neighbors that are
    roughly collinear with it (the angle formed at the observation
    exceeds ``angle``). Triples are the screening neighborhoods used by
    headbanging median rate smoothing.

    Parameters
    ----------
    data : array (n, 2)
        numpy array of x, y coordinates
    w : spatial weights instance
        nearest-neighbor based; id_order must be set
    k : integer
        number of nearest neighbors
    t : integer
        maximum number of triples kept per observation
    angle : number between 0 and 180
        angle criterion for a pair of neighbors to form a triple
    edgecor : boolean
        whether or not correction for edge points is made

    Attributes
    ----------
    triples : dictionary
        key is an observation record id, value is a list of triples,
        each triple being a pair of neighbor ids
    extra : dictionary (only set when edgecor is True)
        key is an observation record id, value is a list of:
        the tuple of original triple observations, the distance between
        them, and the distance between one of them and its extrapolated
        point
    """
    def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        if k < 3:
            raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of data")
        # points maps (index, Point) -> {neighbor index: neighbor Point}
        self.triples, points = {}, {}
        for i, pnt in enumerate(data):
            ng = w.neighbor_offsets[i]
            points[(i, Point(pnt))] = dict(zip(ng, [Point(d)
                                                    for d in data[ng]]))
        for i, pnt in points.keys():
            ng = points[(i, pnt)]
            tr, tr_dis = {}, []
            # consider every pair of neighbors; keep pairs whose angle at
            # pnt is wide enough (i.e. near-collinear with pnt)
            for c in comb(ng.keys(), 2):
                p2, p3 = ng[c[0]], ng[c[-1]]
                ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
                if ang > angle or (ang < 0.0 and ang + 360 > angle):
                    tr[tuple(c)] = (p2, p3)
            if len(tr) > t:
                # too many candidates: keep the t pairs whose connecting
                # segment passes closest to pnt
                for c in tr.keys():
                    p2, p3 = tr[c]
                    tr_dis.append((get_segment_point_dist(
                        LineSegment(p2, p3), pnt), c))
                tr_dis = sorted(tr_dis)[:t]
                self.triples[i] = [trp for dis, trp in tr_dis]
            else:
                self.triples[i] = tr.keys()
        if edgecor:
            # edge correction: convex-hull observations with no triples
            # get one extrapolated triple each
            self.extra = {}
            ps = dict([(p, i) for i, p in points.keys()])
            chull = convex_hull(ps.keys())
            chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
            for point in chull:
                key = (ps[point], point)
                ng = points[key]
                # examine neighbors from nearest to farthest
                ng_dist = [(get_points_dist(point, p), p) for p in ng.values()]
                ng_dist_s = sorted(ng_dist, reverse=True)
                extra = None
                while extra is None and len(ng_dist_s) > 0:
                    p2 = ng_dist_s.pop()[-1]
                    p3s = ng.values()
                    p3s.remove(p2)
                    for p3 in p3s:
                        dist_p2_p3 = get_points_dist(p2, p3)
                        dist_p_p2 = get_points_dist(point, p2)
                        dist_p_p3 = get_points_dist(point, p3)
                        # anchor the rays at whichever of p2/p3 is closer
                        # to the edge point
                        if dist_p_p2 <= dist_p_p3:
                            ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
                        else:
                            ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
                        ang = get_angle_between(ray1, ray2)
                        if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
                            # extrapolate a pseudo-point along ray1 and
                            # record the resulting triple
                            ex_point = get_point_at_angle_and_dist(
                                ray1, angle, dist)
                            extra = [c, dist_p2_p3, get_points_dist(
                                s_pnt, ex_point)]
                            break
                # NOTE(review): assumes a qualifying pair is always found;
                # if no candidate satisfies the angle test, extra stays
                # None and extra[0] below raises TypeError -- confirm
                self.triples[ps[point]].append(extra[0])
                self.extra[ps[point]] = extra
class Headbanging_Median_Rate:
    """Headbanging median rate smoothing.

    Each unit's crude rate is screened against the medians of the low
    and high ends of its headbanging triples and replaced accordingly,
    optionally weighted by an auxiliary variable and iterated.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    t : Headbanging_Triples instance
    aw : array (n, 1) or None
        auxiliary weight variable measured across n spatial units
    iteration : integer
        number of smoothing passes

    Attributes
    ----------
    r : array (n, 1)
        rate values from headbanging median smoothing
    """
    def __init__(self, e, b, t, aw=None, iteration=1):
        self.r = e * 1.0 / b
        self.tr, self.aw = t.triples, aw
        # BUG FIX: was hasattr(t, 'exta') (typo), so the edge-correction
        # triples produced with edgecor=True were silently ignored
        if hasattr(t, 'extra'):
            self.extra = t.extra
        while iteration:
            self.__search_headbanging_median()
            iteration -= 1

    def __get_screens(self, id, triples, weighted=False):
        """Return the screening values for observation `id`.

        Gives r[id] alone when there are no triples; otherwise a
        (rate, low, high) tuple, extended with two weight sums when
        `weighted` is True.
        """
        r, tr = self.r, self.tr
        if len(triples) == 0:
            return r[id]
        if hasattr(self, 'extra') and id in self.extra:
            # edge-corrected point: single triple with one member's rate
            # linearly extrapolated beyond the observed pair
            extra = self.extra
            trp = list(triples[0])
            trp_r = r[trp]
            trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            trp_r = sorted(trp_r)
            if not weighted:
                # BUG FIX: was `return r, ...`, leaking the whole rate
                # array where a scalar screen value is expected
                return r[id], trp_r[0], trp_r[-1]
            else:
                # BUG FIX: was self.aw[trp] with `trp` undefined (NameError)
                trp_aw = self.aw[trp]
                extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-1]) * (
                    extra[id][-1] * 1.0 / extra[id][1])
                return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
        if not weighted:
            lowest, highest = [], []
            for trp in triples:
                trp_r = np.sort(r[list(trp)])
                lowest.append(trp_r[0])
                highest.append(trp_r[-1])
            return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
        if weighted:
            lowest, highest = [], []
            lowest_aw, highest_aw = [], []
            for trp in triples:
                trp_r = r[list(trp)]
                # keep rate and member id together so each end's weight
                # can be looked up after sorting by rate
                dtype = [('r', '%s' % trp_r.dtype), ('w',
                                                     '%s' % self.aw.dtype)]
                # list(zip(...)) so this also works on Python 3, where
                # zip() returns an iterator
                trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
                trp_r.sort(order='r')
                lowest.append(trp_r['r'][0])
                highest.append(trp_r['r'][-1])
                # NOTE(review): member ids are stored with aw's dtype; if
                # aw is float, indexing aw with them may warn/fail on
                # modern numpy -- confirm before relying on this path
                lowest_aw.append(self.aw[trp_r['w'][0]])
                highest_aw.append(self.aw[trp_r['w'][-1]])
            wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
            wm_highest = weighted_median(
                np.array(highest), np.array(highest_aw))
            triple_members = flatten(triples, unique=False)
            return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()

    def __get_median_from_screens(self, screens):
        """Collapse the screen values into the new rate for one unit."""
        if isinstance(screens, float):
            # no triples: rate is left unchanged
            return screens
        elif len(screens) == 3:
            return np.median(np.array(screens))
        elif len(screens) == 5:
            rk, wm_lowest, wm_highest, w1, w2 = screens
            # keep the rate when it already lies between the weighted
            # screen medians; otherwise move it to the nearer screen only
            # when the triples' total weight dominates the unit's own
            if rk >= wm_lowest and rk <= wm_highest:
                return rk
            elif rk < wm_lowest and w1 < w2:
                return wm_lowest
            elif rk > wm_highest and w1 < w2:
                return wm_highest
            else:
                return rk

    def __search_headbanging_median(self):
        """One smoothing pass: recompute every unit's screened rate."""
        r, tr = self.r, self.tr
        new_r = []
        for k in tr.keys():
            screens = self.__get_screens(
                k, tr[k], weighted=(self.aw is not None))
            new_r.append(self.__get_median_from_screens(screens))
        self.r = np.array(new_r)
| bsd-3-clause |
Konubinix/weboob | modules/pastealacon/test.py | 3 | 3905 | # -*- coding: utf-8 -*-
# Copyright(C) 2011-2014 Laurent Bachelier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.base import NotLoaded
from weboob.capabilities.paste import PasteNotFound
from .browser import Spam
class PastealaconTest(BackendTest):
    """Functional tests for the pastealacon paste backend."""
    MODULE = 'pastealacon'

    def _get_paste(self, _id):
        """Fetch a known paste by id (or URL) through both fill methods."""
        # html method
        paste = self.backend.get_paste(_id)
        self.backend.fillobj(paste, ['title'])
        assert paste.title == u'ouiboube'
        assert paste.page_url.startswith('http://pastealacon.com/')
        assert u'héhéhé' in paste.contents
        assert paste.public is True

        # raw method
        paste = self.backend.get_paste(_id)
        self.backend.fillobj(paste, ['contents'])
        assert paste.title is NotLoaded
        assert paste.page_url.startswith('http://pastealacon.com/')
        assert u'héhéhé' in paste.contents
        assert paste.public is True

    def test_post(self):
        paste = self.backend.new_paste(None, title=u'ouiboube', contents=u'Weboob Test héhéhé')
        self.backend.post_paste(paste, max_age=3600*24)
        assert paste.id
        self.backend.fill_paste(paste, ['title'])
        assert paste.title == 'ouiboube'
        assert paste.id in paste.page_url
        assert paste.public is True

        # test all get methods from the Paste we just created
        self._get_paste(paste.id)
        # same but from the full URL
        self._get_paste('http://pastealacon.com/' + paste.id)

    def test_spam(self):
        spammy = self.backend.new_paste(None, title=u'viagra', contents=u'http://example.com/')
        self.assertRaises(Spam, self.backend.post_paste, spammy)

    def test_notfound(self):
        for _id in ('424242424242424242424242424242424242',
                    'http://pastealacon.com/424242424242424242424242424242424242'):
            # html method
            missing = self.backend.get_paste(_id)
            self.assertRaises(PasteNotFound, self.backend.fillobj, missing, ['title'])
            # raw method
            missing = self.backend.get_paste(_id)
            self.assertRaises(PasteNotFound, self.backend.fillobj, missing, ['contents'])

    def test_checkurl(self):
        # URLs or ids this backend cannot handle must yield None
        for candidate in ('http://pastebin.com/nJG9ZFG8',     # wrong domain
                          'http://pastealacon.com/nJG9ZFG8',  # right domain, but ids are numeric
                          'nJG9ZFG8'):
            assert self.backend.get_paste(candidate) is None

    def test_can_post(self):
        assert 0 == self.backend.can_post(u'hello', public=False)
        assert 1 <= self.backend.can_post(u'hello', public=True)
        assert 0 == self.backend.can_post(u'hello', public=True, max_age=600)
        assert 1 <= self.backend.can_post(u'hello', public=True, max_age=3600*24)
        assert 1 <= self.backend.can_post(u'hello', public=True, max_age=3600*24*3)
        assert 1 <= self.backend.can_post(u'hello', public=True, max_age=False)
        assert 1 <= self.backend.can_post(u'hello', public=None, max_age=False)
        assert 1 <= self.backend.can_post(u'hello', public=True, max_age=3600*24*40)
        assert 1 <= self.backend.can_post(u'héhé', public=True)
        assert 0 == self.backend.can_post(u'hello ♥', public=True)
| agpl-3.0 |
jamesylgan/szechuantech | python-scripts/urllib3/packages/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)

# Version-neutral aliases for the string/integer/class types that differ
# between Python 2 and 3. Each *_types name is a tuple so it can be passed
# directly to isinstance().
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe the platform word size: len() coerces __len__'s result to
        # Py_ssize_t, so returning 1 << 31 overflows only on 32-bit builds.
        class X(object):

            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        # drop the probe class so it doesn't leak into the module namespace
        del X
def _add_doc(func, doc):
    """Attach ``doc`` to ``func`` as its docstring."""
    setattr(func, "__doc__", doc)
def _import_module(name):
    """Import the dotted *name* and return its deepest submodule.

    ``__import__`` alone returns the top-level package, so the actual
    module object is fetched from ``sys.modules`` afterwards.
    """
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first access, caches it on
    the instance, and then removes itself from the class so subsequent
    lookups bypass the descriptor machinery entirely.

    Subclasses must provide ``_resolve()``.
    """

    def __init__(self, name):
        # attribute name this descriptor is installed under
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result) # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between Python 2 and 3.
    Resolves to *new* (defaulting to *name*) on Python 3 and to *old* on
    Python 2."""
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        return _import_module(self.mod)
    def __getattr__(self, attr):
        # Import the target module lazily on first attribute access and
        # memoize the attribute so __getattr__ is not hit again for it.
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    """Module whose attributes are _LazyDescr instances resolved on demand."""
    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__
    def __dir__(self):
        # Advertise the lazily-provided names alongside the module basics.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs
    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute (function/class) that moved modules,
    and possibly changed name, between Python 2 and 3."""
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # New attribute name defaults to the old one, then to *name*.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
# Single module-wide importer; every six.moves submodule is registered on it.
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""
    __path__ = [] # mark as package
# Table of (six-name, py2 location, py3 location[, py2 attr, py3 attr]) for
# everything exposed as six.moves.*; each entry is a lazy descriptor.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
    _moved_attributes += [
        MovedModule("winreg", "_winreg"),
    ]
# Install each descriptor on _MovedItems and register moved modules with the
# meta path importer so they are importable as real submodules.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_parse"""
# Names from Py2's urlparse/urllib that live in Py3's urllib.parse.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
# Register under both the underscore and dotted spellings.
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_error"""
# Exception classes that moved from urllib/urllib2 into urllib.error.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
# Register under both the underscore and dotted spellings.
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_request"""
# Names from Py2's urllib/urllib2 that live in Py3's urllib.request.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
# Register under both the underscore and dotted spellings.
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_response"""
# Response wrapper classes that moved from urllib into urllib.response.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
# Register under both the underscore and dotted spellings.
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
# robotparser became urllib.robotparser in Python 3.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
# Register under both the underscore and dotted spellings.
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = [] # mark as package
    # Each attribute is the previously-registered lazy submodule.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")
    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Add an item to six.moves."""
    # Install on the class so every existing `moves` instance sees it.
    name = move.name
    setattr(_MovedItems, name, move)
def remove_move(name):
    """Remove item from six.moves."""
    # First try the lazy descriptor on the class; a resolved move may instead
    # live in the module's instance dict.
    try:
        delattr(_MovedItems, name)
        return
    except AttributeError:
        pass
    try:
        del moves.__dict__[name]
    except KeyError:
        raise AttributeError("no such move, %r" % (name,))
# Attribute names used for function/method introspection differ by version.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
# next() builtin does not exist before Python 2.6; fall back to .next().
try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator
# callable() was removed in early Python 3; emulate via __call__ in the MRO.
try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        # Py3 has no unbound methods; functions are returned unchanged.
        return unbound
    create_bound_method = types.MethodType
    def create_unbound_method(func, cls):
        return func
    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func
    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)
    def create_unbound_method(func, cls):
        return types.MethodType(func, None, cls)
    class Iterator(object):
        # Bridge base class: subclasses define __next__; Py2 calls next().
        def next(self):
            return type(self).__next__(self)
    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Version-neutral accessors built on the attribute names chosen above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
# Dictionary iteration helpers: Py3 view methods vs Py2 iter*/view* methods.
if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))
    def itervalues(d, **kw):
        return iter(d.values(**kw))
    def iteritems(d, **kw):
        return iter(d.items(**kw))
    def iterlists(d, **kw):
        # d must expose .lists() (multidict-style); plain dicts do not.
        return iter(d.lists(**kw))
    viewkeys = operator.methodcaller("keys")
    viewvalues = operator.methodcaller("values")
    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)
    def itervalues(d, **kw):
        return d.itervalues(**kw)
    def iteritems(d, **kw):
        return d.iteritems(**kw)
    def iterlists(d, **kw):
        return d.iterlists(**kw)
    viewkeys = operator.methodcaller("viewkeys")
    viewvalues = operator.methodcaller("viewvalues")
    viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
# Byte/text literal helpers and byte-indexing shims for both major versions.
if PY3:
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    unichr = chr
    import struct
    int2byte = struct.Struct(">B").pack
    del struct
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    # unittest renamed these asserts over time; record the current spellings.
    _assertCountEqual = "assertCountEqual"
    if sys.version_info[1] <= 1:
        _assertRaisesRegex = "assertRaisesRegexp"
        _assertRegex = "assertRegexpMatches"
    else:
        _assertRaisesRegex = "assertRaisesRegex"
        _assertRegex = "assertRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash
    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
# Dispatchers that invoke the version-appropriate unittest assertion.
def assertCountEqual(self, *args, **kwargs):
    return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    return getattr(self, _assertRegex)(*args, **kwargs)
# exec/raise have incompatible syntax across versions, so each side defines
# the other's form inside a string compiled at runtime.
if PY3:
    exec_ = getattr(moves.builtins, "exec")
    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals, like the exec statement.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
    # Three-argument raise is a syntax error on Py3; hide it inside exec_.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
    exec_("""def raise_from(value, from_value):
    if from_value is None:
        raise value
    raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        raise value
# Prefer the real print function when the builtins module provides one.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        # If any piece is unicode, separators must be unicode too so the
        # final concatenation does not fail or coerce unexpectedly.
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    # Backport Python 3.3's flush= keyword on top of the chosen print_.
    _print = print_
    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()
_add_doc(reraise, """Reraise an exception.""")
# functools.wraps before Python 3.4 does not set __wrapped__; backport that.
if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # The trick: return a throwaway class whose one-shot metaclass, when the
    # caller's class statement instantiates it, discards the temporary base
    # and builds the real class directly with *meta* and *bases*.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    temporary = type.__new__(metaclass, 'temporary_class', (), {})
    return temporary
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        # Rebuild the class through *metaclass*, copying its namespace but
        # dropping artifacts that the new class will regenerate itself.
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            # Slot descriptors belong to the old class; remove them so the
            # new class creates fresh ones.
            for slot_name in slots:
                body.pop(slot_name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    # Guard clause: on Python 3 the class is returned untouched.
    if not PY2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # The author's text-returning __str__ becomes __unicode__, and __str__
    # is replaced with a UTF-8-encoding wrapper as Python 2 expects bytes.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| mit |
RobertoMalatesta/shedskin | examples/loop.py | 6 | 16880 | """
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http:#www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This version is designed for ShedSkin
D code translated to Python by leonardo maffi, v.1.0, Jun 14 2011
"""
import sys
# The loop finder's DFS recurses once per CFG node; deep synthetic graphs
# exceed the default limit, so raise it up front.
sys.setrecursionlimit(100000)
from sys import stdout
class Simple_loop(object):
    """
    Basic representation of loops, a loop has an entry point,
    one or more exit edges, a set of basic blocks, and potentially
    an outer loop - a "parent" loop.
    Furthermore, it can have any set of properties, e.g.,
    it can be an irreducible loop, have control flow, be
    a candidate for transformations, and what not.
    """
    def __init__(self):
        self.basic_blocks_ = set()   # basic blocks belonging to this loop
        self.children_ = set()       # directly nested loops
        self.parent_ = None          # enclosing loop, set via set_parent()
        self.is_root_ = False        # True only for the artificial root loop
        self.counter_ = 0            # unique id assigned by the loop graph
        self.nesting_level_ = 0
        self.depth_level_ = 0
    def add_node(self, basic_block):
        self.basic_blocks_.add(basic_block)
    def add_child_loop(self, loop):
        self.children_.add(loop)
    def dump(self):
        # Simplified for readability purposes.
        # Fix: use the print() function form so this module also parses under
        # Python 3; on Python 2, printing one parenthesized value produces
        # byte-for-byte identical output.
        print("loop-%d, nest: %d, depth: %d" % (self.counter_, self.nesting_level_, self.depth_level_))
    # Getters/Setters
    def set_parent(self, parent):
        # Linking is bidirectional: record the parent and register ourselves
        # as its child.
        self.parent_ = parent
        parent.add_child_loop(self)
    def set_nesting_level(self, level):
        self.nesting_level_ = level
        if level == 0:
            # Nesting level 0 marks the artificial root of the loop forest.
            self.is_root_ = True
class Loop_structure_graph(object):
    """
    Maintain loop structure for a given cfg.
    Two values are maintained for this loop graph, depth, and nesting level.
    For example:
    loop        nesting level    depth
    ---------------------------------------
    loop-0      2                0
    loop-1      1                1
    loop-3      1                1
    loop-2      0                2
    """
    def __init__(self):
        self.loops_ = []
        self.loop_counter_ = 0
        # Create the artificial root loop that parents all top-level loops.
        self.root_ = Simple_loop()
        self.root_.set_nesting_level(0) # make it the root node
        self.root_.counter_ = self.loop_counter_
        self.loop_counter_ += 1
        self.loops_.append(self.root_)
    def create_new_loop(self):
        """Allocate a loop with a unique counter; the caller links it in."""
        loop = Simple_loop()
        loop.counter_ = self.loop_counter_
        self.loop_counter_ += 1
        return loop
    def dump(self):
        self.dump_rec(self.root_, 0)
    def dump_rec(self, loop, indent):
        # Simplified for readability purposes.
        loop.dump()
        for liter in loop.children_:
            pass # self.dump_rec(liter, indent + 1)
    def calculate_nesting_level(self):
        """Assign depth and nesting levels across the whole loop forest."""
        # link up all 1st level loops to artificial root node.
        for loop in self.loops_:
            if loop.is_root_:
                continue
            if not loop.parent_:
                loop.set_parent(self.root_)
        # recursively traverse the tree and assign levels.
        self.calculate_nesting_level_rec(self.root_, 0)
    def calculate_nesting_level_rec(self, loop, depth):
        """Recursive worker: depth grows downward, nesting level bubbles up."""
        loop.depth_level_ = depth
        for ch in loop.children_:
            # Bug fix: the recursive call was missing ``self.`` and raised
            # NameError for any loop that actually had children.
            self.calculate_nesting_level_rec(ch, depth + 1)
            loop.nesting_level_ = max(loop.nesting_level_, 1 + ch.nesting_level_)
#======================================================
# Main Algorithm
#======================================================
class Union_find_node(object): # add __slots__ *******************************************
    """
    Union/Find algorithm after Tarjan, R.E., 1983, Data Structures
    and Network Algorithms.
    """
    def init(self, bb, dfs_number):
        # Two-phase construction: nodes are allocated in bulk, then
        # initialized individually once their DFS number is known.
        self.parent_ = self
        self.bb_ = bb
        self.dfs_number_ = dfs_number
        self.loop_ = None
    def find_set(self):
        """
        Union/Find Algorithm - The find routine.
        Implemented with Path Compression (inner loops are only
        visited and collapsed once, however, deep nests would still
        result in significant traversals).
        """
        trail = []
        current = self
        # Walk up until the representative (a node that is its own parent),
        # remembering every node that is not already one hop from the top.
        while current != current.parent_:
            if current.parent_ != current.parent_.parent_:
                trail.append(current)
            current = current.parent_
        # Path Compression, all nodes' parents point to the 1st level parent.
        for visited in trail:
            visited.parent_ = current.parent_
        return current
    def do_union(self, B):
        """Union by simple re-parenting; find_set's compression keeps trees flat."""
        self.parent_ = B
class Basic_block_class(object):
    """Enumeration of basic-block classifications used by the loop finder."""
    TOP = 0 # uninitialized
    NONHEADER = 1 # a regular BB
    REDUCIBLE = 2 # reducible loop
    SELF = 3 # single BB loop
    IRREDUCIBLE = 4 # irreducible loop
    DEAD = 5 # a dead BB
    LAST = 6 # Sentinel
class Havlak_loop_finder(object):
    """
    Loop Recognition
    based on:
    Paul Havlak, Nesting of Reducible and Irreducible Loops,
    Rice University.
    We avoid doing tree balancing and instead use path compression
    to avoid traversing parent pointers over and over.
    Most of the variable names and identifiers are taken literally
    from this paper (and the original Tarjan paper mentioned above).
    """
    def __init__(self, cfg, lsg):
        self.cfg_ = cfg # current control flow graph.
        self.lsg_ = lsg # loop forest.
    # Constants
    #/ Marker for uninitialized nodes.
    K_UNVISITED = -1
    #/ Safeguard against pathologic algorithm behavior.
    K_MAX_NON_BACK_PREDS = 32 * 1024
    """
    As described in the paper, determine whether a node 'w' is a
    "True" ancestor for node 'v'.
    Dominance can be tested quickly using a pre-order trick
    for depth-first spanning trees. This is why dfs is the first
    thing we run below.
    """
    @staticmethod
    def is_ancestor(w, v, last):
        # w is an ancestor of v iff v's preorder number lies in w's subtree
        # interval [w, last[w]].
        return w <= v and v <= last[w] # improve this ************************************************
    @staticmethod
    def dfs(current_node, nodes, number, last, current):
        #/ Simple depth first traversal along out edges with node numbering.
        # Assigns preorder numbers (number), records the highest number in
        # each subtree (last), and initializes the union-find nodes.
        nodes[current].init(current_node, current)
        number[current_node] = current
        lastid = current
        for target in current_node.out_edges_:
            if number[target] == Havlak_loop_finder.K_UNVISITED:
                lastid = Havlak_loop_finder.dfs(target, nodes, number, last, lastid + 1)
        last[number[current_node]] = lastid
        return lastid
    """
    Find loops and build loop forest using Havlak's algorithm, which
    is derived from Tarjan. Variable names and step numbering has
    been chosen to be identical to the nomenclature in Havlak's
    paper (which is similar to the one used by Tarjan).
    """
    def find_loops(self):
        if not self.cfg_.start_node_:
            return
        size = len(self.cfg_.basic_block_map_)
        non_back_preds = [set() for _ in xrange(size)]
        back_preds = [[] for _ in xrange(size)]
        header = [0] * size
        # NOTE: 'type' shadows the builtin within this method; kept as-is to
        # match the paper's nomenclature.
        type = [0] * size
        last = [0] * size
        nodes = [Union_find_node() for _ in xrange(size)]
        number = {}
        # Step a:
        # - initialize all nodes as unvisited.
        # - depth-first traversal and numbering.
        # - unreached BB's are marked as dead.
        #
        for bblock in self.cfg_.basic_block_map_.itervalues():
            number[bblock] = Havlak_loop_finder.K_UNVISITED
        Havlak_loop_finder.dfs(self.cfg_.start_node_, nodes, number, last, 0)
        # Step b:
        # - iterate over all nodes.
        #
        # A backedge comes from a descendant in the dfs tree, and non-backedges
        # from non-descendants (following Tarjan).
        #
        # - check incoming edges 'v' and add them to either
        #   - the list of backedges (back_preds) or
        #   - the list of non-backedges (non_back_preds)
        for w in xrange(size):
            header[w] = 0
            type[w] = Basic_block_class.NONHEADER
            node_w = nodes[w].bb_
            if not node_w:
                type[w] = Basic_block_class.DEAD
                continue # dead BB
            if len(node_w.in_edges_):
                for node_v in node_w.in_edges_:
                    v = number[node_v]
                    if v == Havlak_loop_finder.K_UNVISITED:
                        continue # dead node
                    if Havlak_loop_finder.is_ancestor(w, v, last):
                        back_preds[w].append(v)
                    else:
                        non_back_preds[w].add(v)
        # Start node is root of all other loops.
        header[0] = 0
        # Step c:
        #
        # The outer loop, unchanged from Tarjan. It does nothing except
        # for those nodes which are the destinations of backedges.
        # For a header node w, we chase backward from the sources of the
        # backedges adding nodes to the set P, representing the body of
        # the loop headed by w.
        #
        # By running through the nodes in reverse of the DFST preorder,
        # we ensure that inner loop headers will be processed before the
        # headers for surrounding loops.
        for w in xrange(size-1, -1, -1):
            node_pool = [] # this is 'P' in Havlak's paper
            node_w = nodes[w].bb_
            if not node_w:
                continue # dead BB
            # Step d:
            for back_pred in back_preds[w]:
                if back_pred != w:
                    node_pool.append(nodes[back_pred].find_set())
                else:
                    type[w] = Basic_block_class.SELF
            # Copy node_pool to worklist.
            worklist = []
            for np in node_pool:
                worklist.append(np)
            if len(node_pool):
                type[w] = Basic_block_class.REDUCIBLE
            # work the list...
            #
            while len(worklist):
                x = worklist[0]
                # O(n) front-pop; the original author already flagged this.
                worklist = worklist[1:] # slow? *************************************************
                # Step e:
                #
                # Step e represents the main difference from Tarjan's method.
                # Chasing upwards from the sources of a node w's backedges. If
                # there is a node y' that is not a descendant of w, w is marked
                # the header of an irreducible loop, there is another entry
                # into this loop that avoids w.
                # The algorithm has degenerated. Break and
                # return in this case.
                non_back_size = len(non_back_preds[x.dfs_number_])
                if non_back_size > Havlak_loop_finder.K_MAX_NON_BACK_PREDS:
                    return
                for non_back_pred_iter in non_back_preds[x.dfs_number_]:
                    y = nodes[non_back_pred_iter]
                    ydash = y.find_set()
                    if not Havlak_loop_finder.is_ancestor(w, ydash.dfs_number_, last):
                        type[w] = Basic_block_class.IRREDUCIBLE
                        non_back_preds[w].add(ydash.dfs_number_)
                    else:
                        if ydash.dfs_number_ != w:
                            if ydash not in node_pool:
                                worklist.append(ydash)
                                node_pool.append(ydash)
            # Collapse/Unionize nodes in a SCC to a single node
            # For every SCC found, create a loop descriptor and link it in.
            #
            if len(node_pool) or type[w] == Basic_block_class.SELF:
                loop = self.lsg_.create_new_loop()
                # At this point, one can set attributes to the loop, such as:
                #
                # the bottom node:
                #    int[]::iterator iter = back_preds[w].begin()
                #    loop bottom is: nodes[*backp_iter].node)
                #
                # the number of backedges:
                #    back_preds[w].length
                #
                # whether this loop is reducible:
                #    type[w] != IRREDUCIBLE
                #
                # TODO(rhundt): Define those interfaces in the Loop Forest.
                #
                nodes[w].loop_ = loop
                for node in node_pool:
                    # Add nodes to loop descriptor.
                    header[node.dfs_number_] = w
                    node.do_union(nodes[w])
                    # Nested loops are not added, but linked together.
                    if node.loop_:
                        node.loop_.parent_ = loop
                    else:
                        loop.add_node(node.bb_)
                self.lsg_.loops_.append(loop)
def find_havlak_loops(cfg, lsg):
    """Run Havlak loop recognition on *cfg*, recording results in *lsg*.

    Returns the number of loops found.
    """
    Havlak_loop_finder(cfg, lsg).find_loops()
    return len(lsg.loops_)
def build_diamond(cfg, start):
    """Add a four-node diamond: start -> {start+1, start+2} -> start+3.

    Returns the merge node's number (start + 3).
    """
    for src, dst in ((start, start + 1),
                     (start, start + 2),
                     (start + 1, start + 3),
                     (start + 2, start + 3)):
        Basic_block_edge(cfg, src, dst)
    return start + 3
def build_connect(cfg, start, end):
    """Insert a single CFG edge from node *start* to node *end*."""
    Basic_block_edge(cfg, start, end)
def build_straight(cfg, start, n):
    """Chain n sequential edges: start -> start+1 -> ... -> start+n.

    Returns the number of the final node (start + n).
    """
    node = start
    while node < start + n:
        build_connect(cfg, node, node + 1)
        node += 1
    return start + n
def build_base_loop(cfg, from_n):
    # Builds the canonical test-loop shape used by main():
    #   header -> diamond1 -> d11 -> diamond2 -> footer
    # then closes back edges (diamond2 -> d11, diamond1 -> header,
    # footer -> from_n) so the loop finder has nested loops to discover.
    # Returns the node number following the footer.
    header = build_straight(cfg, from_n, 1)
    diamond1 = build_diamond(cfg, header)
    d11 = build_straight(cfg, diamond1, 1)
    diamond2 = build_diamond(cfg, d11)
    footer = build_straight(cfg, diamond2, 1)
    # Back edges close the loops.
    build_connect(cfg, diamond2, d11)
    build_connect(cfg, diamond1, header)
    build_connect(cfg, footer, from_n)
    footer = build_straight(cfg, footer, 1)
    return footer
# --- MOCKING CODE begin -------------------
# These data structures are stubbed out to make the code below easier to review.
class Basic_block_edge(object):
    """Basic_block_edge only maintains two pointers to BasicBlocks."""

    def __init__(self, cfg, from_name, to_name):
        # Nodes are created on demand; create_node returns the existing
        # block when the name has already been seen.
        self.from_ = cfg.create_node(from_name)
        self.to_ = cfg.create_node(to_name)
        # Register the edge on both endpoints and on the CFG itself.
        self.from_.out_edges_.append(self.to_)
        self.to_.in_edges_.append(self.from_)
        cfg.edge_list_.append(self)
class Basic_block(object):
    """A CFG node: a name plus lists of incoming and outgoing edges."""

    def __init__(self, name):
        self.name_ = name
        self.in_edges_ = []
        self.out_edges_ = []
class MaoCFG(object):
    """MaoCFG maintains a list of nodes."""

    def __init__(self):
        self.basic_block_map_ = {}  # name -> Basic_block
        self.start_node_ = None     # the very first node created
        self.edge_list_ = []

    def create_node(self, name):
        """Return the node for *name*, creating and registering it if new."""
        node = self.basic_block_map_.get(name)
        if node is None:
            node = Basic_block(name)
            self.basic_block_map_[name] = node
        # The first node ever created becomes the CFG's start node.
        if len(self.basic_block_map_) == 1:
            self.start_node_ = node
        return node
#--- MOCKING CODE end -------------------
def main():
print "Welcome to LoopTesterApp, Python edition"
print "Constructing App..."
cfg = MaoCFG()
lsg = Loop_structure_graph()
print "Constructing Simple cfg..."
cfg.create_node(0) # top
build_base_loop(cfg, 0)
cfg.create_node(1) # bottom
Basic_block_edge(cfg, 0, 2)
print "15000 dummy loops"
for dummyLoops in xrange(15000):
lsglocal = Loop_structure_graph()
find_havlak_loops(cfg, lsglocal)
print "Constructing cfg..."
n = 2
for parlooptrees in xrange(10):
cfg.create_node(n + 1)
build_connect(cfg, 2, n + 1)
n += 1
for i in xrange(100):
top = n
n = build_straight(cfg, n, 1)
for j in xrange(25):
n = build_base_loop(cfg, n)
bottom = build_straight(cfg, n, 1)
build_connect(cfg, n, top)
n = bottom
build_connect(cfg, n, 1)
print "Performing Loop Recognition\n1 Iteration"
numLoops = find_havlak_loops(cfg, lsg)
print "Another 50 iterations..."
sum = 0
for i in xrange(50):
lsg2 = Loop_structure_graph()
stdout.write(".")
sum += find_havlak_loops(cfg, lsg2)
print "\nFound %d loops (including artificial root node)(%d)" % (numLoops, sum)
lsg.dump()
if __name__ == '__main__':
main()
| gpl-3.0 |
wwj718/murp-edx | lms/djangoapps/analytics/basic.py | 8 | 3788 | """
Student and course analytics.
Serve miscellaneous course and student data
"""
from django.contrib.auth.models import User
import xmodule.graders as xmgraders
STUDENT_FEATURES = ('id', 'username', 'first_name', 'last_name', 'is_staff', 'email')
PROFILE_FEATURES = ('name', 'language', 'location', 'year_of_birth', 'gender',
'level_of_education', 'mailing_address', 'goals')
AVAILABLE_FEATURES = STUDENT_FEATURES + PROFILE_FEATURES
def enrolled_students_features(course_id, features):
    """
    Return list of student features as dictionaries.

    enrolled_students_features(course_id, ['username, first_name'])
    would return [
        {'username': 'username1', 'first_name': 'firstname1'}
        {'username': 'username2', 'first_name': 'firstname2'}
        {'username': 'username3', 'first_name': 'firstname3'}
    ]
    """
    # Active enrollments only, ordered for stable output; select_related
    # pulls the profile in the same query.
    students = User.objects.filter(
        courseenrollment__course_id=course_id,
        courseenrollment__is_active=1,
    ).order_by('username').select_related('profile')

    def extract_student(student, features):
        """ convert student to dictionary """
        wanted_student = [f for f in STUDENT_FEATURES if f in features]
        wanted_profile = [f for f in PROFILE_FEATURES if f in features]
        row = {}
        for feature in wanted_student:
            row[feature] = getattr(student, feature)
        profile = student.profile
        if profile is not None:
            for feature in wanted_profile:
                row[feature] = getattr(profile, feature)
        return row

    return [extract_student(student, features) for student in students]
def dump_grading_context(course):
    """
    Render information about course grading context
    (e.g. which problems are graded in what assignments)
    Useful for debugging grading_policy.json and policy.json

    Returns HTML string
    """
    hbar = "{}\n".format("-" * 77)
    msg = hbar
    msg += "Course grader:\n"
    msg += '%s\n' % course.grader.__class__
    graders = {}
    if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):
        msg += '\n'
        msg += "Graded sections:\n"
        for subgrader, category, weight in course.grader.sections:
            msg += " subgrader=%s, type=%s, category=%s, weight=%s\n"\
                % (subgrader.__class__, subgrader.type, category, weight)
            # Track each subgrader so sections below can be labelled with a
            # running assignment number (e.g. "HW 01", "HW 02", ...).
            subgrader.index = 1
            graders[subgrader.type] = subgrader
    msg += hbar
    msg += "Listing grading context for course %s\n" % course.id.to_deprecated_string()
    gcontext = course.grading_context
    msg += "graded sections:\n"
    msg += '%s\n' % gcontext['graded_sections'].keys()
    for (gsomething, gsvals) in gcontext['graded_sections'].items():
        msg += "--> Section %s:\n" % (gsomething)
        for sec in gsvals:
            sdesc = sec['section_descriptor']
            frmat = getattr(sdesc, 'format', None)
            aname = ''
            if frmat in graders:
                # Label with the grader's short label plus its running index.
                gform = graders[frmat]
                aname = '%s %02d' % (gform.short_label, gform.index)
                gform.index += 1
            elif sdesc.display_name in graders:
                gform = graders[sdesc.display_name]
                aname = '%s' % gform.short_label
            notes = ''
            if getattr(sdesc, 'score_by_attempt', False):
                notes = ', score by attempt!'
            msg += " %s (format=%s, Assignment=%s%s)\n"\
                % (sdesc.display_name, frmat, aname, notes)
    msg += "all descriptors:\n"
    msg += "length=%d\n" % len(gcontext['all_descriptors'])
    # Fix: escape '<' as '&lt;' so class reprs (e.g. "<class ...>") do not
    # break the surrounding <pre> HTML.  The previous replace('<', '<')
    # was a no-op.
    msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')
    return msg
| agpl-3.0 |
Khaon/android_external_skia | platform_tools/android/gyp_gen/tool_makefile_writer.py | 63 | 3616 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code for generating Android.mk for a tool."""
import android_framework_gyp
import gypd_parser
import makefile_writer
import os
import vars_dict_lib
def write_tool_android_mk(target_dir, var_dict, place_in_local_tmp):
    """Write Android.mk for a Skia tool.

    Args:
      target_dir: Destination for the makefile. Must not be None.
      var_dict: VarsDict containing variables for the makefile.
      place_in_local_tmp: If True, the executable will be synced to
          /data/local/tmp.
    """
    target_file = os.path.join(target_dir, 'Android.mk')
    with open(target_file, 'w') as f:
        f.write(makefile_writer.AUTOGEN_WARNING)
        if place_in_local_tmp:
            # Define the variable referenced by LOCAL_MODULE_PATH below.
            f.write('local_target_dir := $(TARGET_OUT_DATA)/local/tmp\n')
        makefile_writer.write_local_path(f)
        makefile_writer.write_clear_vars(f)
        # NOTE(review): positional args False/None -- confirm against the
        # makefile_writer.write_local_vars signature.
        makefile_writer.write_local_vars(f, var_dict, False, None)
        if place_in_local_tmp:
            f.write('LOCAL_MODULE_PATH := $(local_target_dir)\n')
        makefile_writer.write_include_stlport(f)
        f.write('include $(BUILD_EXECUTABLE)\n')
def generate_tool(gyp_dir, target_file, skia_trunk, dest_dir,
                  skia_lib_var_dict, local_module_name, local_module_tags,
                  place_in_local_tmp=False):
    """Common steps for building one of the skia tools.

    Parse a gyp file and create an Android.mk for this tool.

    Args:
      gyp_dir: Directory containing gyp files.
      target_file: gyp file for the project to be built, contained in gyp_dir.
      skia_trunk: Trunk of Skia, used for determining the destination to write
          'Android.mk'.
      dest_dir: Destination for 'Android.mk', relative to skia_trunk. Used for
          both writing relative paths in the makefile and for determining the
          destination to write the it.
      skia_lib_var_dict: VarsDict representing libskia. Used as a reference to
          ensure we do not duplicate anything in this Android.mk.
      local_module_name: Name for this tool, to set as LOCAL_MODULE.
      local_module_tags: Tags to pass to LOCAL_MODULE_TAG.
      place_in_local_tmp: If True, the executable will be synced to
          /data/local/tmp.
    """
    result_file = android_framework_gyp.main(target_dir=gyp_dir,
                                             target_file=target_file,
                                             skia_arch_type='other',
                                             have_neon=False)
    var_dict = vars_dict_lib.VarsDict()
    # Add known targets from skia_lib, so we do not reparse them.
    var_dict.KNOWN_TARGETS.set(skia_lib_var_dict.KNOWN_TARGETS)
    gypd_parser.parse_gypd(var_dict, result_file, dest_dir)
    # The intermediate .gypd files are no longer needed once parsed.
    android_framework_gyp.clean_gypd_files(gyp_dir)
    var_dict.LOCAL_MODULE.add(local_module_name)
    for tag in local_module_tags:
        var_dict.LOCAL_MODULE_TAGS.add(tag)
    # No need for defines that are already in skia_lib.
    for define in skia_lib_var_dict.DEFINES:
        try:
            var_dict.DEFINES.remove(define)
        except ValueError:
            # Okay if the define was not part of the parse for our tool.
            pass
    if skia_trunk:
        full_dest = os.path.join(skia_trunk, dest_dir)
    else:
        full_dest = dest_dir
    # If the path does not exist, create it. This will happen during testing,
    # where there is no subdirectory for each tool (just a temporary folder).
    if not os.path.exists(full_dest):
        os.mkdir(full_dest)
    write_tool_android_mk(target_dir=full_dest, var_dict=var_dict,
                          place_in_local_tmp=place_in_local_tmp)
| bsd-3-clause |
Nessphoro/sublimeassembly | code/completion.py | 1 | 2196 | import sublime
import sublime_plugin
from .helpers import *
from .context import *
class completionListener(sublime_plugin.EventListener):
    """Supplies completions (labels, opcodes, %-directives) for asm views."""

    def on_query_completions(self, view, prefix, locations):
        if is_asm(view):
            prefixLineStart = view.line(locations[0])
            line = view.substr(prefixLineStart).strip()
            # Column offset of the stripped text within the raw line.
            lskip = view.substr(prefixLineStart).index(line)
            splitLine = line.split(' ', maxsplit=1)  # maybe str.partition?
            if(len(splitLine) > 1):
                # The user is typing in the middle. Check if the start was a label
                if is_name(prefixLineStart.begin()+lskip, view):
                    # NOTE(review): the label branch only prints a debug
                    # message ("Lable" is a typo) and implicitly returns
                    # None, i.e. no completions are offered.
                    print("Lable first")
                else:
                    # No a label start, so could be instruction or a support directive
                    print("Instuction context")
                    return self.handleInstructionContext(view, prefix, locations)
            else:
                prefix = line  # the user is just starting to type
                if prefix.startswith("%"):
                    return self.handleSupportContext(view, prefix, locations)
                else:
                    return self.handleInsturctionOpcodeContext(view, prefix, locations)

    def handleInstructionContext(self, view, prefix, locations):
        # Offer locally defined names (from the view's context object),
        # case-insensitively matched against the typed prefix.
        completions = [[name[0]+'\t'+name[1], name[0]] for name in contexts[view.id()].getLocals(set()) if name[0].casefold().startswith(prefix.casefold())]
        return (completions, sublime.INHIBIT_WORD_COMPLETIONS)

    def handleSupportContext(self, view, prefix, locations):
        # Assembler support directives (e.g. "%..." entries in support_set).
        return [[si, si] for si in support_set.keys() if si.casefold().startswith(prefix.casefold())]

    def handleInsturctionOpcodeContext(self, view, prefix, locations):
        # Instruction opcodes with their brief description, plus support
        # directives; shorter insertions sort first.
        completions= [[instruction.casefold() + " \t" + instruction_set[instruction]["Brief"], instruction.lower()]\
            for instruction in instruction_set.keys() if instruction.casefold().startswith(prefix.casefold())]
        completions.extend(self.handleSupportContext(view, prefix, locations))
        completions.sort(key=lambda x: len(x[1]))
        return (completions, sublime.INHIBIT_WORD_COMPLETIONS)
| bsd-3-clause |
caseyc37/pygame_cffi | test/imageext_test.py | 2 | 2605 | import os
import sys
if __name__ == '__main__':
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils \
import expected_error, test_not_implemented, example_path, unittest
else:
from test.test_utils \
import expected_error, test_not_implemented, example_path, unittest
import pygame, pygame.image, pygame.pkgdata
from pygame.compat import unicode_
from pygame.image import save as save_extended, load as load_extended
import os.path
class ImageextModuleTest( unittest.TestCase ):
    # Most of the testing is done indirectly through image_test.py
    # This just confirms file path encoding and error handling.

    def test_save_non_string_file(self):
        # Non-path objects must be rejected with TypeError.
        im = pygame.Surface((10, 10), 0, 32)
        self.assertRaises(TypeError, save_extended, im, [])

    def test_load_non_string_file(self):
        self.assertRaises(pygame.error, load_extended, [])

    def test_save_bad_filename(self):
        # Embedded NUL bytes make the filename invalid at the C level.
        im = pygame.Surface((10, 10), 0, 32)
        u = u"a\x00b\x00c.png"
        self.assertRaises(pygame.error, save_extended, im, u)

    def test_load_bad_filename(self):
        u = u"a\x00b\x00c.png"
        self.assertRaises(pygame.error, load_extended, u)

    # No longer necessary since image and imageext have been merged.
    #def test_save_unknown_extension(self):
    #    im = pygame.Surface((10, 10), 0, 32)
    #    s = "foo.bar"
    #    self.assertRaises(pygame.error, save_extended, im, s)

    def test_load_unknown_extension(self):
        s = "foo.bar"
        self.assertRaises(pygame.error, load_extended, s)

    def test_load_unicode_path(self):
        # Loading via a unicode path should simply succeed.
        u = unicode_(example_path("data/alien1.png"))
        im = load_extended(u)

    def test_save_unicode_path(self):
        # Round-trip: save to a unicode temp path, verify a non-trivial file
        # was produced, and always clean up afterwards.
        temp_file = unicode_("tmpimg.png")
        im = pygame.Surface((10, 10), 0, 32)
        try:
            os.remove(temp_file)
        except EnvironmentError:
            pass
        self.assert_(not os.path.exists(temp_file))
        try:
            save_extended(im, temp_file)
            self.assert_(os.path.getsize(temp_file) > 10)
        finally:
            try:
                os.remove(temp_file)
            except EnvironmentError:
                pass
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
GoogleCloudPlatform/declarative-resource-client-library | python/services/compute/beta/ssl_certificate.py | 1 | 7634 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import ssl_certificate_pb2
from google3.cloud.graphite.mmv2.services.google.compute import ssl_certificate_pb2_grpc
from typing import List
class SslCertificate(object):
    """Wrapper for a Compute (beta) SslCertificate resource.

    Talks to the declarative client library's gRPC service: apply() is
    create-or-update, delete() removes the resource, list() enumerates
    certificates in a project.
    """

    def __init__(
        self,
        id: int = None,
        name: str = None,
        description: str = None,
        self_link: str = None,
        self_managed: dict = None,
        type: str = None,
        subject_alternative_names: list = None,
        expire_time: str = None,
        project: str = None,
        service_account_file: str = "",
    ):
        channel.initialize()
        # Only the settable fields are stored here.  id, self_link,
        # subject_alternative_names and expire_time are accepted for
        # signature compatibility but presumably server-assigned -- they are
        # populated from the apply() response instead (TODO confirm).
        self.name = name
        self.description = description
        self.self_managed = self_managed
        self.type = type
        self.project = project
        self.service_account_file = service_account_file

    def apply(self):
        # Build the request proto from the configured fields, send it, and
        # refresh all local attributes from the server's response.
        stub = ssl_certificate_pb2_grpc.ComputeBetaSslCertificateServiceStub(
            channel.Channel()
        )
        request = ssl_certificate_pb2.ApplyComputeBetaSslCertificateRequest()
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if SslCertificateSelfManaged.to_proto(self.self_managed):
            request.resource.self_managed.CopyFrom(
                SslCertificateSelfManaged.to_proto(self.self_managed)
            )
        else:
            # Explicitly clear the message field when unset.
            request.resource.ClearField("self_managed")
        if SslCertificateTypeEnum.to_proto(self.type):
            request.resource.type = SslCertificateTypeEnum.to_proto(self.type)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        request.service_account_file = self.service_account_file

        response = stub.ApplyComputeBetaSslCertificate(request)
        self.id = Primitive.from_proto(response.id)
        self.name = Primitive.from_proto(response.name)
        self.description = Primitive.from_proto(response.description)
        self.self_link = Primitive.from_proto(response.self_link)
        self.self_managed = SslCertificateSelfManaged.from_proto(response.self_managed)
        self.type = SslCertificateTypeEnum.from_proto(response.type)
        self.subject_alternative_names = Primitive.from_proto(
            response.subject_alternative_names
        )
        self.expire_time = Primitive.from_proto(response.expire_time)
        self.project = Primitive.from_proto(response.project)

    def delete(self):
        # Deletion sends the full resource so the service can identify it.
        stub = ssl_certificate_pb2_grpc.ComputeBetaSslCertificateServiceStub(
            channel.Channel()
        )
        request = ssl_certificate_pb2.DeleteComputeBetaSslCertificateRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if SslCertificateSelfManaged.to_proto(self.self_managed):
            request.resource.self_managed.CopyFrom(
                SslCertificateSelfManaged.to_proto(self.self_managed)
            )
        else:
            request.resource.ClearField("self_managed")
        if SslCertificateTypeEnum.to_proto(self.type):
            request.resource.type = SslCertificateTypeEnum.to_proto(self.type)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        response = stub.DeleteComputeBetaSslCertificate(request)

    @classmethod
    def list(self, project, service_account_file=""):
        # Class-level listing: no instance state is read or written.
        stub = ssl_certificate_pb2_grpc.ComputeBetaSslCertificateServiceStub(
            channel.Channel()
        )
        request = ssl_certificate_pb2.ListComputeBetaSslCertificateRequest()
        request.service_account_file = service_account_file
        request.Project = project

        return stub.ListComputeBetaSslCertificate(request).items

    def to_proto(self):
        """Serialize the configured fields into the resource proto."""
        resource = ssl_certificate_pb2.ComputeBetaSslCertificate()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if SslCertificateSelfManaged.to_proto(self.self_managed):
            resource.self_managed.CopyFrom(
                SslCertificateSelfManaged.to_proto(self.self_managed)
            )
        else:
            resource.ClearField("self_managed")
        if SslCertificateTypeEnum.to_proto(self.type):
            resource.type = SslCertificateTypeEnum.to_proto(self.type)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        return resource
class SslCertificateSelfManaged(object):
    """Value object for the self_managed certificate/private_key pair."""

    def __init__(self, certificate: str = None, private_key: str = None):
        self.certificate = certificate
        self.private_key = private_key

    @classmethod
    def to_proto(self, resource):
        # None in -> None out, so callers can ClearField the message instead.
        if not resource:
            return None

        res = ssl_certificate_pb2.ComputeBetaSslCertificateSelfManaged()
        if Primitive.to_proto(resource.certificate):
            res.certificate = Primitive.to_proto(resource.certificate)
        if Primitive.to_proto(resource.private_key):
            res.private_key = Primitive.to_proto(resource.private_key)
        return res

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return None

        return SslCertificateSelfManaged(
            certificate=Primitive.from_proto(resource.certificate),
            private_key=Primitive.from_proto(resource.private_key),
        )
class SslCertificateSelfManagedArray(object):
    """Converts lists of SslCertificateSelfManaged to/from proto lists."""

    @classmethod
    def to_proto(self, resources):
        # Empty or None inputs pass through unchanged.
        if not resources:
            return resources
        converted = []
        for item in resources:
            converted.append(SslCertificateSelfManaged.to_proto(item))
        return converted

    @classmethod
    def from_proto(self, resources):
        result = []
        for item in resources:
            result.append(SslCertificateSelfManaged.from_proto(item))
        return result
class SslCertificateTypeEnum(object):
    """Maps between short enum names and prefixed proto enum values."""

    @classmethod
    def to_proto(self, resource):
        if not resource:
            return resource
        # Proto enum values are prefixed with the full enum type name.
        return ssl_certificate_pb2.ComputeBetaSslCertificateTypeEnum.Value(
            "ComputeBetaSslCertificateTypeEnum%s" % resource
        )

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        # Strip the type-name prefix to recover the short value name.
        return ssl_certificate_pb2.ComputeBetaSslCertificateTypeEnum.Name(resource)[
            len("ComputeBetaSslCertificateTypeEnum") :
        ]
class Primitive(object):
    """Converters for primitive proto fields (strings, ints, ...)."""

    @classmethod
    def to_proto(self, s):
        # Falsy values (None, "", 0, ...) are normalized to the proto
        # default: the empty string.
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        # Primitives come back as-is; nothing to convert.
        return s
| apache-2.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/scipy/_lib/_util.py | 8 | 8458 | from __future__ import division, print_function, absolute_import
import functools
import operator
import sys
import warnings
import numbers
from collections import namedtuple
import inspect
import numpy as np
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""Allocate a new ndarray with aligned memory.
Primary use case for this currently is working around a f2py issue
in Numpy 1.9.1, where dtype.alignment is such that np.zeros() does
not necessarily create arrays aligned up to it.
"""
dtype = np.dtype(dtype)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
class DeprecatedImport(object):
    """
    Deprecated import, with redirection + warning.

    Examples
    --------
    Suppose you previously had in some module::

        from foo import spam

    If this has to be deprecated, do::

        spam = DeprecatedImport("foo.spam", "baz")

    to redirect users to use "baz" module instead.

    """

    def __init__(self, old_module_name, new_module_name):
        self._old_name = old_module_name
        self._new_name = new_module_name
        # Import eagerly so a bad replacement name fails at construction.
        __import__(new_module_name)
        self._mod = sys.modules[new_module_name]

    def __dir__(self):
        # Present the replacement module's namespace.
        return dir(self._mod)

    def __getattr__(self, name):
        # Warn on every attribute access, then delegate to the new module.
        message = ("Module %s is deprecated, use %s instead"
                   % (self._old_name, self._new_name))
        warnings.warn(message, DeprecationWarning)
        return getattr(self._mod, name)
# copy-pasted from scikit-learn utils/validation.py
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance

    If seed is None (or np.random), return the RandomState singleton used
    by np.random.
    If seed is an int, return a new RandomState instance seeded with seed.
    If seed is already a RandomState instance, return it.
    Otherwise raise ValueError.
    """
    # The three accepted cases are mutually exclusive, so order among the
    # isinstance checks does not matter.
    if isinstance(seed, np.random.RandomState):
        return seed
    if seed is None or seed is np.random:
        # Share the global singleton that np.random itself uses.
        return np.random.mtrand._rand
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def _asarray_validated(a, check_finite=True,
                       sparse_ok=False, objects_ok=False, mask_ok=False,
                       as_inexact=False):
    """
    Helper function for scipy argument validation.

    Many scipy linear algebra functions do support arbitrary array-like
    input arguments. Examples of commonly unsupported inputs include
    matrices containing inf/nan, sparse matrix representations, and
    matrices with complicated elements.

    Parameters
    ----------
    a : array_like
        The array-like input.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True
    sparse_ok : bool, optional
        True if scipy sparse matrices are allowed.
    objects_ok : bool, optional
        True if arrays with dype('O') are allowed.
    mask_ok : bool, optional
        True if masked arrays are allowed.
    as_inexact : bool, optional
        True to convert the input array to a np.inexact dtype.

    Returns
    -------
    ret : ndarray
        The converted validated array.

    """
    if not sparse_ok:
        # Imported lazily so this module has no hard scipy.sparse dependency.
        import scipy.sparse
        if scipy.sparse.issparse(a):
            msg = ('Sparse matrices are not supported by this function. '
                   'Perhaps one of the scipy.linalg.sparse functions '
                   'would work instead.')
            raise ValueError(msg)
    if not mask_ok:
        if np.ma.isMaskedArray(a):
            raise ValueError('masked arrays are not supported')
    # asarray_chkfinite additionally rejects NaN/inf entries.
    toarray = np.asarray_chkfinite if check_finite else np.asarray
    a = toarray(a)
    if not objects_ok:
        if a.dtype is np.dtype('O'):
            raise ValueError('object arrays are not supported')
    if as_inexact:
        if not np.issubdtype(a.dtype, np.inexact):
            try:
                a = toarray(a, dtype=np.float_)
            except TypeError:
                # for compatibility with numpy 1.6
                a = toarray(a).astype(np.float_)
    return a
# Add a replacement for inspect.getargspec() which is deprecated in python 3.5
# The version below is borrowed from Django,
# https://github.com/django/django/pull/4846
# Note an inconsistency between inspect.getargspec(func) and
# inspect.signature(func). If `func` is a bound method, the latter does *not*
# list `self` as a first argument, while the former *does*.
# Hence cook up a common ground replacement: `getargspec_no_self` which
# mimics `inspect.getargspec` but does not list `self`.
#
# This way, the caller code does not need to know whether it uses a legacy
# .getargspec or bright and shiny .signature.
# Probe for inspect.signature (python >= 3.3); fall back to the legacy
# inspect.getargspec implementation on python 2.x.
try:
    # is it python 3.3 or higher?
    inspect.signature

    # Apparently, yes. Wrap inspect.signature

    ArgSpec = namedtuple('ArgSpec', ['args', 'varargs', 'keywords', 'defaults'])

    def getargspec_no_self(func):
        """inspect.getargspec replacement using inspect.signature.

        inspect.getargspec is deprecated in python 3. This is a replacement
        based on the (new in python 3.3) `inspect.signature`.

        Parameters
        ----------
        func : callable
            A callable to inspect

        Returns
        -------
        argspec : ArgSpec(args, varargs, varkw, defaults)
            This is similar to the result of inspect.getargspec(func) under
            python 2.x.
            NOTE: if the first argument of `func` is self, it is *not*, I repeat
            *not* included in argspec.args.
            This is done for consistency between inspect.getargspec() under
            python 2.x, and inspect.signature() under python 3.x.
        """
        sig = inspect.signature(func)
        # Positional-or-keyword parameters correspond to getargspec's `args`.
        args = [
            p.name for p in sig.parameters.values()
            if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        ]
        varargs = [
            p.name for p in sig.parameters.values()
            if p.kind == inspect.Parameter.VAR_POSITIONAL
        ]
        # At most one *args / **kwargs parameter can exist.
        varargs = varargs[0] if varargs else None
        varkw = [
            p.name for p in sig.parameters.values()
            if p.kind == inspect.Parameter.VAR_KEYWORD
        ]
        varkw = varkw[0] if varkw else None
        defaults = [
            p.default for p in sig.parameters.values()
            if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
                p.default is not p.empty)
        ] or None
        return ArgSpec(args, varargs, varkw, defaults)

except AttributeError:
    # python 2.x
    def getargspec_no_self(func):
        """inspect.getargspec replacement for compatibility with python 3.x.

        inspect.getargspec is deprecated in python 3. This wraps it, and
        *removes* `self` from the argument list of `func`, if present.
        This is done for forward compatibility with python 3.

        Parameters
        ----------
        func : callable
            A callable to inspect

        Returns
        -------
        argspec : ArgSpec(args, varargs, varkw, defaults)
            This is similar to the result of inspect.getargspec(func) under
            python 2.x.
            NOTE: if the first argument of `func` is self, it is *not*, I repeat
            *not* included in argspec.args.
            This is done for consistency between inspect.getargspec() under
            python 2.x, and inspect.signature() under python 3.x.
        """
        argspec = inspect.getargspec(func)
        # NOTE(review): argspec.args[0] raises IndexError for zero-argument
        # callables -- presumably never hit by callers; confirm if reused.
        if argspec.args[0] == 'self':
            argspec.args.pop(0)
        return argspec
| gpl-2.0 |
maginatics/swift | test/unit/container/test_server.py | 12 | 120178 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import os
import mock
import unittest
import itertools
from contextlib import contextmanager
from shutil import rmtree
from StringIO import StringIO
from tempfile import mkdtemp
from test.unit import FakeLogger
from time import gmtime
from xml.dom import minidom
import time
import random
from eventlet import spawn, Timeout, listen
import simplejson
from swift import __version__ as swift_version
from swift.common.swob import Request, HeaderKeyDict
import swift.container
from swift.container import server as container_server
from swift.common import constraints
from swift.common.utils import (Timestamp, mkdirs, public, replication,
lock_parent_directory, json)
from test.unit import fake_http_connect
from swift.common.storage_policy import (POLICIES, StoragePolicy)
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies
@contextmanager
def save_globals():
    """Snapshot swift.container.server.http_connect and restore it on exit."""
    saved = getattr(swift.container.server, 'http_connect', None)
    try:
        yield True
    finally:
        # Restore even if the body raised; absent attribute becomes None.
        swift.container.server.http_connect = saved
@patch_policies
class TestContainerController(unittest.TestCase):
"""Test swift.container.server.ContainerController"""
    def setUp(self):
        """Set up for testing swift.object_server.ObjectController"""
        # Build a throwaway on-disk layout: <tmpdir>/.../sda1/tmp acts as
        # the single "device" the container controller will serve from.
        self.testdir = os.path.join(mkdtemp(),
                                    'tmp_test_object_server_ObjectController')
        mkdirs(self.testdir)
        rmtree(self.testdir)
        mkdirs(os.path.join(self.testdir, 'sda1'))
        mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
        self.controller = container_server.ContainerController(
            {'devices': self.testdir, 'mount_check': 'false'})
        # some of the policy tests want at least two policies
        self.assert_(len(POLICIES) > 1)
    def tearDown(self):
        # Remove the parent of testdir, i.e. the mkdtemp() dir from setUp.
        rmtree(os.path.dirname(self.testdir), ignore_errors=1)
    def _update_object_put_headers(self, req):
        """
        Override this method in test subclasses to test post upgrade
        behavior.
        """
        # Intentionally a no-op in this base test class.
        pass
    def _check_put_container_storage_policy(self, req, policy_index):
        # Helper: the PUT must succeed (201), and a follow-up HEAD on the
        # same path must report the expected storage policy index header.
        resp = req.get_response(self.controller)
        self.assertEqual(201, resp.status_int)
        req = Request.blank(req.path, method='HEAD')
        resp = req.get_response(self.controller)
        self.assertEqual(204, resp.status_int)
        self.assertEqual(str(policy_index),
                         resp.headers['X-Backend-Storage-Policy-Index'])
    def test_get_and_validate_policy_index(self):
        # no policy is OK
        req = Request.blank('/sda1/p/a/container_default', method='PUT',
                            headers={'X-Timestamp': '0'})
        # Omitting the header must fall back to the default policy.
        self._check_put_container_storage_policy(req, POLICIES.default.idx)

        # bogus policies
        for policy in ('nada', 999):
            req = Request.blank('/sda1/p/a/c_%s' % policy, method='PUT',
                                headers={
                                    'X-Timestamp': '0',
                                    'X-Backend-Storage-Policy-Index': policy
                                })
            resp = req.get_response(self.controller)
            # Unknown indexes must be rejected with 400, not defaulted.
            self.assertEqual(400, resp.status_int)
            self.assert_('invalid' in resp.body.lower())

        # good policies
        for policy in POLICIES:
            req = Request.blank('/sda1/p/a/c_%s' % policy.name, method='PUT',
                                headers={
                                    'X-Timestamp': '0',
                                    'X-Backend-Storage-Policy-Index':
                                        policy.idx,
                                })
            self._check_put_container_storage_policy(req, policy.idx)
    def test_acl_container(self):
        """Container ACL headers: absent by default, settable via POST or
        PUT, and clearable by POSTing empty values."""
        # Ensure no acl by default
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '0'})
        resp = req.get_response(self.controller)
        self.assert_(resp.status.startswith('201'))
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        response = req.get_response(self.controller)
        self.assert_(response.status.startswith('204'))
        self.assert_('x-container-read' not in response.headers)
        self.assert_('x-container-write' not in response.headers)
        # Ensure POSTing acls works
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': '1', 'X-Container-Read': '.r:*',
                     'X-Container-Write': 'account:user'})
        resp = req.get_response(self.controller)
        self.assert_(resp.status.startswith('204'))
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        response = req.get_response(self.controller)
        self.assert_(response.status.startswith('204'))
        self.assertEquals(response.headers.get('x-container-read'), '.r:*')
        self.assertEquals(response.headers.get('x-container-write'),
                          'account:user')
        # Ensure we can clear acls on POST (empty values remove them)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': '3', 'X-Container-Read': '',
                     'X-Container-Write': ''})
        resp = req.get_response(self.controller)
        self.assert_(resp.status.startswith('204'))
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        response = req.get_response(self.controller)
        self.assert_(response.status.startswith('204'))
        self.assert_('x-container-read' not in response.headers)
        self.assert_('x-container-write' not in response.headers)
        # Ensure PUTing acls works (fresh container c2)
        req = Request.blank(
            '/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '4', 'X-Container-Read': '.r:*',
                     'X-Container-Write': 'account:user'})
        resp = req.get_response(self.controller)
        self.assert_(resp.status.startswith('201'))
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'HEAD'})
        response = req.get_response(self.controller)
        self.assert_(response.status.startswith('204'))
        self.assertEquals(response.headers.get('x-container-read'), '.r:*')
        self.assertEquals(response.headers.get('x-container-write'),
                          'account:user')
    def test_HEAD(self):
        """HEAD reports container stats (object count / bytes used) and
        the user-visible plus backend timestamp headers."""
        start = int(time.time())
        ts = (Timestamp(t).internal for t in itertools.count(start))
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'x-timestamp': ts.next()})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c', method='HEAD')
        response = req.get_response(self.controller)
        self.assertEqual(response.status_int, 204)
        # fresh container: no objects, no bytes
        self.assertEqual(response.headers['x-container-bytes-used'], '0')
        self.assertEqual(response.headers['x-container-object-count'], '0')
        obj_put_request = Request.blank(
            '/sda1/p/a/c/o', method='PUT', headers={
                'x-timestamp': ts.next(),
                'x-size': 42,
                'x-content-type': 'text/plain',
                'x-etag': 'x',
            })
        self._update_object_put_headers(obj_put_request)
        obj_put_resp = obj_put_request.get_response(self.controller)
        self.assertEqual(obj_put_resp.status_int // 100, 2)
        # re-issue HEAD request: stats now reflect the one 42-byte object
        response = req.get_response(self.controller)
        self.assertEqual(response.status_int // 100, 2)
        self.assertEqual(response.headers['x-container-bytes-used'], '42')
        self.assertEqual(response.headers['x-container-object-count'], '1')
        # created at time... (x-timestamp is the "normal" form)
        created_at_header = Timestamp(response.headers['x-timestamp'])
        self.assertEqual(response.headers['x-timestamp'],
                         created_at_header.normal)
        self.assert_(created_at_header >= start)
        self.assertEqual(response.headers['x-put-timestamp'],
                         Timestamp(start).normal)
        # backend headers (internal-form timestamps, policy index)
        self.assertEqual(int(response.headers
                             ['X-Backend-Storage-Policy-Index']),
                         int(POLICIES.default))
        self.assert_(
            Timestamp(response.headers['x-backend-timestamp']) >= start)
        self.assertEqual(response.headers['x-backend-put-timestamp'],
                         Timestamp(start).internal)
        self.assertEqual(response.headers['x-backend-delete-timestamp'],
                         Timestamp(0).internal)
        self.assertEqual(response.headers['x-backend-status-changed-at'],
                         Timestamp(start).internal)
def test_HEAD_not_found(self):
req = Request.blank('/sda1/p/a/c', method='HEAD')
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
0)
self.assertEqual(resp.headers['x-backend-timestamp'],
Timestamp(0).internal)
self.assertEqual(resp.headers['x-backend-put-timestamp'],
Timestamp(0).internal)
self.assertEqual(resp.headers['x-backend-status-changed-at'],
Timestamp(0).internal)
self.assertEqual(resp.headers['x-backend-delete-timestamp'],
Timestamp(0).internal)
for header in ('x-container-object-count', 'x-container-bytes-used',
'x-timestamp', 'x-put-timestamp'):
self.assertEqual(resp.headers[header], None)
    def test_deleted_headers(self):
        """GET/HEAD of a deleted container return 404 but still carry
        backend timestamps describing the create/delete history."""
        ts = (Timestamp(t).internal for t in
              itertools.count(int(time.time())))
        request_method_times = {
            'PUT': ts.next(),
            'DELETE': ts.next(),
        }
        # setup a deleted container
        for method in ('PUT', 'DELETE'):
            x_timestamp = request_method_times[method]
            req = Request.blank('/sda1/p/a/c', method=method,
                                headers={'x-timestamp': x_timestamp})
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int // 100, 2)
        for method in ('GET', 'HEAD'):
            req = Request.blank('/sda1/p/a/c', method=method)
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 404)
            # backend headers survive deletion
            self.assertEqual(int(resp.headers[
                             'X-Backend-Storage-Policy-Index']),
                             int(POLICIES.default))
            self.assert_(Timestamp(resp.headers['x-backend-timestamp']) >=
                         Timestamp(request_method_times['PUT']))
            self.assertEqual(resp.headers['x-backend-put-timestamp'],
                             request_method_times['PUT'])
            self.assertEqual(resp.headers['x-backend-delete-timestamp'],
                             request_method_times['DELETE'])
            self.assertEqual(resp.headers['x-backend-status-changed-at'],
                             request_method_times['DELETE'])
            # user-visible stat headers are withheld on a deleted container
            for header in ('x-container-object-count',
                           'x-container-bytes-used', 'x-timestamp',
                           'x-put-timestamp'):
                self.assertEqual(resp.headers[header], None)
def test_HEAD_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_HEAD_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_HEAD_invalid_content_type(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'},
headers={'Accept': 'application/plain'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 406)
def test_HEAD_invalid_format(self):
format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
req = Request.blank(
'/sda1/p/a/c?format=' + format,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_OPTIONS(self):
server_handler = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false'})
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = server_handler.OPTIONS(req)
self.assertEquals(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split():
self.assertTrue(
verb in resp.headers['Allow'].split(', '))
self.assertEquals(len(resp.headers['Allow'].split(', ')), 7)
self.assertEquals(resp.headers['Server'],
(self.controller.server_type + '/' + swift_version))
def test_PUT(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
    def test_PUT_simulated_create_race(self):
        """Simulate two PUTs racing to create the same container.

        In the 'race' state the intercepted broker makes the db file look
        absent at open time (so the server takes the create path) but
        restores the real path before initialize() runs, reproducing a
        loser of the create race. The second PUT must still succeed (202).
        """
        state = ['initial']
        from swift.container.backend import ContainerBroker as OrigCoBr
        class InterceptedCoBr(OrigCoBr):
            def __init__(self, *args, **kwargs):
                super(InterceptedCoBr, self).__init__(*args, **kwargs)
                if state[0] == 'initial':
                    # Do nothing initially
                    pass
                elif state[0] == 'race':
                    # Save the original db_file attribute value, then point
                    # at a nonexistent file so the db looks uncreated
                    self._saved_db_file = self.db_file
                    self.db_file += '.doesnotexist'
            def initialize(self, *args, **kwargs):
                if state[0] == 'initial':
                    # Do nothing initially
                    pass
                elif state[0] == 'race':
                    # Restore the original db_file attribute to get the race
                    # behavior
                    self.db_file = self._saved_db_file
                return super(InterceptedCoBr, self).initialize(*args, **kwargs)
        with mock.patch("swift.container.server.ContainerBroker",
                        InterceptedCoBr):
            req = Request.blank(
                '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                        'HTTP_X_TIMESTAMP': '1'})
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 201)
            state[0] = "race"
            req = Request.blank(
                '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                        'HTTP_X_TIMESTAMP': '1'})
            resp = req.get_response(self.controller)
            # losing the create race is still reported as success
            self.assertEqual(resp.status_int, 202)
def test_PUT_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1', 'X-Size': '0',
'X-Content-Type': 'text/plain', 'X-ETag': 'e'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_PUT_good_policy_specified(self):
policy = random.choice(list(POLICIES))
# Set metadata header
req = Request.blank('/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': Timestamp(1).internal,
'X-Backend-Storage-Policy-Index':
policy.idx})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
# now make sure we read it back
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
def test_PUT_no_policy_specified(self):
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(POLICIES.default.idx))
# now make sure the default was used (pol 1)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(POLICIES.default.idx))
def test_PUT_bad_policy_specified(self):
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': Timestamp(1).internal,
'X-Backend-Storage-Policy-Index': 'nada'})
resp = req.get_response(self.controller)
# make sure we get bad response
self.assertEquals(resp.status_int, 400)
self.assertFalse('X-Backend-Storage-Policy-Index' in resp.headers)
    def test_PUT_no_policy_change(self):
        """POST/PUT that repeat the container's existing policy index are
        accepted and leave the stored index unchanged."""
        ts = (Timestamp(t).internal for t in itertools.count(time.time()))
        policy = random.choice(list(POLICIES))
        # Set metadata header
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': ts.next(),
            'X-Backend-Storage-Policy-Index': policy.idx})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c')
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        # make sure we get the right index back
        self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
                          str(policy.idx))
        # now try to update w/o changing the policy
        for method in ('POST', 'PUT'):
            req = Request.blank('/sda1/p/a/c', method=method, headers={
                'X-Timestamp': ts.next(),
                'X-Backend-Storage-Policy-Index': policy.idx
            })
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int // 100, 2)
        # make sure we get the right index back
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
                          str(policy.idx))
    def test_PUT_bad_policy_change(self):
        """A PUT that tries to switch an existing container to any other
        policy gets 409 Conflict and the stored index never changes."""
        ts = (Timestamp(t).internal for t in itertools.count(time.time()))
        policy = random.choice(list(POLICIES))
        # Set metadata header
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': ts.next(),
            'X-Backend-Storage-Policy-Index': policy.idx})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c')
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        # make sure we get the right index back
        self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
                          str(policy.idx))
        other_policies = [p for p in POLICIES if p != policy]
        for other_policy in other_policies:
            # now try to change it and make sure we get a conflict
            req = Request.blank('/sda1/p/a/c', method='PUT', headers={
                'X-Timestamp': ts.next(),
                'X-Backend-Storage-Policy-Index': other_policy.idx
            })
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 409)
            # conflict response still reports the original index
            self.assertEquals(
                resp.headers.get('X-Backend-Storage-Policy-Index'),
                str(policy.idx))
        # and make sure there is no change!
        req = Request.blank('/sda1/p/a/c')
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        # make sure we get the right index back
        self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
                          str(policy.idx))
def test_POST_ignores_policy_change(self):
ts = (Timestamp(t).internal for t in itertools.count(time.time()))
policy = random.choice(list(POLICIES))
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': policy.idx})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# make sure we get the right index back
self.assertEquals(resp.headers.get('X-Backend-Storage-Policy-Index'),
str(policy.idx))
other_policies = [p for p in POLICIES if p != policy]
for other_policy in other_policies:
# now try to change it and make sure we get a conflict
req = Request.blank('/sda1/p/a/c', method='POST', headers={
'X-Timestamp': ts.next(),
'X-Backend-Storage-Policy-Index': other_policy.idx
})
resp = req.get_response(self.controller)
# valid request
self.assertEquals(resp.status_int // 100, 2)
# but it does nothing
req = Request.blank('/sda1/p/a/c')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# make sure we get the right index back
self.assertEquals(resp.headers.get
('X-Backend-Storage-Policy-Index'),
str(policy.idx))
    def test_PUT_no_policy_for_existing_default(self):
        """Re-PUTting an existing default-policy container without a
        policy header leaves its policy index unchanged."""
        ts = (Timestamp(t).internal for t in
              itertools.count(int(time.time())))
        # create a container with the default storage policy
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': ts.next(),
        })
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)  # sanity check
        # check the policy index
        req = Request.blank('/sda1/p/a/c', method='HEAD')
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
                         str(POLICIES.default.idx))
        # put again without specifying the storage policy
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': ts.next(),
        })
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)  # sanity check
        # policy index is unchanged
        req = Request.blank('/sda1/p/a/c', method='HEAD')
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
                         str(POLICIES.default.idx))
    def test_PUT_proxy_default_no_policy_for_existing_default(self):
        """A container keeps the default policy it was created under even
        when a later PUT advertises a different proxy-side default."""
        # make it look like the proxy has a different default than we do, like
        # during a config change restart across a multi node cluster.
        proxy_default = random.choice([p for p in POLICIES if not
                                       p.is_default])
        ts = (Timestamp(t).internal for t in
              itertools.count(int(time.time())))
        # create a container with the default storage policy
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': ts.next(),
            'X-Backend-Storage-Policy-Default': int(proxy_default),
        })
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)  # sanity check
        # check the policy index
        req = Request.blank('/sda1/p/a/c', method='HEAD')
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
                         int(proxy_default))
        # put again without proxy specifying the different default
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': ts.next(),
            'X-Backend-Storage-Policy-Default': int(POLICIES.default),
        })
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)  # sanity check
        # policy index is unchanged
        req = Request.blank('/sda1/p/a/c', method='HEAD')
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
                         int(proxy_default))
    def test_PUT_no_policy_for_existing_non_default(self):
        """Re-PUTting an existing non-default-policy container without a
        policy header leaves its (non-default) policy index unchanged."""
        ts = (Timestamp(t).internal for t in itertools.count(time.time()))
        non_default_policy = [p for p in POLICIES if not p.is_default][0]
        # create a container with the non-default storage policy
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': ts.next(),
            'X-Backend-Storage-Policy-Index': non_default_policy.idx,
        })
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)  # sanity check
        # check the policy index
        req = Request.blank('/sda1/p/a/c', method='HEAD')
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
                         str(non_default_policy.idx))
        # put again without specifying the storage policy
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': ts.next(),
        })
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 202)  # sanity check
        # policy index is unchanged
        req = Request.blank('/sda1/p/a/c', method='HEAD')
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
                         str(non_default_policy.idx))
    def test_PUT_GET_metadata(self):
        """User metadata lifecycle via PUT/POST + GET: set, add a second
        key, update with a newer timestamp, ignore a stale timestamp, and
        remove by sending an empty value."""
        # Set metadata header
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': Timestamp(1).internal,
                     'X-Container-Meta-Test': 'Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
        # Set another metadata header, ensuring old one doesn't disappear
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': Timestamp(1).internal,
                     'X-Container-Meta-Test2': 'Value2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
        self.assertEquals(resp.headers.get('x-container-meta-test2'), 'Value2')
        # Update metadata header (newer timestamp wins)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': Timestamp(3).internal,
                     'X-Container-Meta-Test': 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Send old update to metadata header (stale timestamp is ignored)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': Timestamp(2).internal,
                     'X-Container-Meta-Test': 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': Timestamp(4).internal,
                     'X-Container-Meta-Test': ''})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assert_('x-container-meta-test' not in resp.headers)
    def test_PUT_GET_sys_metadata(self):
        """Same lifecycle as test_PUT_GET_metadata but for system metadata
        keys (the reserved sysmeta header prefix)."""
        prefix = get_sys_meta_prefix('container')
        key = '%sTest' % prefix
        key2 = '%sTest2' % prefix
        # Set metadata header
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': Timestamp(1).internal,
                                     key: 'Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get(key.lower()), 'Value')
        # Set another metadata header, ensuring old one doesn't disappear
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': Timestamp(1).internal,
                                     key2: 'Value2'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get(key.lower()), 'Value')
        self.assertEquals(resp.headers.get(key2.lower()), 'Value2')
        # Update metadata header (newer timestamp wins)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': Timestamp(3).internal,
                                     key: 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get(key.lower()),
                          'New Value')
        # Send old update to metadata header (stale timestamp is ignored)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': Timestamp(2).internal,
                                     key: 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get(key.lower()),
                          'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': Timestamp(4).internal,
                                     key: ''})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 202)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assert_(key.lower() not in resp.headers)
def test_PUT_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
    def test_POST_HEAD_metadata(self):
        """User metadata lifecycle via POST + HEAD: set, update with a
        newer timestamp, ignore a stale timestamp, and remove by sending
        an empty value."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': Timestamp(1).internal})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        # Set metadata header
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': Timestamp(1).internal,
                     'X-Container-Meta-Test': 'Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
        # Update metadata header (newer timestamp wins)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': Timestamp(3).internal,
                     'X-Container-Meta-Test': 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Send old update to metadata header (stale timestamp is ignored)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': Timestamp(2).internal,
                     'X-Container-Meta-Test': 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get('x-container-meta-test'),
                          'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
            headers={'X-Timestamp': Timestamp(4).internal,
                     'X-Container-Meta-Test': ''})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assert_('x-container-meta-test' not in resp.headers)
    def test_POST_HEAD_sys_metadata(self):
        """Same lifecycle as test_POST_HEAD_metadata but for a system
        metadata key (reserved sysmeta header prefix)."""
        prefix = get_sys_meta_prefix('container')
        key = '%sTest' % prefix
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Timestamp': Timestamp(1).internal})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        # Set metadata header
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': Timestamp(1).internal,
                                     key: 'Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get(key.lower()), 'Value')
        # Update metadata header (newer timestamp wins)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': Timestamp(3).internal,
                                     key: 'New Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get(key.lower()),
                          'New Value')
        # Send old update to metadata header (stale timestamp is ignored)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': Timestamp(2).internal,
                                     key: 'Old Value'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers.get(key.lower()),
                          'New Value')
        # Remove metadata header (by setting it to empty)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
                            headers={'X-Timestamp': Timestamp(4).internal,
                                     key: ''})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        self.assert_(key.lower() not in resp.headers)
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_POST_invalid_container_sync_to(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'},
headers={'x-container-sync-to': '192.168.0.1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_after_DELETE_not_found(self):
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c/',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
    def test_DELETE_container_not_found(self):
        """DELETE that the server cannot resolve returns 404."""
        # NOTE(review): the DELETE targets the same path as the PUT that
        # just returned 201, yet a 404 is expected -- confirm against the
        # matching container-server version that this is the intended
        # semantics (e.g. a timestamp/visibility rule) and not a stale test.
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
                                                    'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
def test_PUT_utf8(self):
snowman = u'\u2603'
container_name = snowman.encode('utf-8')
req = Request.blank(
'/sda1/p/a/%s' % container_name, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
def test_account_update_mismatched_host_device(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '127.0.0.1:0',
'X-Account-Partition': '123',
'X-Account-Device': 'sda1,sda2'})
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
resp = self.controller.account_update(req, 'a', 'c', broker)
self.assertEquals(resp.status_int, 400)
def test_account_update_account_override_deleted(self):
    """With X-Account-Override-Deleted the PUT still succeeds while the
    account update goes to a faked-out backend connection."""
    bindsock = listen(('127.0.0.1', 0))
    put_headers = {'X-Timestamp': '0000000001.00000',
                   'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                   'X-Account-Partition': '123',
                   'X-Account-Device': 'sda1',
                   'X-Account-Override-Deleted': 'yes'}
    req = Request.blank('/sda1/p/a/c',
                        environ={'REQUEST_METHOD': 'PUT',
                                 'HTTP_X_TIMESTAMP': '1'},
                        headers=put_headers)
    with save_globals():
        # route the account update through a canned 200 response
        swift.container.server.http_connect = fake_http_connect(
            200, count=123)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
def test_PUT_account_update(self):
    """Container PUT forwards an account update; exercise the account
    server answering 201, 404 and 503.

    A real socket is bound and an ``accept`` greenthread plays the
    account server, verifying the forwarded request line and its
    x-put-timestamp header.
    """
    bindsock = listen(('127.0.0.1', 0))

    def accept(return_code, expected_timestamp):
        # Answer one connection with return_code and verify the request
        # the container server forwarded.  Failures are *returned* (not
        # raised) so the main greenthread can re-raise them.
        try:
            with Timeout(3):
                sock, addr = bindsock.accept()
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                          return_code)
                out.flush()
                self.assertEquals(inc.readline(),
                                  'PUT /sda1/123/a/c HTTP/1.1\r\n')
                headers = {}
                line = inc.readline()
                while line and line != '\r\n':
                    headers[line.split(':')[0].lower()] = \
                        line.split(':')[1].strip()
                    line = inc.readline()
                self.assertEquals(headers['x-put-timestamp'],
                                  expected_timestamp)
        except BaseException as err:
            return err
        return None

    # account server answers 201: the container PUT succeeds
    req = Request.blank(
        '/sda1/p/a/c',
        environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Timestamp': Timestamp(1).internal,
                 'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                 'X-Account-Partition': '123',
                 'X-Account-Device': 'sda1'})
    event = spawn(accept, 201, Timestamp(1).internal)
    try:
        with Timeout(3):
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
    finally:
        err = event.wait()
        if err:
            raise Exception(err)
    # remove the container so the next PUT is a fresh create
    req = Request.blank(
        '/sda1/p/a/c',
        environ={'REQUEST_METHOD': 'DELETE'},
        headers={'X-Timestamp': '2'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 204)
    # account server answers 404: mirrored back to the client
    req = Request.blank(
        '/sda1/p/a/c',
        environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Timestamp': Timestamp(3).internal,
                 'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                 'X-Account-Partition': '123',
                 'X-Account-Device': 'sda1'})
    event = spawn(accept, 404, Timestamp(3).internal)
    try:
        with Timeout(3):
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 404)
    finally:
        err = event.wait()
        if err:
            raise Exception(err)
    # account server answers 503: must not leak an exception
    req = Request.blank(
        '/sda1/p/a/c',
        environ={'REQUEST_METHOD': 'PUT'},
        headers={'X-Timestamp': Timestamp(5).internal,
                 'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                 'X-Account-Partition': '123',
                 'X-Account-Device': 'sda1'})
    event = spawn(accept, 503, Timestamp(5).internal)
    got_exc = False
    try:
        with Timeout(3):
            resp = req.get_response(self.controller)
    except BaseException:
        # bugfix: previously bound ``as err`` -- the name was never read
        # and was immediately clobbered by ``err = event.wait()`` below
        got_exc = True
    finally:
        err = event.wait()
        if err:
            raise Exception(err)
    self.assert_(not got_exc)
def test_PUT_reset_container_sync(self):
    """A PUT that changes x-container-sync-to resets the sync points;
    a PUT with the same value leaves them untouched."""
    def put(sync_to):
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'x-timestamp': '1',
                     'x-container-sync-to': sync_to})
        return req.get_response(self.controller)

    def sync_points():
        info = self.controller._get_container_broker(
            'sda1', 'p', 'a', 'c').get_info()
        return (info['x_container_sync_point1'],
                info['x_container_sync_point2'])

    resp = put('http://127.0.0.1:12345/v1/a/c')
    self.assertEquals(resp.status_int, 201)
    self.assertEquals(sync_points(), (-1, -1))
    db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
    db.set_x_container_sync_points(123, 456)
    self.assertEquals(sync_points(), (123, 456))
    # Set to same value: points survive
    resp = put('http://127.0.0.1:12345/v1/a/c')
    self.assertEquals(resp.status_int, 202)
    self.assertEquals(sync_points(), (123, 456))
    # Set to new value: points reset
    resp = put('http://127.0.0.1:12345/v1/a/c2')
    self.assertEquals(resp.status_int, 202)
    self.assertEquals(sync_points(), (-1, -1))
def test_POST_reset_container_sync(self):
    """A POST that changes x-container-sync-to resets the sync points;
    a POST with the same value leaves them untouched."""
    def send(method, sync_to):
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': method},
            headers={'x-timestamp': '1',
                     'x-container-sync-to': sync_to})
        return req.get_response(self.controller)

    def sync_points():
        info = self.controller._get_container_broker(
            'sda1', 'p', 'a', 'c').get_info()
        return (info['x_container_sync_point1'],
                info['x_container_sync_point2'])

    resp = send('PUT', 'http://127.0.0.1:12345/v1/a/c')
    self.assertEquals(resp.status_int, 201)
    self.assertEquals(sync_points(), (-1, -1))
    db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
    db.set_x_container_sync_points(123, 456)
    self.assertEquals(sync_points(), (123, 456))
    # Set to same value: points survive
    resp = send('POST', 'http://127.0.0.1:12345/v1/a/c')
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(sync_points(), (123, 456))
    # Set to new value: points reset
    resp = send('POST', 'http://127.0.0.1:12345/v1/a/c2')
    self.assertEquals(resp.status_int, 204)
    self.assertEquals(sync_points(), (-1, -1))
def test_DELETE(self):
    """PUT then DELETE then GET: the container ends up gone (404)."""
    for method, ts, expected in (('PUT', '1', 201),
                                 ('DELETE', '2', 204),
                                 ('GET', '3', 404)):
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': method},
                            headers={'X-Timestamp': ts})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, expected)
def test_DELETE_PUT_recreate(self):
    """Delete a container, then recreate it at a later timestamp.

    Verifies the broker's deleted state and the backend timestamp
    headers (put / delete / status-changed-at) across the whole
    delete/recreate cycle, for both GET and HEAD.
    """
    path = '/sda1/p/a/c'
    # create at t=1, delete at t=2
    req = Request.blank(path, method='PUT',
                        headers={'X-Timestamp': '1'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    req = Request.blank(path, method='DELETE',
                        headers={'X-Timestamp': '2'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank(path, method='GET')
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 404)  # sanity
    # backend headers are still reported on the 404
    expectations = {
        'x-backend-put-timestamp': Timestamp(1).internal,
        'x-backend-delete-timestamp': Timestamp(2).internal,
        'x-backend-status-changed-at': Timestamp(2).internal,
    }
    for header, value in expectations.items():
        self.assertEqual(resp.headers[header], value,
                         'response header %s was %s not %s' % (
                             header, resp.headers[header], value))
    db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
    self.assertEqual(True, db.is_deleted())
    info = db.get_info()
    self.assertEquals(info['put_timestamp'], Timestamp('1').internal)
    self.assertEquals(info['delete_timestamp'], Timestamp('2').internal)
    self.assertEquals(info['status_changed_at'], Timestamp('2').internal)
    # recreate at t=4: delete_timestamp is retained, status changes
    req = Request.blank(path, method='PUT',
                        headers={'X-Timestamp': '4'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
    self.assertEqual(False, db.is_deleted())
    info = db.get_info()
    self.assertEquals(info['put_timestamp'], Timestamp('4').internal)
    self.assertEquals(info['delete_timestamp'], Timestamp('2').internal)
    self.assertEquals(info['status_changed_at'], Timestamp('4').internal)
    for method in ('GET', 'HEAD'):
        # bugfix: the loop variable was previously unused
        # (Request.blank(path) always issued a GET), so HEAD was
        # never actually exercised
        req = Request.blank(path, method=method)
        resp = req.get_response(self.controller)
        expectations = {
            'x-put-timestamp': Timestamp(4).normal,
            'x-backend-put-timestamp': Timestamp(4).internal,
            'x-backend-delete-timestamp': Timestamp(2).internal,
            'x-backend-status-changed-at': Timestamp(4).internal,
        }
        for header, expected in expectations.items():
            self.assertEqual(resp.headers[header], expected,
                             'header %s was %s is not expected %s' % (
                                 header, resp.headers[header], expected))
def test_DELETE_PUT_recreate_replication_race(self):
    """Recreate must survive a replicated DB appearing mid-PUT.

    The DB is deleted, then renamed away to simulate living on another
    node; os.path.exists is patched so the first existence check says
    "missing" while "replication" slips the old DB back into place
    before the PUT proceeds.  The PUT must still succeed and update the
    timestamps on the returned DB.
    """
    path = '/sda1/p/a/c'
    # create a deleted db
    req = Request.blank(path, method='PUT',
                        headers={'X-Timestamp': '1'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
    req = Request.blank(path, method='DELETE',
                        headers={'X-Timestamp': '2'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 204)
    req = Request.blank(path, method='GET')
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 404)  # sanity
    self.assertEqual(True, db.is_deleted())
    # now save a copy of this db (and remove it from the "current node")
    db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
    db_path = db.db_file
    other_path = os.path.join(self.testdir, 'othernode.db')
    os.rename(db_path, other_path)
    # that should make it missing on this node
    req = Request.blank(path, method='GET')
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 404)  # sanity
    # setup the race in os.path.exists (first time no, then yes)
    mock_called = []
    _real_exists = os.path.exists

    def mock_exists(db_path):
        # report the truth, but on the first call also "replicate" the
        # saved DB back into place under a parent-directory lock
        rv = _real_exists(db_path)
        if not mock_called:
            # be as careful as we might hope backend replication can be...
            with lock_parent_directory(db_path, timeout=1):
                os.rename(other_path, db_path)
        mock_called.append((rv, db_path))
        return rv

    req = Request.blank(path, method='PUT',
                        headers={'X-Timestamp': '4'})
    with mock.patch.object(container_server.os.path, 'exists',
                           mock_exists):
        resp = req.get_response(self.controller)
    # db was successfully created
    self.assertEqual(resp.status_int // 100, 2)
    db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
    self.assertEqual(False, db.is_deleted())
    # mock proves the race: first exists() saw False, the retry saw True
    self.assertEqual(mock_called[:2],
                     [(exists, db.db_file) for exists in (False, True)])
    # info was updated
    info = db.get_info()
    self.assertEquals(info['put_timestamp'], Timestamp('4').internal)
    self.assertEquals(info['delete_timestamp'], Timestamp('2').internal)
def test_DELETE_not_found(self):
    """DELETE of an unknown container still returns 404 to the client.

    Even if the container wasn't previously heard of, the container
    server will accept the delete and replicate it to where it belongs
    later.
    """
    req = Request.blank(
        '/sda1/p/a/c',
        environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'})
    self.assertEquals(req.get_response(self.controller).status_int, 404)
def test_change_storage_policy_via_DELETE_then_PUT(self):
    """While a container DB sits in the deleted state, a recreating PUT
    may carry a different storage-policy index."""
    ts = (Timestamp(t).internal for t in
          itertools.count(int(time.time())))
    policy = random.choice(list(POLICIES))
    resp = Request.blank(
        '/sda1/p/a/c', method='PUT',
        headers={'X-Timestamp': ts.next(),
                 'X-Backend-Storage-Policy-Index': policy.idx}
    ).get_response(self.controller)
    self.assertEqual(resp.status_int, 201)  # sanity check
    # try re-recreate with every other policy in turn
    for other_policy in (p for p in POLICIES if p != policy):
        # first delete the existing container
        resp = Request.blank(
            '/sda1/p/a/c', method='DELETE',
            headers={'X-Timestamp': ts.next()}
        ).get_response(self.controller)
        self.assertEqual(resp.status_int, 204)  # sanity check
        # the DB still exists but is deleted, so changing the policy
        # index is perfectly acceptable
        resp = Request.blank(
            '/sda1/p/a/c', method='PUT',
            headers={'X-Timestamp': ts.next(),
                     'X-Backend-Storage-Policy-Index': other_policy.idx}
        ).get_response(self.controller)
        self.assertEqual(resp.status_int, 201)  # sanity check
        resp = Request.blank(
            '/sda1/p/a/c', method='HEAD').get_response(self.controller)
        self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
                         str(other_policy.idx))
def test_change_to_default_storage_policy_via_DELETE_then_PUT(self):
    """Recreating a deleted container without a policy header falls
    back to the default storage policy."""
    ts = (Timestamp(t).internal for t in
          itertools.count(int(time.time())))
    non_default_policy = random.choice([p for p in POLICIES
                                        if not p.is_default])
    resp = Request.blank('/sda1/p/a/c', method='PUT', headers={
        'X-Timestamp': ts.next(),
        'X-Backend-Storage-Policy-Index': non_default_policy.idx,
    }).get_response(self.controller)
    self.assertEqual(resp.status_int, 201)  # sanity check
    resp = Request.blank(
        '/sda1/p/a/c', method='DELETE',
        headers={'X-Timestamp': ts.next()}).get_response(self.controller)
    self.assertEqual(resp.status_int, 204)  # sanity check
    # the DB still exists but is deleted, so a policy change on the
    # recreating PUT is acceptable; here no policy header means default
    resp = Request.blank(
        '/sda1/p/a/c', method='PUT',
        headers={'X-Timestamp': ts.next()}).get_response(self.controller)
    self.assertEqual(resp.status_int, 201)  # sanity check
    resp = Request.blank(
        '/sda1/p/a/c', method='HEAD').get_response(self.controller)
    self.assertEqual(resp.headers['X-Backend-Storage-Policy-Index'],
                     str(POLICIES.default.idx))
def test_DELETE_object(self):
    """A container holding an object refuses DELETE (409) until the
    object itself has been deleted."""
    resp = Request.blank(
        '/sda1/p/a/c', method='PUT',
        headers={'X-Timestamp': Timestamp(2).internal}
    ).get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    obj_put = Request.blank(
        '/sda1/p/a/c/o', method='PUT', headers={
            'X-Timestamp': Timestamp(0).internal, 'X-Size': 1,
            'X-Content-Type': 'text/plain', 'X-Etag': 'x'})
    self._update_object_put_headers(obj_put)
    self.assertEquals(obj_put.get_response(self.controller).status_int,
                      201)
    ts = (Timestamp(t).internal for t in itertools.count(3))
    # container is not empty yet -> conflict
    resp = Request.blank(
        '/sda1/p/a/c', method='DELETE',
        headers={'X-Timestamp': ts.next()}).get_response(self.controller)
    self.assertEquals(resp.status_int, 409)
    # remove the object
    obj_del = Request.blank('/sda1/p/a/c/o', method='DELETE', headers={
        'X-Timestamp': ts.next()})
    self._update_object_put_headers(obj_del)
    self.assertEquals(obj_del.get_response(self.controller).status_int,
                      204)
    # now the container DELETE goes through
    resp = Request.blank(
        '/sda1/p/a/c', method='DELETE',
        headers={'X-Timestamp': ts.next()}).get_response(self.controller)
    self.assertEquals(resp.status_int, 204)
    resp = Request.blank(
        '/sda1/p/a/c', method='GET',
        headers={'X-Timestamp': ts.next()}).get_response(self.controller)
    self.assertEquals(resp.status_int, 404)
def test_object_update_with_offset(self):
    """Object rows can be superseded by updates at the same timestamp
    with a higher offset (Timestamp(..., offset=N)).

    Walks through: create, offset-overwrite, delete, offset-recreate
    over the delete, and finally an offset-delete over that, checking
    the container listing and its count/bytes headers at each step.
    """
    ts = (Timestamp(t).internal for t in
          itertools.count(int(time.time())))
    # create container
    req = Request.blank('/sda1/p/a/c', method='PUT', headers={
        'X-Timestamp': ts.next()})
    resp = req.get_response(self.controller)
    self.assertEqual(resp.status_int, 201)
    # check status
    req = Request.blank('/sda1/p/a/c', method='HEAD')
    resp = req.get_response(self.controller)
    self.assertEqual(resp.status_int, 204)
    self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']),
                     int(POLICIES.default))
    # create object
    obj_timestamp = ts.next()
    req = Request.blank(
        '/sda1/p/a/c/o', method='PUT', headers={
            'X-Timestamp': obj_timestamp, 'X-Size': 1,
            'X-Content-Type': 'text/plain', 'X-Etag': 'x'})
    self._update_object_put_headers(req)
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    # check listing
    req = Request.blank('/sda1/p/a/c', method='GET',
                        query_string='format=json')
    resp = req.get_response(self.controller)
    self.assertEqual(resp.status_int, 200)
    self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
    self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 1)
    listing_data = json.loads(resp.body)
    self.assertEqual(1, len(listing_data))
    for obj in listing_data:
        self.assertEqual(obj['name'], 'o')
        self.assertEqual(obj['bytes'], 1)
        self.assertEqual(obj['hash'], 'x')
        self.assertEqual(obj['content_type'], 'text/plain')
    # send an update with an offset (same timestamp, offset=1 wins)
    offset_timestamp = Timestamp(obj_timestamp, offset=1).internal
    req = Request.blank(
        '/sda1/p/a/c/o', method='PUT', headers={
            'X-Timestamp': offset_timestamp, 'X-Size': 2,
            'X-Content-Type': 'text/html', 'X-Etag': 'y'})
    self._update_object_put_headers(req)
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    # check updated listing
    req = Request.blank('/sda1/p/a/c', method='GET',
                        query_string='format=json')
    resp = req.get_response(self.controller)
    self.assertEqual(resp.status_int, 200)
    self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
    self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 2)
    listing_data = json.loads(resp.body)
    self.assertEqual(1, len(listing_data))
    for obj in listing_data:
        self.assertEqual(obj['name'], 'o')
        self.assertEqual(obj['bytes'], 2)
        self.assertEqual(obj['hash'], 'y')
        self.assertEqual(obj['content_type'], 'text/html')
    # now delete with a newer (whole-second) time
    delete_timestamp = ts.next()
    req = Request.blank(
        '/sda1/p/a/c/o', method='DELETE', headers={
            'X-Timestamp': delete_timestamp})
    self._update_object_put_headers(req)
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 204)
    # check empty listing
    req = Request.blank('/sda1/p/a/c', method='GET',
                        query_string='format=json')
    resp = req.get_response(self.controller)
    self.assertEqual(resp.status_int, 200)
    self.assertEqual(int(resp.headers['X-Container-Object-Count']), 0)
    self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 0)
    listing_data = json.loads(resp.body)
    self.assertEqual(0, len(listing_data))
    # recreate with an offset over the delete timestamp
    offset_timestamp = Timestamp(delete_timestamp, offset=1).internal
    req = Request.blank(
        '/sda1/p/a/c/o', method='PUT', headers={
            'X-Timestamp': offset_timestamp, 'X-Size': 3,
            'X-Content-Type': 'text/enriched', 'X-Etag': 'z'})
    self._update_object_put_headers(req)
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    # check un-deleted listing
    req = Request.blank('/sda1/p/a/c', method='GET',
                        query_string='format=json')
    resp = req.get_response(self.controller)
    self.assertEqual(resp.status_int, 200)
    self.assertEqual(int(resp.headers['X-Container-Object-Count']), 1)
    self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 3)
    listing_data = json.loads(resp.body)
    self.assertEqual(1, len(listing_data))
    for obj in listing_data:
        self.assertEqual(obj['name'], 'o')
        self.assertEqual(obj['bytes'], 3)
        self.assertEqual(obj['hash'], 'z')
        self.assertEqual(obj['content_type'], 'text/enriched')
    # delete offset with newer offset
    delete_timestamp = Timestamp(offset_timestamp, offset=1).internal
    req = Request.blank(
        '/sda1/p/a/c/o', method='DELETE', headers={
            'X-Timestamp': delete_timestamp})
    self._update_object_put_headers(req)
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 204)
    # check empty listing
    req = Request.blank('/sda1/p/a/c', method='GET',
                        query_string='format=json')
    resp = req.get_response(self.controller)
    self.assertEqual(resp.status_int, 200)
    self.assertEqual(int(resp.headers['X-Container-Object-Count']), 0)
    self.assertEqual(int(resp.headers['X-Container-Bytes-Used']), 0)
    listing_data = json.loads(resp.body)
    self.assertEqual(0, len(listing_data))
def test_DELETE_account_update(self):
    """Container DELETE forwards an account update; exercise the
    account server answering 204, 404 and 503.

    An ``accept`` greenthread plays the account server on a real
    socket and checks the forwarded request line and its
    x-delete-timestamp header.
    """
    bindsock = listen(('127.0.0.1', 0))

    def accept(return_code, expected_timestamp):
        # Answer one connection with return_code and verify the request
        # the container server forwarded; failures are *returned* (not
        # raised) so the main greenthread can re-raise them.
        try:
            with Timeout(3):
                sock, addr = bindsock.accept()
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                          return_code)
                out.flush()
                self.assertEquals(inc.readline(),
                                  'PUT /sda1/123/a/c HTTP/1.1\r\n')
                headers = {}
                line = inc.readline()
                while line and line != '\r\n':
                    headers[line.split(':')[0].lower()] = \
                        line.split(':')[1].strip()
                    line = inc.readline()
                self.assertEquals(headers['x-delete-timestamp'],
                                  expected_timestamp)
        except BaseException as err:
            return err
        return None

    req = Request.blank(
        '/sda1/p/a/c',
        environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    # account server answers 204: the container DELETE succeeds
    req = Request.blank(
        '/sda1/p/a/c',
        environ={'REQUEST_METHOD': 'DELETE'},
        headers={'X-Timestamp': Timestamp(2).internal,
                 'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                 'X-Account-Partition': '123',
                 'X-Account-Device': 'sda1'})
    event = spawn(accept, 204, Timestamp(2).internal)
    try:
        with Timeout(3):
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 204)
    finally:
        err = event.wait()
        if err:
            raise Exception(err)
    # recreate, then account server answers 404: mirrored to the client
    req = Request.blank(
        '/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': Timestamp(2).internal})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    req = Request.blank(
        '/sda1/p/a/c',
        environ={'REQUEST_METHOD': 'DELETE'},
        headers={'X-Timestamp': Timestamp(3).internal,
                 'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                 'X-Account-Partition': '123',
                 'X-Account-Device': 'sda1'})
    event = spawn(accept, 404, Timestamp(3).internal)
    try:
        with Timeout(3):
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 404)
    finally:
        err = event.wait()
        if err:
            raise Exception(err)
    # recreate, then account server answers 503: must not leak an
    # exception to the client
    req = Request.blank(
        '/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': Timestamp(4).internal})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 201)
    req = Request.blank(
        '/sda1/p/a/c',
        environ={'REQUEST_METHOD': 'DELETE'},
        headers={'X-Timestamp': Timestamp(5).internal,
                 'X-Account-Host': '%s:%s' % bindsock.getsockname(),
                 'X-Account-Partition': '123',
                 'X-Account-Device': 'sda1'})
    event = spawn(accept, 503, Timestamp(5).internal)
    got_exc = False
    try:
        with Timeout(3):
            resp = req.get_response(self.controller)
    except BaseException:
        # bugfix: previously bound ``as err`` -- the name was never read
        # and was immediately clobbered by ``err = event.wait()`` below
        got_exc = True
    finally:
        err = event.wait()
        if err:
            raise Exception(err)
    self.assert_(not got_exc)
def test_DELETE_invalid_partition(self):
    """A non-numeric partition segment ('.') is rejected with 400."""
    req = Request.blank(
        '/sda1/./a/c', environ={'REQUEST_METHOD': 'DELETE',
                                'HTTP_X_TIMESTAMP': '1'})
    self.assertEquals(req.get_response(self.controller).status_int, 400)
def test_DELETE_timestamp_not_float(self):
    """A non-numeric X-Timestamp on DELETE is rejected with 400."""
    Request.blank('/sda1/p/a/c',
                  environ={'REQUEST_METHOD': 'PUT',
                           'HTTP_X_TIMESTAMP': '0'}
                  ).get_response(self.controller)
    resp = Request.blank('/sda1/p/a/c',
                         environ={'REQUEST_METHOD': 'DELETE'},
                         headers={'X-Timestamp': 'not-float'}
                         ).get_response(self.controller)
    self.assertEquals(resp.status_int, 400)
def test_DELETE_insufficient_storage(self):
    """DELETE against an unavailable device must return 507."""
    self.controller = container_server.ContainerController(
        {'devices': self.testdir})
    resp = Request.blank(
        '/sda-null/p/a/c',
        environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'}
    ).get_response(self.controller)
    self.assertEquals(resp.status_int, 507)
def test_GET_over_limit(self):
    """A listing limit above CONTAINER_LISTING_LIMIT yields 412."""
    path = '/sda1/p/a/c?limit=%d' % (
        constraints.CONTAINER_LISTING_LIMIT + 1)
    resp = Request.blank(
        path, environ={'REQUEST_METHOD': 'GET'}
    ).get_response(self.controller)
    self.assertEquals(resp.status_int, 412)
def test_GET_json(self):
    """Container listing as JSON.

    Covers the empty container, the exact JSON rendering of three
    objects, the content type / charset on GET and HEAD, and content
    negotiation via several JSON-selecting Accept headers.
    """
    # make a container
    req = Request.blank(
        '/sda1/p/a/jsonc', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
    resp = req.get_response(self.controller)
    # test an empty container
    req = Request.blank(
        '/sda1/p/a/jsonc?format=json',
        environ={'REQUEST_METHOD': 'GET'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 200)
    self.assertEquals(simplejson.loads(resp.body), [])
    # fill the container
    for i in range(3):
        req = Request.blank(
            '/sda1/p/a/jsonc/%s' % i, environ={
                'REQUEST_METHOD': 'PUT',
                'HTTP_X_TIMESTAMP': '1',
                'HTTP_X_CONTENT_TYPE': 'text/plain',
                'HTTP_X_ETAG': 'x',
                'HTTP_X_SIZE': 0})
        self._update_object_put_headers(req)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
    # test format -- the expected exact JSON listing
    json_body = [{"name": "0",
                  "hash": "x",
                  "bytes": 0,
                  "content_type": "text/plain",
                  "last_modified": "1970-01-01T00:00:01.000000"},
                 {"name": "1",
                  "hash": "x",
                  "bytes": 0,
                  "content_type": "text/plain",
                  "last_modified": "1970-01-01T00:00:01.000000"},
                 {"name": "2",
                  "hash": "x",
                  "bytes": 0,
                  "content_type": "text/plain",
                  "last_modified": "1970-01-01T00:00:01.000000"}]
    req = Request.blank(
        '/sda1/p/a/jsonc?format=json',
        environ={'REQUEST_METHOD': 'GET'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.content_type, 'application/json')
    self.assertEquals(simplejson.loads(resp.body), json_body)
    self.assertEquals(resp.charset, 'utf-8')
    # HEAD with ?format=json also reports application/json
    req = Request.blank(
        '/sda1/p/a/jsonc?format=json',
        environ={'REQUEST_METHOD': 'HEAD'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.content_type, 'application/json')
    # content negotiation: each of these Accept values selects JSON
    for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
                   '*/*;q=0.9,application/json;q=1.0', 'application/*'):
        req = Request.blank(
            '/sda1/p/a/jsonc',
            environ={'REQUEST_METHOD': 'GET'})
        req.accept = accept
        resp = req.get_response(self.controller)
        self.assertEquals(
            simplejson.loads(resp.body), json_body,
            'Invalid body for Accept: %s' % accept)
        self.assertEquals(
            resp.content_type, 'application/json',
            'Invalid content_type for Accept: %s' % accept)
        req = Request.blank(
            '/sda1/p/a/jsonc',
            environ={'REQUEST_METHOD': 'HEAD'})
        req.accept = accept
        resp = req.get_response(self.controller)
        self.assertEquals(
            resp.content_type, 'application/json',
            'Invalid content_type for Accept: %s' % accept)
def test_GET_plain(self):
    """Container listing as plain text.

    Covers the empty container (204), the newline-separated listing,
    content negotiation via plain-selecting Accept headers, a
    ?format=plain override of a JSON Accept, and an unknown format
    falling back to plain.
    """
    # make a container
    req = Request.blank(
        '/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
    resp = req.get_response(self.controller)
    # test an empty container
    req = Request.blank(
        '/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'GET'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 204)
    # fill the container
    for i in range(3):
        req = Request.blank(
            '/sda1/p/a/plainc/%s' % i, environ={
                'REQUEST_METHOD': 'PUT',
                'HTTP_X_TIMESTAMP': '1',
                'HTTP_X_CONTENT_TYPE': 'text/plain',
                'HTTP_X_ETAG': 'x',
                'HTTP_X_SIZE': 0})
        self._update_object_put_headers(req)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
    plain_body = '0\n1\n2\n'
    req = Request.blank('/sda1/p/a/plainc',
                        environ={'REQUEST_METHOD': 'GET'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.content_type, 'text/plain')
    self.assertEquals(resp.body, plain_body)
    self.assertEquals(resp.charset, 'utf-8')
    req = Request.blank('/sda1/p/a/plainc',
                        environ={'REQUEST_METHOD': 'HEAD'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.content_type, 'text/plain')
    # content negotiation: each of these Accept values selects plain
    for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
                   '*/*;q=0.9,application/xml;q=0.8', '*/*',
                   'text/plain,application/xml'):
        req = Request.blank(
            '/sda1/p/a/plainc',
            environ={'REQUEST_METHOD': 'GET'})
        req.accept = accept
        resp = req.get_response(self.controller)
        self.assertEquals(
            resp.body, plain_body,
            'Invalid body for Accept: %s' % accept)
        self.assertEquals(
            resp.content_type, 'text/plain',
            'Invalid content_type for Accept: %s' % accept)
        # NOTE(review): the json/xml variants of this test issue a HEAD
        # for the second request in the loop; this second GET may have
        # been intended as HEAD -- verify before changing
        req = Request.blank(
            '/sda1/p/a/plainc',
            environ={'REQUEST_METHOD': 'GET'})
        req.accept = accept
        resp = req.get_response(self.controller)
        self.assertEquals(
            resp.content_type, 'text/plain',
            'Invalid content_type for Accept: %s' % accept)
    # test conflicting formats -- the query parameter wins over Accept
    req = Request.blank(
        '/sda1/p/a/plainc?format=plain',
        environ={'REQUEST_METHOD': 'GET'})
    req.accept = 'application/json'
    resp = req.get_response(self.controller)
    self.assertEquals(resp.content_type, 'text/plain')
    self.assertEquals(resp.body, plain_body)
    # test unknown format uses default plain
    req = Request.blank(
        '/sda1/p/a/plainc?format=somethingelse',
        environ={'REQUEST_METHOD': 'GET'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.status_int, 200)
    self.assertEquals(resp.content_type, 'text/plain')
    self.assertEquals(resp.body, plain_body)
def test_GET_json_last_modified(self):
    """last_modified is always rendered with microseconds, even when
    the object's timestamp has no fractional part."""
    resp = Request.blank(
        '/sda1/p/a/jsonc', environ={
            'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'}).get_response(self.controller)
    # one object with msecs in its timestamp, one without
    for name, stamp in ((0, 1.5), (1, 1.0)):
        req = Request.blank(
            '/sda1/p/a/jsonc/%s' % name, environ={
                'REQUEST_METHOD': 'PUT',
                'HTTP_X_TIMESTAMP': stamp,
                'HTTP_X_CONTENT_TYPE': 'text/plain',
                'HTTP_X_ETAG': 'x',
                'HTTP_X_SIZE': 0})
        self._update_object_put_headers(req)
        self.assertEquals(
            req.get_response(self.controller).status_int, 201)
    # last_modified format must be uniform, even when there are no msecs
    expected = [{"name": "0",
                 "hash": "x",
                 "bytes": 0,
                 "content_type": "text/plain",
                 "last_modified": "1970-01-01T00:00:01.500000"},
                {"name": "1",
                 "hash": "x",
                 "bytes": 0,
                 "content_type": "text/plain",
                 "last_modified": "1970-01-01T00:00:01.000000"}]
    resp = Request.blank(
        '/sda1/p/a/jsonc?format=json',
        environ={'REQUEST_METHOD': 'GET'}).get_response(self.controller)
    self.assertEquals(resp.content_type, 'application/json')
    self.assertEquals(simplejson.loads(resp.body), expected)
    self.assertEquals(resp.charset, 'utf-8')
def test_GET_xml(self):
    """Container listing as XML.

    Checks the exact XML rendering of three objects, the content type
    and charset, negotiation via XML-selecting Accept headers, and that
    Accept: text/xml is echoed back as text/xml.
    """
    # make a container
    req = Request.blank(
        '/sda1/p/a/xmlc', environ={'REQUEST_METHOD': 'PUT',
                                   'HTTP_X_TIMESTAMP': '0'})
    resp = req.get_response(self.controller)
    # fill the container
    for i in range(3):
        req = Request.blank(
            '/sda1/p/a/xmlc/%s' % i,
            environ={
                'REQUEST_METHOD': 'PUT',
                'HTTP_X_TIMESTAMP': '1',
                'HTTP_X_CONTENT_TYPE': 'text/plain',
                'HTTP_X_ETAG': 'x',
                'HTTP_X_SIZE': 0})
        self._update_object_put_headers(req)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
    # the exact XML document the server is expected to produce
    xml_body = '<?xml version="1.0" encoding="UTF-8"?>\n' \
        '<container name="xmlc">' \
        '<object><name>0</name><hash>x</hash><bytes>0</bytes>' \
        '<content_type>text/plain</content_type>' \
        '<last_modified>1970-01-01T00:00:01.000000' \
        '</last_modified></object>' \
        '<object><name>1</name><hash>x</hash><bytes>0</bytes>' \
        '<content_type>text/plain</content_type>' \
        '<last_modified>1970-01-01T00:00:01.000000' \
        '</last_modified></object>' \
        '<object><name>2</name><hash>x</hash><bytes>0</bytes>' \
        '<content_type>text/plain</content_type>' \
        '<last_modified>1970-01-01T00:00:01.000000' \
        '</last_modified></object>' \
        '</container>'
    # tests
    req = Request.blank(
        '/sda1/p/a/xmlc?format=xml',
        environ={'REQUEST_METHOD': 'GET'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.content_type, 'application/xml')
    self.assertEquals(resp.body, xml_body)
    self.assertEquals(resp.charset, 'utf-8')
    req = Request.blank(
        '/sda1/p/a/xmlc?format=xml',
        environ={'REQUEST_METHOD': 'HEAD'})
    resp = req.get_response(self.controller)
    self.assertEquals(resp.content_type, 'application/xml')
    # content negotiation: each of these Accept values selects XML
    for xml_accept in (
            'application/xml', 'application/xml;q=1.0,*/*;q=0.9',
            '*/*;q=0.9,application/xml;q=1.0', 'application/xml,text/xml'):
        req = Request.blank(
            '/sda1/p/a/xmlc',
            environ={'REQUEST_METHOD': 'GET'})
        req.accept = xml_accept
        resp = req.get_response(self.controller)
        self.assertEquals(
            resp.body, xml_body,
            'Invalid body for Accept: %s' % xml_accept)
        self.assertEquals(
            resp.content_type, 'application/xml',
            'Invalid content_type for Accept: %s' % xml_accept)
        req = Request.blank(
            '/sda1/p/a/xmlc',
            environ={'REQUEST_METHOD': 'HEAD'})
        req.accept = xml_accept
        resp = req.get_response(self.controller)
        self.assertEquals(
            resp.content_type, 'application/xml',
            'Invalid content_type for Accept: %s' % xml_accept)
    # Accept: text/xml is honored verbatim in the response content type
    req = Request.blank(
        '/sda1/p/a/xmlc',
        environ={'REQUEST_METHOD': 'GET'})
    req.accept = 'text/xml'
    resp = req.get_response(self.controller)
    self.assertEquals(resp.content_type, 'text/xml')
    self.assertEquals(resp.body, xml_body)
def test_GET_marker(self):
    """limit combined with marker returns only entries after marker."""
    Request.blank('/sda1/p/a/c',
                  environ={'REQUEST_METHOD': 'PUT',
                           'HTTP_X_TIMESTAMP': '0'}
                  ).get_response(self.controller)
    # fill the container with objects named 0, 1, 2
    for name in range(3):
        req = Request.blank(
            '/sda1/p/a/c/%s' % name, environ={
                'REQUEST_METHOD': 'PUT',
                'HTTP_X_TIMESTAMP': '1',
                'HTTP_X_CONTENT_TYPE': 'text/plain',
                'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
        self._update_object_put_headers(req)
        self.assertEquals(
            req.get_response(self.controller).status_int, 201)
    # test limit with marker
    resp = Request.blank('/sda1/p/a/c?limit=2&marker=1',
                         environ={'REQUEST_METHOD': 'GET'}
                         ).get_response(self.controller)
    self.assertEquals(resp.body.split(), ['2'])
    def test_weird_content_types(self):
        """Non-ASCII and parameterized content types survive PUT + JSON listing."""
        snowman = u'\u2603'
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for i, ctype in enumerate((snowman.encode('utf-8'),
                                   'text/plain; charset="utf-8"')):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i, environ={
                    'REQUEST_METHOD': 'PUT',
                    'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype,
                    'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
            self._update_object_put_headers(req)
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        result = [x['content_type'] for x in simplejson.loads(resp.body)]
        # note the stored form drops the space before 'charset'
        self.assertEquals(result, [u'\u2603', 'text/plain;charset="utf-8"'])
    def test_GET_accept_not_valid(self):
        """A malformed Accept header on GET yields 406 Not Acceptable."""
        req = Request.blank('/sda1/p/a/c', method='PUT', headers={
            'X-Timestamp': Timestamp(0).internal})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        req = Request.blank('/sda1/p/a/c', method='GET')
        # 'application/xml*' is not a valid media type expression
        req.accept = 'application/xml*'
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 406)
    def test_GET_limit(self):
        """limit=N truncates the container listing to the first N names."""
        # make a container
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        # fill the container
        for i in range(3):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i,
                environ={
                    'REQUEST_METHOD': 'PUT',
                    'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain',
                    'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            self._update_object_put_headers(req)
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        # test limit
        req = Request.blank(
            '/sda1/p/a/c?limit=2', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        result = resp.body.split()
        self.assertEquals(result, ['0', '1'])
    def test_GET_prefix(self):
        """prefix=a limits the listing to names starting with 'a'."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for i in ('a1', 'b1', 'a2', 'b2', 'a3', 'b3'):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i,
                environ={
                    'REQUEST_METHOD': 'PUT',
                    'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain',
                    'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            self._update_object_put_headers(req)
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c?prefix=a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.body.split(), ['a1', 'a2', 'a3'])
    def test_GET_delimiter_too_long(self):
        """Delimiters longer than one character are rejected with 412."""
        req = Request.blank('/sda1/p/a/c?delimiter=xx',
                            environ={'REQUEST_METHOD': 'GET',
                                     'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 412)
    def test_GET_delimiter(self):
        """prefix + delimiter roll matching names up into 'subdir' entries."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i,
                environ={
                    'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            self._update_object_put_headers(req)
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c?prefix=US-&delimiter=-&format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        # each state collapses into a single subdir marker, sorted
        self.assertEquals(
            simplejson.loads(resp.body),
            [{"subdir": "US-OK-"},
             {"subdir": "US-TX-"},
             {"subdir": "US-UT-"}])
    def test_GET_delimiter_xml(self):
        """Delimiter rollups are rendered as <subdir> elements in XML format."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i,
                environ={
                    'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            self._update_object_put_headers(req)
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c?prefix=US-&delimiter=-&format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(
            resp.body, '<?xml version="1.0" encoding="UTF-8"?>'
            '\n<container name="c"><subdir name="US-OK-">'
            '<name>US-OK-</name></subdir>'
            '<subdir name="US-TX-"><name>US-TX-</name></subdir>'
            '<subdir name="US-UT-"><name>US-UT-</name></subdir></container>')
    def test_GET_delimiter_xml_with_quotes(self):
        """Quotes and angle brackets in subdir names are escaped in XML output."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a/c/<\'sub\' "dir">/object',
            environ={
                'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
                'HTTP_X_SIZE': 0})
        self._update_object_put_headers(req)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c?delimiter=/&format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        # parse the response back; a parse failure means broken escaping
        dom = minidom.parseString(resp.body)
        self.assert_(len(dom.getElementsByTagName('container')) == 1)
        container = dom.getElementsByTagName('container')[0]
        self.assert_(len(container.getElementsByTagName('subdir')) == 1)
        subdir = container.getElementsByTagName('subdir')[0]
        self.assertEquals(unicode(subdir.attributes['name'].value),
                          u'<\'sub\' "dir">/')
        self.assert_(len(subdir.getElementsByTagName('name')) == 1)
        name = subdir.getElementsByTagName('name')[0]
        self.assertEquals(unicode(name.childNodes[0].data),
                          u'<\'sub\' "dir">/')
    def test_GET_path(self):
        """path=US lists only direct children of the 'US/' pseudo-directory."""
        req = Request.blank(
            '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
                                    'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for i in ('US/TX', 'US/TX/B', 'US/OK', 'US/OK/B', 'US/UT/A'):
            req = Request.blank(
                '/sda1/p/a/c/%s' % i,
                environ={
                    'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                    'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
                    'HTTP_X_SIZE': 0})
            self._update_object_put_headers(req)
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 201)
        req = Request.blank(
            '/sda1/p/a/c?path=US&format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        # nested names (US/TX/B, US/OK/B, US/UT/A) are excluded
        self.assertEquals(
            simplejson.loads(resp.body),
            [{"name": "US/OK", "hash": "x", "bytes": 0,
              "content_type": "text/plain",
              "last_modified": "1970-01-01T00:00:01.000000"},
             {"name": "US/TX", "hash": "x", "bytes": 0,
              "content_type": "text/plain",
              "last_modified": "1970-01-01T00:00:01.000000"}])
    def test_GET_insufficient_storage(self):
        """GET on an unavailable device returns 507 Insufficient Storage."""
        self.controller = container_server.ContainerController(
            {'devices': self.testdir})
        # 'sda-null' does not exist under testdir
        req = Request.blank(
            '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'GET',
                                        'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 507)
    def test_through_call(self):
        """Calling the raw WSGI app for a missing container produces a 404."""
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            # capture the status line (and headers) handed to the server
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '/sda1/p/a/c',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '404 ')
    def test_through_call_invalid_path(self):
        """A path without device/partition/account yields a 400 Bad Request."""
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            # capture the status line (and headers) handed to the server
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '/bob',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '400 ')
    def test_through_call_invalid_path_utf8(self):
        """A NUL byte in the path yields a 412 Precondition Failed."""
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            # capture the status line (and headers) handed to the server
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '\x00',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '412 ')
    def test_invalid_method_doesnt_exist(self):
        """An unknown HTTP verb is rejected with 405 Method Not Allowed."""
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            # capture the status line (and headers) handed to the server
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist',
                                  'PATH_INFO': '/sda1/p/a/c'},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '405 ')
    def test_invalid_method_is_not_public(self):
        """A non-public controller attribute (__init__) cannot be invoked as a verb."""
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            # capture the status line (and headers) handed to the server
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': '__init__',
                                  'PATH_INFO': '/sda1/p/a/c'},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '405 ')
    def test_params_format(self):
        """Both supported listing formats (xml, json) return 200."""
        req = Request.blank(
            '/sda1/p/a/c', method='PUT',
            headers={'X-Timestamp': Timestamp(1).internal})
        req.get_response(self.controller)
        for format in ('xml', 'json'):
            req = Request.blank('/sda1/p/a/c?format=%s' % format,
                                method='GET')
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 200)
    def test_params_utf8(self):
        """Query parameters must be valid UTF-8; valid multi-byte values are OK."""
        # Bad UTF8 sequence, all parameters should cause 400 error
        for param in ('delimiter', 'limit', 'marker', 'path', 'prefix',
                      'end_marker', 'format'):
            req = Request.blank('/sda1/p/a/c?%s=\xce' % param,
                                environ={'REQUEST_METHOD': 'GET'})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 400,
                              "%d on param %s" % (resp.status_int, param))
        # Good UTF8 sequence for delimiter, too long (1 byte delimiters only)
        req = Request.blank('/sda1/p/a/c?delimiter=\xce\xa9',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 412,
                          "%d on param delimiter" % (resp.status_int))
        req = Request.blank('/sda1/p/a/c', method='PUT',
                            headers={'X-Timestamp': Timestamp(1).internal})
        req.get_response(self.controller)
        # Good UTF8 sequence, ignored for limit, doesn't affect other queries
        for param in ('limit', 'marker', 'path', 'prefix', 'end_marker',
                      'format'):
            req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param,
                                environ={'REQUEST_METHOD': 'GET'})
            resp = req.get_response(self.controller)
            self.assertEquals(resp.status_int, 204,
                              "%d on param %s" % (resp.status_int, param))
    def test_put_auto_create(self):
        """Object PUT auto-creates the container only for '.'-prefixed accounts."""
        headers = {'x-timestamp': Timestamp(1).internal,
                   'x-size': '0',
                   'x-content-type': 'text/plain',
                   'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
        # normal account: container missing -> 404
        req = Request.blank('/sda1/p/a/c/o',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
        # reserved account '.a': container auto-created -> 201
        req = Request.blank('/sda1/p/.a/c/o',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 201)
        # a '.'-prefixed container or object alone does not trigger it
        req = Request.blank('/sda1/p/a/.c/o',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
        req = Request.blank('/sda1/p/a/c/.o',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
    def test_delete_auto_create(self):
        """Object DELETE auto-creates the container only for '.'-prefixed accounts."""
        headers = {'x-timestamp': Timestamp(1).internal}
        # normal account: container missing -> 404
        req = Request.blank('/sda1/p/a/c/o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
        # reserved account '.a': tombstone recorded -> 204
        req = Request.blank('/sda1/p/.a/c/o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 204)
        # '.'-prefixed container/object names alone do not trigger it
        req = Request.blank('/sda1/p/a/.c/o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
        req = Request.blank('/sda1/p/a/.c/.o',
                            environ={'REQUEST_METHOD': 'DELETE'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEquals(resp.status_int, 404)
    def test_content_type_on_HEAD(self):
        """HEAD honors both ?format= and the Accept header for content type."""
        Request.blank('/sda1/p/a/o',
                      headers={'X-Timestamp': Timestamp(1).internal},
                      environ={'REQUEST_METHOD': 'PUT'}).get_response(
                          self.controller)
        env = {'REQUEST_METHOD': 'HEAD'}
        req = Request.blank('/sda1/p/a/o?format=xml', environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/xml')
        self.assertEquals(resp.charset, 'utf-8')
        req = Request.blank('/sda1/p/a/o?format=json', environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/json')
        self.assertEquals(resp.charset, 'utf-8')
        # default with no format or Accept is plain text
        req = Request.blank('/sda1/p/a/o', environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'text/plain')
        self.assertEquals(resp.charset, 'utf-8')
        req = Request.blank(
            '/sda1/p/a/o', headers={'Accept': 'application/json'}, environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/json')
        self.assertEquals(resp.charset, 'utf-8')
        req = Request.blank(
            '/sda1/p/a/o', headers={'Accept': 'application/xml'}, environ=env)
        resp = req.get_response(self.controller)
        self.assertEquals(resp.content_type, 'application/xml')
        self.assertEquals(resp.charset, 'utf-8')
    def test_updating_multiple_container_servers(self):
        """Container PUT fans account updates out to every host/device pair
        given in the X-Account-Host / X-Account-Device headers."""
        http_connect_args = []
        def fake_http_connect(ipaddr, port, device, partition, method, path,
                              headers=None, query_string=None, ssl=False):
            class SuccessfulFakeConn(object):
                @property
                def status(self):
                    return 200
                def getresponse(self):
                    return self
                def read(self):
                    return ''
            # record every connection attempt for later inspection
            captured_args = {'ipaddr': ipaddr, 'port': port,
                             'device': device, 'partition': partition,
                             'method': method, 'path': path, 'ssl': ssl,
                             'headers': headers, 'query_string': query_string}
            http_connect_args.append(
                dict((k, v) for k, v in captured_args.iteritems()
                     if v is not None))
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '12345',
                     'X-Account-Partition': '30',
                     'X-Account-Host': '1.2.3.4:5, 6.7.8.9:10',
                     'X-Account-Device': 'sdb1, sdf1'})
        # swap in the fake connection for the duration of the request
        orig_http_connect = container_server.http_connect
        try:
            container_server.http_connect = fake_http_connect
            req.get_response(self.controller)
        finally:
            container_server.http_connect = orig_http_connect
        http_connect_args.sort(key=operator.itemgetter('ipaddr'))
        self.assertEquals(len(http_connect_args), 2)
        self.assertEquals(
            http_connect_args[0],
            {'ipaddr': '1.2.3.4',
             'port': '5',
             'path': '/a/c',
             'device': 'sdb1',
             'partition': '30',
             'method': 'PUT',
             'ssl': False,
             'headers': HeaderKeyDict({
                 'x-bytes-used': 0,
                 'x-delete-timestamp': '0',
                 'x-object-count': 0,
                 'x-put-timestamp': Timestamp(12345).internal,
                 'X-Backend-Storage-Policy-Index': '%s' % POLICIES.default.idx,
                 'referer': 'PUT http://localhost/sda1/p/a/c',
                 'user-agent': 'container-server %d' % os.getpid(),
                 'x-trans-id': '-'})})
        self.assertEquals(
            http_connect_args[1],
            {'ipaddr': '6.7.8.9',
             'port': '10',
             'path': '/a/c',
             'device': 'sdf1',
             'partition': '30',
             'method': 'PUT',
             'ssl': False,
             'headers': HeaderKeyDict({
                 'x-bytes-used': 0,
                 'x-delete-timestamp': '0',
                 'x-object-count': 0,
                 'x-put-timestamp': Timestamp(12345).internal,
                 'X-Backend-Storage-Policy-Index': '%s' % POLICIES.default.idx,
                 'referer': 'PUT http://localhost/sda1/p/a/c',
                 'user-agent': 'container-server %d' % os.getpid(),
                 'x-trans-id': '-'})})
    def test_serv_reserv(self):
        """Config 'replication_server' values map to True/False/None flags."""
        # Test replication_server flag was set from configuration file.
        container_controller = container_server.ContainerController
        conf = {'devices': self.testdir, 'mount_check': 'false'}
        # unset -> None (serve everything)
        self.assertEquals(container_controller(conf).replication_server, None)
        for val in [True, '1', 'True', 'true']:
            conf['replication_server'] = val
            self.assertTrue(container_controller(conf).replication_server)
        for val in [False, 0, '0', 'False', 'false', 'test_string']:
            conf['replication_server'] = val
            self.assertFalse(container_controller(conf).replication_server)
    def test_list_allowed_methods(self):
        """Only REPLICATE is marked with the 'replication' attribute."""
        # Test list of allowed_methods
        obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
        repl_methods = ['REPLICATE']
        for method_name in obj_methods:
            method = getattr(self.controller, method_name)
            self.assertFalse(hasattr(method, 'replication'))
        for method_name in repl_methods:
            method = getattr(self.controller, method_name)
            self.assertEquals(method.replication, True)
    def test_correct_allowed_method(self):
        """A public verb is dispatched when replication_server is false."""
        # Test correct work for allowed method using
        # swift.container.server.ContainerController.__call__
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        self.controller = container_server.ContainerController(
            {'devices': self.testdir, 'mount_check': 'false',
             'replication_server': 'false'})
        def start_response(*args):
            """Sends args to outbuf"""
            outbuf.writelines(args)
        method = 'PUT'
        env = {'REQUEST_METHOD': method,
               'SCRIPT_NAME': '',
               'PATH_INFO': '/sda1/p/a/c',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '8080',
               'SERVER_PROTOCOL': 'HTTP/1.0',
               'CONTENT_LENGTH': '0',
               'wsgi.version': (1, 0),
               'wsgi.url_scheme': 'http',
               'wsgi.input': inbuf,
               'wsgi.errors': errbuf,
               'wsgi.multithread': False,
               'wsgi.multiprocess': False,
               'wsgi.run_once': False}
        method_res = mock.MagicMock()
        # replace PUT with a mock marked @public so dispatch reaches it
        mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
        with mock.patch.object(self.controller, method, new=mock_method):
            response = self.controller(env, start_response)
            self.assertEqual(response, method_res)
    def test_not_allowed_method(self):
        """A replication-only verb is refused when replication_server is false."""
        # Test correct work for NOT allowed method using
        # swift.container.server.ContainerController.__call__
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        self.controller = container_server.ContainerController(
            {'devices': self.testdir, 'mount_check': 'false',
             'replication_server': 'false'})
        def start_response(*args):
            """Sends args to outbuf"""
            outbuf.writelines(args)
        method = 'PUT'
        env = {'REQUEST_METHOD': method,
               'SCRIPT_NAME': '',
               'PATH_INFO': '/sda1/p/a/c',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '8080',
               'SERVER_PROTOCOL': 'HTTP/1.0',
               'CONTENT_LENGTH': '0',
               'wsgi.version': (1, 0),
               'wsgi.url_scheme': 'http',
               'wsgi.input': inbuf,
               'wsgi.errors': errbuf,
               'wsgi.multithread': False,
               'wsgi.multiprocess': False,
               'wsgi.run_once': False}
        answer = ['<html><h1>Method Not Allowed</h1><p>The method is not '
                  'allowed for this resource.</p></html>']
        # mark the mocked PUT as @replication so a non-replication server
        # must refuse it
        mock_method = replication(public(lambda x: mock.MagicMock()))
        with mock.patch.object(self.controller, method, new=mock_method):
            response = self.controller.__call__(env, start_response)
            self.assertEqual(response, answer)
    def test_call_incorrect_replication_method(self):
        """A replication-only server answers 405 to all regular verbs."""
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        self.controller = container_server.ContainerController(
            {'devices': self.testdir, 'mount_check': 'false',
             'replication_server': 'true'})
        def start_response(*args):
            """Sends args to outbuf"""
            outbuf.writelines(args)
        obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST', 'OPTIONS']
        for method in obj_methods:
            env = {'REQUEST_METHOD': method,
                   'SCRIPT_NAME': '',
                   'PATH_INFO': '/sda1/p/a/c',
                   'SERVER_NAME': '127.0.0.1',
                   'SERVER_PORT': '8080',
                   'SERVER_PROTOCOL': 'HTTP/1.0',
                   'CONTENT_LENGTH': '0',
                   'wsgi.version': (1, 0),
                   'wsgi.url_scheme': 'http',
                   'wsgi.input': inbuf,
                   'wsgi.errors': errbuf,
                   'wsgi.multithread': False,
                   'wsgi.multiprocess': False,
                   'wsgi.run_once': False}
            self.controller(env, start_response)
            self.assertEquals(errbuf.getvalue(), '')
            self.assertEquals(outbuf.getvalue()[:4], '405 ')
    def test_GET_log_requests_true(self):
        """With log_requests enabled, a GET produces an info log line."""
        self.controller.logger = FakeLogger()
        self.controller.log_requests = True
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue(self.controller.logger.log_dict['info'])
    def test_GET_log_requests_false(self):
        """With log_requests disabled, a GET produces no info log line."""
        self.controller.logger = FakeLogger()
        self.controller.log_requests = False
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertFalse(self.controller.logger.log_dict['info'])
    def test_log_line_format(self):
        """Request log lines follow the common-log-style format with timing."""
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
        self.controller.logger = FakeLogger()
        # pin time.time/gmtime/getpid so the log line is fully deterministic
        with mock.patch(
                'time.gmtime', mock.MagicMock(side_effect=[gmtime(10001.0)])):
            with mock.patch(
                    'time.time',
                    mock.MagicMock(side_effect=[10000.0, 10001.0, 10002.0])):
                with mock.patch(
                        'os.getpid', mock.MagicMock(return_value=1234)):
                    req.get_response(self.controller)
        self.assertEqual(
            self.controller.logger.log_dict['info'],
            [(('1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a/c" '
               '404 - "-" "-" "-" 2.0000 "-" 1234 0',), {})])
@patch_policies([
    StoragePolicy(0, 'legacy'),
    StoragePolicy(1, 'one'),
    StoragePolicy(2, 'two', True),
    StoragePolicy(3, 'three'),
    StoragePolicy(4, 'four'),
])
class TestNonLegacyDefaultStoragePolicy(TestContainerController):
    """
    Test swift.container.server.ContainerController with a non-legacy default
    Storage Policy.

    Re-runs the whole TestContainerController suite under a policy set whose
    default is policy index 2 ('two'), so object rows must carry an explicit
    policy index header.
    """
    def _update_object_put_headers(self, req):
        """
        Add policy index headers for containers created with default policy
        - which in this TestCase is 1.
        """
        req.headers['X-Backend-Storage-Policy-Index'] = \
            str(POLICIES.default.idx)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
AOSPU/external_chromium_org | third_party/closure_linter/closure_linter/javascripttokens.py | 266 | 4955 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent JavaScript tokens."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import tokens
class JavaScriptTokenType(tokens.TokenType):
  """Enumeration of JavaScript token types, and useful sets of token types."""
  # Literals.
  NUMBER = 'number'
  # Comment delimiters and content.
  START_SINGLE_LINE_COMMENT = '//'
  START_BLOCK_COMMENT = '/*'
  START_DOC_COMMENT = '/**'
  END_BLOCK_COMMENT = '*/'
  END_DOC_COMMENT = 'doc */'
  COMMENT = 'comment'
  # String delimiters and content.
  SINGLE_QUOTE_STRING_START = "'string"
  SINGLE_QUOTE_STRING_END = "string'"
  DOUBLE_QUOTE_STRING_START = '"string'
  DOUBLE_QUOTE_STRING_END = 'string"'
  STRING_TEXT = 'string'
  # Brackets and structure.
  START_BLOCK = '{'
  END_BLOCK = '}'
  START_PAREN = '('
  END_PAREN = ')'
  START_BRACKET = '['
  END_BRACKET = ']'
  REGEX = '/regex/'
  # Function declarations and parameter lists.
  FUNCTION_DECLARATION = 'function(...)'
  FUNCTION_NAME = 'function functionName(...)'
  START_PARAMETERS = 'startparams('
  PARAMETERS = 'pa,ra,ms'
  END_PARAMETERS = ')endparams'
  SEMICOLON = ';'
  # JsDoc comment pieces.
  DOC_FLAG = '@flag'
  DOC_INLINE_FLAG = '{@flag ...}'
  DOC_START_BRACE = 'doc {'
  DOC_END_BRACE = 'doc }'
  DOC_PREFIX = 'comment prefix: * '
  SIMPLE_LVALUE = 'lvalue='
  KEYWORD = 'keyword'
  OPERATOR = 'operator'
  IDENTIFIER = 'identifier'
  # All token types that make up a string literal.
  STRING_TYPES = frozenset([
      SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END,
      DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT])
  # All token types that can appear inside any kind of comment.
  COMMENT_TYPES = frozenset([START_SINGLE_LINE_COMMENT, COMMENT,
                             START_BLOCK_COMMENT, START_DOC_COMMENT,
                             END_BLOCK_COMMENT, END_DOC_COMMENT,
                             DOC_START_BRACE, DOC_END_BRACE,
                             DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX])
  # Token types that may appear in a JsDoc flag's description text.
  FLAG_DESCRIPTION_TYPES = frozenset([
      DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE])
  # Token types that terminate a JsDoc flag.
  FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT])
  # Everything that is not executable code: comments and whitespace.
  NON_CODE_TYPES = COMMENT_TYPES | frozenset([
      tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])
  UNARY_OPERATORS = ['!', 'new', 'delete', 'typeof', 'void']
  UNARY_OK_OPERATORS = ['--', '++', '-', '+'] + UNARY_OPERATORS
  UNARY_POST_OPERATORS = ['--', '++']
  # An expression ender is any token that can end an object - i.e. we could have
  # x.y or [1, 2], or (10 + 9) or {a: 10}.
  EXPRESSION_ENDER_TYPES = [tokens.TokenType.NORMAL, IDENTIFIER, NUMBER,
                            SIMPLE_LVALUE, END_BRACKET, END_PAREN, END_BLOCK,
                            SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_END]
class JavaScriptToken(tokens.Token):
  """JavaScript token subclass of Token, provides extra instance checks.

  The following token types have data in attached_object:
    - All JsDoc flags: a parser.JsDocFlag object.
  """

  def IsKeyword(self, keyword):
    """Tests if this token is the given keyword.

    Args:
      keyword: The keyword to compare to.

    Returns:
      True if this token is a keyword token with the given name.
    """
    if self.type != JavaScriptTokenType.KEYWORD:
      return False
    return self.string == keyword

  def IsOperator(self, operator):
    """Tests if this token is the given operator.

    Args:
      operator: The operator to compare to.

    Returns:
      True if this token is a operator token with the given name.
    """
    if self.type != JavaScriptTokenType.OPERATOR:
      return False
    return self.string == operator

  def IsAssignment(self):
    """Tests if this token is an assignment operator.

    Returns:
      True if this token is an assignment operator.
    """
    if self.type != JavaScriptTokenType.OPERATOR:
      return False
    # Comparison operators also end in '=' but do not assign.
    comparison_operators = ('==', '!=', '>=', '<=', '===', '!==')
    return (self.string.endswith('=') and
            self.string not in comparison_operators)

  def IsComment(self):
    """Tests if this token is any part of a comment.

    Returns:
      True if this token is any part of a comment.
    """
    return self.type in JavaScriptTokenType.COMMENT_TYPES

  def IsCode(self):
    """Tests if this token is code, as opposed to a comment or whitespace."""
    return self.type not in JavaScriptTokenType.NON_CODE_TYPES

  def __repr__(self):
    template = '<JavaScriptToken: %d, %s, "%s", %r, %r>'
    return template % (self.line_number, self.type, self.string,
                       self.values, self.metadata)
| bsd-3-clause |
michaelkirk/QGIS | python/plugins/processing/algs/grass/GrassUtils.py | 3 | 15753 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GrassUtils.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import stat
import shutil
import codecs
import subprocess
import os
from qgis.core import QgsApplication
from PyQt4.QtCore import QCoreApplication
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import userFolder, isMac, isWindows, mkdir, tempFolder
from processing.tests.TestData import points
class GrassUtils:
GRASS_REGION_XMIN = 'GRASS_REGION_XMIN'
GRASS_REGION_YMIN = 'GRASS_REGION_YMIN'
GRASS_REGION_XMAX = 'GRASS_REGION_XMAX'
GRASS_REGION_YMAX = 'GRASS_REGION_YMAX'
GRASS_REGION_CELLSIZE = 'GRASS_REGION_CELLSIZE'
GRASS_FOLDER = 'GRASS_FOLDER'
GRASS_WIN_SHELL = 'GRASS_WIN_SHELL'
GRASS_LOG_COMMANDS = 'GRASS_LOG_COMMANDS'
GRASS_LOG_CONSOLE = 'GRASS_LOG_CONSOLE'
sessionRunning = False
sessionLayers = {}
projectionSet = False
isGrassInstalled = False
@staticmethod
def grassBatchJobFilename():
'''This is used in Linux. This is the batch job that we assign to
GRASS_BATCH_JOB and then call GRASS and let it do the work
'''
filename = 'grass_batch_job.sh'
batchfile = userFolder() + os.sep + filename
return batchfile
@staticmethod
def grassScriptFilename():
'''This is used in windows. We create a script that initializes
GRASS and then uses grass commands
'''
filename = 'grass_script.bat'
filename = userFolder() + os.sep + filename
return filename
    @staticmethod
    def getGrassVersion():
        """Return the GRASS version string (currently hard-coded)."""
        # I do not know if this should be removed or let the user enter it
        # or something like that... This is just a temporary thing
        return '6.4.0'
    @staticmethod
    def grassPath():
        """Return the GRASS installation folder.

        Only meaningful on Windows and OS X; on other platforms GRASS is
        assumed to be available on the PATH and '' is returned. Falls back
        to auto-detection next to the QGIS install when the user setting
        is not configured.
        """
        if not isWindows() and not isMac():
            return ''
        folder = ProcessingConfig.getSetting(GrassUtils.GRASS_FOLDER)
        if folder is None:
            if isWindows():
                # Look for a 'grass/grass*' subfolder next to the QGIS
                # install prefix.
                testfolder = os.path.dirname(QgsApplication.prefixPath())
                testfolder = os.path.join(testfolder, 'grass')
                if os.path.isdir(testfolder):
                    for subfolder in os.listdir(testfolder):
                        if subfolder.startswith('grass'):
                            folder = os.path.join(testfolder, subfolder)
                            break
            else:
                folder = os.path.join(QgsApplication.prefixPath(), 'grass')
                if not os.path.isdir(folder):
                    # Fall back to the standalone GRASS.app bundle.
                    folder = '/Applications/GRASS-6.4.app/Contents/MacOS'
        # folder may still be None on Windows when nothing was found.
        return folder or ''
    @staticmethod
    def grassWinShell():
        """Return the folder of the MSYS shell used to run GRASS on Windows.

        Defaults to the 'msys' folder next to the QGIS install prefix when
        the setting is not configured.
        """
        folder = ProcessingConfig.getSetting(GrassUtils.GRASS_WIN_SHELL)
        if folder is None:
            folder = os.path.dirname(str(QgsApplication.prefixPath()))
            folder = os.path.join(folder, 'msys')
        return folder
    @staticmethod
    def grassDescriptionPath():
        """Return the folder containing the GRASS algorithm description files."""
        return os.path.join(os.path.dirname(__file__), 'description')
    @staticmethod
    def createGrassScript(commands):
        """Write the Windows batch script that initializes GRASS and runs
        the given GRASS commands.

        Two files are produced: a temporary gisrc file pointing at the
        temp location/mapset, and the grass_script.bat that sets up the
        environment (shell, paths, locale) before executing *commands*.

        Args:
            commands: iterable of GRASS command lines (unicode strings).
        """
        # NOTE(review): files are closed explicitly but not via 'with',
        # so a write failure would leak the handle — consider 'with'.
        folder = GrassUtils.grassPath()
        shell = GrassUtils.grassWinShell()
        script = GrassUtils.grassScriptFilename()
        gisrc = userFolder() + os.sep + 'processing.gisrc'
        # Temporary gisrc file
        output = codecs.open(gisrc, 'w', encoding='utf-8')
        location = 'temp_location'
        gisdbase = GrassUtils.grassDataFolder()
        output.write('GISDBASE: ' + gisdbase + '\n')
        output.write('LOCATION_NAME: ' + location + '\n')
        output.write('MAPSET: PERMANENT \n')
        output.write('GRASS_GUI: text\n')
        output.close()
        # The batch script itself: environment setup first.
        output = codecs.open(script, 'w', encoding='utf-8')
        output.write('set HOME=' + os.path.expanduser('~') + '\n')
        output.write('set GISRC=' + gisrc + '\n')
        output.write('set GRASS_SH=' + shell + '\\bin\\sh.exe\n')
        output.write('set PATH=' + shell + os.sep + 'bin;' + shell + os.sep
                     + 'lib;' + '%PATH%\n')
        output.write('set WINGISBASE=' + folder + '\n')
        output.write('set GISBASE=' + folder + '\n')
        output.write('set GRASS_PROJSHARE=' + folder + os.sep + 'share'
                     + os.sep + 'proj' + '\n')
        output.write('set GRASS_MESSAGE_FORMAT=gui\n')
        # Replacement code for etc/Init.bat
        output.write('if "%GRASS_ADDON_PATH%"=="" set PATH=%WINGISBASE%\\bin;%WINGISBASE%\\lib;%PATH%\n')
        output.write('if not "%GRASS_ADDON_PATH%"=="" set PATH=%WINGISBASE%\\bin;%WINGISBASE%\\lib;%GRASS_ADDON_PATH%;%PATH%\n')
        output.write('\n')
        output.write('set GRASS_VERSION=' + GrassUtils.getGrassVersion()
                     + '\n')
        output.write('if not "%LANG%"=="" goto langset\n')
        output.write('FOR /F "usebackq delims==" %%i IN (`"%WINGISBASE%\\etc\\winlocale"`) DO @set LANG=%%i\n')
        output.write(':langset\n')
        output.write('\n')
        output.write('set PATHEXT=%PATHEXT%;.PY\n')
        output.write('set PYTHONPATH=%PYTHONPATH%;%WINGISBASE%\\etc\\python;%WINGISBASE%\\etc\\wxpython\\n')
        output.write('\n')
        # Point the GRASS session at the temp location/mapset.
        output.write('g.gisenv.exe set="MAPSET=PERMANENT"\n')
        output.write('g.gisenv.exe set="LOCATION=' + location + '"\n')
        output.write('g.gisenv.exe set="LOCATION_NAME=' + location + '"\n')
        output.write('g.gisenv.exe set="GISDBASE=' + gisdbase + '"\n')
        output.write('g.gisenv.exe set="GRASS_GUI=text"\n')
        # Finally the user's commands, then exit.
        for command in commands:
            output.write(command.encode('utf8') + '\n')
        output.write('\n')
        output.write('exit\n')
        output.close()
@staticmethod
def createGrassBatchJobFileFromGrassCommands(commands):
fout = codecs.open(GrassUtils.grassBatchJobFilename(), 'w', encoding='utf-8')
for command in commands:
fout.write(command.encode('utf8') + '\n')
fout.write('exit')
fout.close()
@staticmethod
def grassMapsetFolder():
folder = os.path.join(GrassUtils.grassDataFolder(), 'temp_location')
mkdir(folder)
return folder
@staticmethod
def grassDataFolder():
tempfolder = os.path.join(tempFolder(), 'grassdata')
mkdir(tempfolder)
return tempfolder
@staticmethod
def createTempMapset():
'''Creates a temporary location and mapset(s) for GRASS data
processing. A minimal set of folders and files is created in the
system's default temporary directory. The settings files are
written with sane defaults, so GRASS can do its work. The mapset
projection will be set later, based on the projection of the first
input image or vector
'''
folder = GrassUtils.grassMapsetFolder()
mkdir(os.path.join(folder, 'PERMANENT'))
mkdir(os.path.join(folder, 'PERMANENT', '.tmp'))
GrassUtils.writeGrassWindow(os.path.join(folder, 'PERMANENT',
'DEFAULT_WIND'))
outfile = codecs.open(os.path.join(folder, 'PERMANENT', 'MYNAME'), 'w', encoding='utf-8')
outfile.write(
'QGIS GRASS interface: temporary data processing location.\n')
outfile.close()
GrassUtils.writeGrassWindow(os.path.join(folder, 'PERMANENT', 'WIND'))
mkdir(os.path.join(folder, 'PERMANENT', 'dbf'))
outfile = codecs.open(os.path.join(folder, 'PERMANENT', 'VAR'), 'w', encoding='utf-8')
outfile.write('DB_DRIVER: dbf\n')
outfile.write('DB_DATABASE: $GISDBASE/$LOCATION_NAME/$MAPSET/dbf/\n')
outfile.close()
@staticmethod
def writeGrassWindow(filename):
out = codecs.open(filename, 'w', encoding='utf-8')
out.write('proj: 0\n')
out.write('zone: 0\n')
out.write('north: 1\n')
out.write('south: 0\n')
out.write('east: 1\n')
out.write('west: 0\n')
out.write('cols: 1\n')
out.write('rows: 1\n')
out.write('e-w resol: 1\n')
out.write('n-s resol: 1\n')
out.write('top: 1\n')
out.write('bottom: 0\n')
out.write('cols3: 1\n')
out.write('rows3: 1\n')
out.write('depths: 1\n')
out.write('e-w resol3: 1\n')
out.write('n-s resol3: 1\n')
out.write('t-b resol: 1\n')
out.close()
@staticmethod
def prepareGrassExecution(commands):
if isWindows():
GrassUtils.createGrassScript(commands)
command = ['cmd.exe', '/C ', GrassUtils.grassScriptFilename()]
else:
gisrc = userFolder() + os.sep + 'processing.gisrc'
os.putenv('GISRC', gisrc)
os.putenv('GRASS_MESSAGE_FORMAT', 'gui')
os.putenv('GRASS_BATCH_JOB', GrassUtils.grassBatchJobFilename())
GrassUtils.createGrassBatchJobFileFromGrassCommands(commands)
os.chmod(GrassUtils.grassBatchJobFilename(), stat.S_IEXEC
| stat.S_IREAD | stat.S_IWRITE)
if isMac():
command = GrassUtils.grassPath() + os.sep + 'grass.sh ' \
+ GrassUtils.grassMapsetFolder() + '/PERMANENT'
else:
command = 'grass64 ' + GrassUtils.grassMapsetFolder() \
+ '/PERMANENT'
return command
@staticmethod
def executeGrass(commands, progress, outputCommands=None):
loglines = []
loglines.append('GRASS execution console output')
grassOutDone = False
command = GrassUtils.prepareGrassExecution(commands)
proc = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=open(os.devnull),
stderr=subprocess.STDOUT,
universal_newlines=True,
).stdout
progress.setInfo('GRASS commands output:')
for line in iter(proc.readline, ''):
if 'GRASS_INFO_PERCENT' in line:
try:
progress.setPercentage(int(line[len('GRASS_INFO_PERCENT')
+ 2:]))
except:
pass
else:
if 'r.out' in line or 'v.out' in line:
grassOutDone = True
loglines.append(line)
progress.setConsoleInfo(line)
# Some GRASS scripts, like r.mapcalculator or r.fillnulls, call
# other GRASS scripts during execution. This may override any
# commands that are still to be executed by the subprocess, which
# are usually the output ones. If that is the case runs the output
# commands again.
if not grassOutDone and outputCommands:
command = GrassUtils.prepareGrassExecution(outputCommands)
proc = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=open(os.devnull),
stderr=subprocess.STDOUT,
universal_newlines=True,
).stdout
for line in iter(proc.readline, ''):
if 'GRASS_INFO_PERCENT' in line:
try:
progress.setPercentage(int(
line[len('GRASS_INFO_PERCENT') + 2:]))
except:
pass
else:
loglines.append(line)
progress.setConsoleInfo(line)
if ProcessingConfig.getSetting(GrassUtils.GRASS_LOG_CONSOLE):
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
# GRASS session is used to hold the layers already exported or
# produced in GRASS between multiple calls to GRASS algorithms.
# This way they don't have to be loaded multiple times and
# following algorithms can use the results of the previous ones.
# Starting a session just involves creating the temp mapset
# structure
@staticmethod
def startGrassSession():
if not GrassUtils.sessionRunning:
GrassUtils.createTempMapset()
GrassUtils.sessionRunning = True
# End session by removing the temporary GRASS mapset and all
# the layers.
@staticmethod
def endGrassSession():
shutil.rmtree(GrassUtils.grassMapsetFolder(), True)
GrassUtils.sessionRunning = False
GrassUtils.sessionLayers = {}
GrassUtils.projectionSet = False
@staticmethod
def getSessionLayers():
return GrassUtils.sessionLayers
@staticmethod
def addSessionLayers(exportedLayers):
GrassUtils.sessionLayers = dict(GrassUtils.sessionLayers.items()
+ exportedLayers.items())
    @staticmethod
    def checkGrassIsInstalled(ignorePreviousState=False):
        """Return an error message string if GRASS cannot be used, or None
        if the check passes.

        On Windows this first verifies that the configured GRASS folder
        contains the expected binaries; on every platform it then runs a
        tiny v.voronoi job as a smoke test to prove GRASS can actually be
        executed.
        """
        if isWindows():
            path = GrassUtils.grassPath()
            if path == '':
                return GrassUtils.tr(
                    'GRASS folder is not configured.\nPlease configure '
                    'it before running GRASS algorithms.')
            # r.out.gdal.exe is used as a sentinel for a complete install.
            cmdpath = os.path.join(path, 'bin', 'r.out.gdal.exe')
            if not os.path.exists(cmdpath):
                return GrassUtils.tr(
                    'The specified GRASS folder does not contain a valid '
                    'set of GRASS modules. Please, go to the Processing '
                    'settings dialog, and check that the GRASS folder is '
                    'correctly configured')
        # Skip the (slow) execution test if a previous run already proved
        # GRASS works, unless the caller explicitly asks to re-check.
        if not ignorePreviousState:
            if GrassUtils.isGrassInstalled:
                return
        try:
            from processing import runalg
            # Run a trivial v.voronoi job; a missing output file (or any
            # exception) is reported as a configuration error.
            result = runalg(
                'grass:v.voronoi',
                points(),
                False,
                False,
                '270778.60198,270855.745301,4458921.97814,4458983.8488',
                -1,
                0.0001,
                0,
                None,
            )
            if not os.path.exists(result['output']):
                return GrassUtils.tr(
                    'It seems that GRASS is not correctly installed and '
                    'configured in your system.\nPlease install it before '
                    'running GRASS algorithms.')
        except:
            # Deliberately broad: any failure here means GRASS could not
            # be run, which is exactly what this check reports.
            return GrassUtils.tr(
                'Error while checking GRASS installation. GRASS might not '
                'be correctly configured.\n')
        GrassUtils.isGrassInstalled = True
@staticmethod
def tr(string, context=''):
if context == '':
context = 'GrassUtils'
return QCoreApplication.translate(context, string)
| gpl-2.0 |
Tiotao/morpherpy | env/Lib/site-packages/pip/_vendor/distlib/locators.py | 203 | 48796 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError:
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
# Matches URL fragments like "md5=abcdef..." that carry an archive digest.
# Patterns are raw strings: '\w' in a plain string literal is an invalid
# string escape (DeprecationWarning on Python 3.6+, later a SyntaxWarning).
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
# Extracts the charset parameter from a Content-Type header value.
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
# Content types treated as scrapeable HTML/XHTML pages.
HTML_CONTENT_TYPE = re.compile(r'text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'http://python.org/pypi'
def get_all_distribution_names(url=None):
    """
    Return all distribution names known by an index.
    :param url: The URL of the index. Defaults to the main PyPI index.
    :return: A list of all known distribution names.
    """
    index_url = DEFAULT_INDEX if url is None else url
    client = ServerProxy(index_url, timeout=3.0)
    return client.list_packages()
class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        """Handle a redirect, fixing up scheme-less Location/URI headers."""
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use the first one found.
        header_name = target = None
        for candidate in ('location', 'uri'):
            if candidate in headers:
                header_name = candidate
                target = headers[candidate]
                break
        if target is None:
            return
        if urlparse(target).scheme == '':
            # Scheme-less redirect: resolve against the request URL and
            # rewrite the header so the base handler accepts it.
            target = urljoin(req.get_full_url(), target)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(header_name, target)
            else:
                headers[header_name] = target
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)
    http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
    """
    A base class for locators - things that locate distributions.
    """
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    binary_extensions = ('.egg', '.exe', '.whl')
    # Extensions never treated as distribution archives.
    excluded_extensions = ('.pdf',)
    # A list of tags indicating which wheels you want to match. The default
    # value of None matches against the tags compatible with the running
    # Python. If you want to match other values, set wheel_tags on a locator
    # instance to a list of tuples (pyver, abi, arch) which you want to match.
    wheel_tags = None
    downloadable_extensions = source_extensions + ('.whl',)
    def __init__(self, scheme='default'):
        """
        Initialise an instance.
        :param scheme: Because locators look for most recent versions, they
                       need to know the version scheme to use. This specifies
                       the current PEP-recommended scheme - use ``'legacy'``
                       if you need to support existing distributions on PyPI.
        """
        # Per-project cache for get_project(); None disables caching.
        self._cache = {}
        self.scheme = scheme
        # Because of bugs in some of the handlers on some of the platforms,
        # we use our own opener rather than just using urlopen.
        self.opener = build_opener(RedirectHandler())
        # If get_project() is called from locate(), the matcher instance
        # is set from the requirement passed to locate(). See issue #18 for
        # why this can be useful to know.
        self.matcher = None
    def clear_cache(self):
        # Drop all cached per-project results.
        self._cache.clear()
    def _get_scheme(self):
        return self._scheme
    def _set_scheme(self, value):
        self._scheme = value
    scheme = property(_get_scheme, _set_scheme)
    def _get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.
        This should be implemented in subclasses.
        If called from a locate() request, self.matcher will be set to a
        matcher for the requirement to satisfy, otherwise it will be None.
        """
        raise NotImplementedError('Please implement in the subclass')
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Please implement in the subclass')
    def get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.
        This calls _get_project to do all the work, and just implements a caching layer on top.
        """
        if self._cache is None:
            result = self._get_project(name)
        elif name in self._cache:
            result = self._cache[name]
        else:
            result = self._get_project(name)
            self._cache[name] = result
        return result
    def score_url(self, url):
        """
        Give an url a score which can be used to choose preferred URLs
        for a given project release.
        """
        t = urlparse(url)
        # Tuple compares left-to-right: https wins first, then a
        # pypi.python.org host, then the archive basename.
        return (t.scheme != 'https', 'pypi.python.org' in t.netloc,
                posixpath.basename(t.path))
    def prefer_url(self, url1, url2):
        """
        Choose one of two URLs where both are candidates for distribution
        archives for the same version of a distribution (for example,
        .tar.gz vs. zip).
        The current implement favours http:// URLs over https://, archives
        from PyPI over those from other locations and then the archive name.
        """
        result = url2
        if url1:
            s1 = self.score_url(url1)
            s2 = self.score_url(url2)
            if s1 > s2:
                result = url1
            if result != url2:
                logger.debug('Not replacing %r with %r', url1, url2)
            else:
                logger.debug('Replacing %r with %r', url1, url2)
        return result
    def split_filename(self, filename, project_name):
        """
        Attempt to split a filename in project name, version and Python version.
        """
        return split_filename(filename, project_name)
    def convert_url_to_download_info(self, url, project_name):
        """
        See if a URL is a candidate for a download URL for a project (the URL
        has typically been scraped from an HTML page).
        If it is, a dictionary is returned with keys "name", "version",
        "filename" and "url"; otherwise, None is returned.
        """
        def same_project(name1, name2):
            # Case-insensitive comparison that also treats '-' and '_'
            # as interchangeable.
            name1, name2 = name1.lower(), name2.lower()
            if name1 == name2:
                result = True
            else:
                # distribute replaces '-' by '_' in project names, so it
                # can tell where the version starts in a filename.
                result = name1.replace('_', '-') == name2.replace('_', '-')
            return result
        result = None
        scheme, netloc, path, params, query, frag = urlparse(url)
        # An 'egg=' fragment hint is only logged, not used.
        if frag.lower().startswith('egg='):
            logger.debug('%s: version hint in fragment: %r',
                         project_name, frag)
        # The fragment may carry a digest, e.g. '#md5=abc...'.
        m = HASHER_HASH.match(frag)
        if m:
            algo, digest = m.groups()
        else:
            algo, digest = None, None
        origpath = path
        if path and path[-1] == '/':
            path = path[:-1]
        if path.endswith('.whl'):
            try:
                wheel = Wheel(path)
                if is_compatible(wheel, self.wheel_tags):
                    if project_name is None:
                        include = True
                    else:
                        include = same_project(wheel.name, project_name)
                    if include:
                        result = {
                            'name': wheel.name,
                            'version': wheel.version,
                            'filename': wheel.filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            'python-version': ', '.join(
                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
                        }
            except Exception as e:
                # The exception details are deliberately discarded here.
                logger.warning('invalid path for wheel: %s', path)
        elif path.endswith(self.downloadable_extensions):
            path = filename = posixpath.basename(path)
            for ext in self.downloadable_extensions:
                if path.endswith(ext):
                    path = path[:-len(ext)]
                    t = self.split_filename(path, project_name)
                    if not t:
                        logger.debug('No match for project/version: %s', path)
                    else:
                        name, version, pyver = t
                        if not project_name or same_project(project_name, name):
                            result = {
                                'name': name,
                                'version': version,
                                'filename': filename,
                                'url': urlunparse((scheme, netloc, origpath,
                                                   params, query, '')),
                                #'packagetype': 'sdist',
                            }
                            if pyver:
                                result['python-version'] = pyver
                    break
        if result and algo:
            result['%s_digest' % algo] = digest
        return result
    def _get_digest(self, info):
        """
        Get a digest from a dictionary by looking at keys of the form
        'algo_digest'.
        Returns a 2-tuple (algo, digest) if found, else None. Currently
        looks only for SHA256, then MD5.
        """
        result = None
        for algo in ('sha256', 'md5'):
            key = '%s_digest' % algo
            if key in info:
                result = (algo, info[key])
                break
        return result
    def _update_version_data(self, result, info):
        """
        Update a result dictionary (the final result from _get_project) with a
        dictionary for a specific version, which typically holds information
        gleaned from a filename or URL for an archive for the distribution.
        """
        name = info.pop('name')
        version = info.pop('version')
        # Reuse an existing Distribution for this version if present.
        if version in result:
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        if md.source_url != info['url']:
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist
    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.
        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions
                            to be located. Otherwise, pre-release versions
                            are not returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None:
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if versions:
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            # Note: the dict also holds the 'urls'/'digests' bookkeeping
            # keys; those fail to match and are skipped via the except.
            for k in versions:
                try:
                    if not matcher.match(k):
                        logger.debug('%s did not match %r', matcher, k)
                    else:
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        else:
                            logger.debug('skipping pre-release '
                                         'version %s of %s', k, matcher.name)
                except Exception:
                    logger.warning('error matching %s with %r', matcher, k)
                    pass # slist.append(k)
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                # Highest matching version after sorting wins.
                version = slist[-1]
                result = versions[version]
        if result:
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if url in sd:
                    d[url] = sd[url]
            result.digests = d
        self.matcher = None
        return result
class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.
        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        return set(self.client.list_packages())

    def _get_project(self, name):
        """Build the version -> Distribution map for *name* via XML-RPC."""
        result = {'urls': {}, 'digests': {}}
        for version in self.client.package_releases(name, True):
            release_urls = self.client.release_urls(name, version)
            release_data = self.client.release_data(name, version)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = release_data['name']
            metadata.version = release_data['version']
            metadata.license = release_data.get('license')
            metadata.keywords = release_data.get('keywords', [])
            metadata.summary = release_data.get('summary')
            dist = Distribution(metadata)
            if release_urls:
                # The first URL supplies the canonical source URL/digest.
                first = release_urls[0]
                metadata.source_url = first['url']
                dist.digest = self._get_digest(first)
            dist.locator = self
            result[version] = dist
            for info in release_urls:
                file_url = info['url']
                result['urls'].setdefault(version, set()).add(file_url)
                result['digests'][file_url] = self._get_digest(info)
        return result
class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in functionality
    and probably not worth using.
    """
    def __init__(self, url, **kwargs):
        # Ensure the base URL ends with a slash so urljoin works as expected.
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        """
        Fetch ``<base>/<name>/json`` from the index and convert the
        returned release metadata into a Distribution, recording its
        download URLs and digests.
        """
        result = {'urls': {}, 'digests': {}}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            resp = self.opener.open(url)
            data = resp.read().decode() # for now
            d = json.loads(data)
            md = Metadata(scheme=self.scheme)
            data = d['info']
            md.name = data['name']
            md.version = data['version']
            md.license = data.get('license')
            md.keywords = data.get('keywords', [])
            md.summary = data.get('summary')
            dist = Distribution(md)
            urls = d['urls']
            if urls:
                info = urls[0]
                md.source_url = info['url']
                dist.digest = self._get_digest(info)
                dist.locator = self
                result[md.version] = dist
            for info in urls:
                url = info['url']
                result['urls'].setdefault(md.version, set()).add(url)
                # Bug fix: the original line referenced an undefined local
                # name ``digest``, raising NameError (silently swallowed by
                # the broad except below) and losing all digest data.
                # Compute the digest for each URL entry instead.
                result['digests'][url] = self._get_digest(info)
        except Exception as e:
            logger.exception('JSON fetch failed: %s', e)
        return result
class Page(object):
    """
    This class represents a scraped HTML page.
    """
    # The following slightly hairy-looking regex just looks for the contents of
    # an anchor link, which has an attribute "href" either immediately preceded
    # or immediately followed by a "rel" attribute. The attribute values can be
    # declared with double quotes, single quotes or no quotes - which leads to
    # the length of the expression.
    _href = re.compile("""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
    # Matches a <base href="..."> tag; if present, relative links on the
    # page are resolved against it instead of the page's own URL.
    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
    def __init__(self, data, url):
        """
        Initialise an instance with the Unicode page contents and the URL they
        came from.
        """
        self.data = data
        self.base_url = self.url = url
        m = self._base.search(self.data)
        if m:
            self.base_url = m.group(1)
    # Characters not allowed in URLs; occurrences are percent-encoded below.
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
    @cached_property
    def links(self):
        """
        Return the URLs of all the links on a page together with information
        about their "rel" attribute, for determining which ones to treat as
        downloads and which ones to queue for further scraping.
        """
        def clean(url):
            "Tidy up an URL."
            scheme, netloc, path, params, query, frag = urlparse(url)
            return urlunparse((scheme, netloc, quote(path),
                               params, query, frag))
        result = set()
        for match in self._href.finditer(self.data):
            d = match.groupdict('')
            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
                   d['rel4'] or d['rel5'] or d['rel6'])
            url = d['url1'] or d['url2'] or d['url3']
            url = urljoin(self.base_url, url)
            url = unescape(url)
            # NOTE(review): the clean() helper defined above is never
            # called; only this substitution performs the escaping.
            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
            result.add((url, rel))
        # We sort the result, hoping to bring the most recent versions
        # to the front
        result = sorted(result, key=lambda t: t[0], reverse=True)
        return result
class SimpleScrapingLocator(Locator):
    """
    A locator which scrapes HTML pages to locate downloads for a distribution.
    This runs multiple threads to do the I/O; performance is at least as good
    as pip's PackageFinder, which works in an analogous fashion.
    """
    # These are used to deal with various Content-Encoding schemes.
    decoders = {
        # Bug fix: the original gzip lambda referenced an undefined name
        # ``d`` instead of its parameter ``b``, raising NameError whenever
        # a server returned gzip-encoded content.
        'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
        'none': lambda b: b,
    }
    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """
        Initialise an instance.
        :param url: The root URL to use for scraping.
        :param timeout: The timeout, in seconds, to be applied to requests.
                        This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O,
                            This defaults to 10.
        :param kwargs: Passed to the superclass.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}
        self._seen = set()
        self._to_fetch = queue.Queue()
        self._bad_hosts = set()
        self.skip_externals = False
        self.num_workers = num_workers
        self._lock = threading.RLock()
        # See issue #45: we need to be resilient when the locator is used
        # in a thread, e.g. with concurrent.futures. We can't use self._lock
        # as it is for coordinating our internal threads - the ones created
        # in _prepare_threads.
        self._gplock = threading.RLock()
    def _prepare_threads(self):
        """
        Threads are created only when get_project is called, and terminate
        before it returns. They are there primarily to parallelise I/O (i.e.
        fetching web pages).
        """
        self._threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self._fetch)
            t.setDaemon(True)
            t.start()
            self._threads.append(t)
    def _wait_threads(self):
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)    # sentinel
        for t in self._threads:
            t.join()
        self._threads = []
    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        with self._gplock:
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, '%s/' % quote(name))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                self._to_fetch.join()
            finally:
                self._wait_threads()
            del self.result
        return result
    platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
                                    r'win(32|-amd64)|macosx-?\d+)\b', re.I)
    def _is_platform_dependent(self, url):
        """
        Does an URL refer to a platform-specific download?
        """
        return self.platform_dependent.search(url)
    def _process_download(self, url):
        """
        See if an URL is a suitable download for a project.
        If it is, register information in the result dictionary (for
        _get_project) about the specific version it's for.
        Note that the return value isn't actually used other than as a boolean
        value.
        """
        if self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            with self._lock:    # needed because self.result is shared
                self._update_version_data(self.result, info)
        return info
    def _should_queue(self, link, referrer, rel):
        """
        Determine whether a link URL from a referring page and with a
        particular "rel" attribute should be queued for scraping.
        """
        scheme, netloc, path, _, _, _ = urlparse(link)
        if path.endswith(self.source_extensions + self.binary_extensions +
                         self.excluded_extensions):
            result = False
        elif self.skip_externals and not link.startswith(self.base_url):
            result = False
        elif not referrer.startswith(self.base_url):
            result = False
        elif rel not in ('homepage', 'download'):
            result = False
        elif scheme not in ('http', 'https', 'ftp'):
            result = False
        elif self._is_platform_dependent(link):
            result = False
        else:
            host = netloc.split(':', 1)[0]
            if host.lower() == 'localhost':
                result = False
            else:
                result = True
        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
                     referrer, result)
        return result
    def _fetch(self):
        """
        Get a URL to fetch from the work queue, get the HTML page, examine its
        links for download candidates and candidates for further scraping.
        This is a handy method to run in a thread.
        """
        while True:
            url = self._to_fetch.get()
            try:
                if url:
                    page = self.get_page(url)
                    if page is None:    # e.g. after an error
                        continue
                    for link, rel in page.links:
                        if link not in self._seen:
                            self._seen.add(link)
                            if (not self._process_download(link) and
                                self._should_queue(link, url, rel)):
                                logger.debug('Queueing %s from %s', link, url)
                                self._to_fetch.put(link)
            finally:
                # always do this, to avoid hangs :-)
                self._to_fetch.task_done()
            if not url:
                #logger.debug('Sentinel seen, quitting.')
                break
    def get_page(self, url):
        """
        Get the HTML for an URL, possibly from an in-memory cache.
        XXX TODO Note: this cache is never actually cleared. It's assumed that
        the data won't get stale over the lifetime of a locator instance (not
        necessarily true for the default_locator).
        """
        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
        scheme, netloc, path, _, _, _ = urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            url = urljoin(ensure_slash(url), 'index.html')
        if url in self._page_cache:
            result = self._page_cache[url]
            logger.debug('Returning %s from cache: %s', url, result)
        else:
            host = netloc.split(':', 1)[0]
            result = None
            if host in self._bad_hosts:
                logger.debug('Skipping %s due to bad host %s', url, host)
            else:
                req = Request(url, headers={'Accept-encoding': 'identity'})
                try:
                    logger.debug('Fetching %s', url)
                    resp = self.opener.open(req, timeout=self.timeout)
                    logger.debug('Fetched %s', url)
                    headers = resp.info()
                    content_type = headers.get('Content-Type', '')
                    if HTML_CONTENT_TYPE.match(content_type):
                        final_url = resp.geturl()
                        data = resp.read()
                        encoding = headers.get('Content-Encoding')
                        if encoding:
                            decoder = self.decoders[encoding]   # fail if not found
                            data = decoder(data)
                        encoding = 'utf-8'
                        m = CHARSET.search(content_type)
                        if m:
                            encoding = m.group(1)
                        try:
                            data = data.decode(encoding)
                        except UnicodeError:
                            data = data.decode('latin-1')    # fallback
                        result = Page(data, final_url)
                        self._page_cache[final_url] = result
                except HTTPError as e:
                    if e.code != 404:
                        logger.exception('Fetch failed: %s: %s', url, e)
                except URLError as e:
                    logger.exception('Fetch failed: %s: %s', url, e)
                    with self._lock:
                        self._bad_hosts.add(host)
                except Exception as e:
                    logger.exception('Fetch failed: %s: %s', url, e)
                finally:
                    self._page_cache[url] = result   # even if None (failure)
        return result
    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        page = self.get_page(self.base_url)
        if not page:
            raise DistlibException('Unable to get %s' % self.base_url)
        for match in self._distname_re.finditer(page.data):
            result.add(match.group(1))
        return result
class DirectoryLocator(Locator):
    """
    This class locates distributions in a directory tree.
    """
    def __init__(self, path, **kwargs):
        """
        Initialise an instance.
        :param path: The root of the directory tree to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * recursive - if True (the default), subdirectories are
                         recursed into. If False, only the top-level directory
                         is searched,
        """
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        path = os.path.abspath(path)
        if not os.path.isdir(path):
            raise DistlibException('Not a directory: %r' % path)
        self.base_dir = path

    def should_include(self, filename, parent):
        """
        Should a filename be considered as a candidate for a distribution
        archive? As well as the filename, the directory which contains it
        is provided, though not used by the current implementation.
        """
        return filename.endswith(self.downloadable_extensions)

    def _candidate_urls(self):
        """
        Yield file:// URLs for every candidate archive file under base_dir,
        honouring the ``recursive`` flag. This walk was previously
        duplicated verbatim in _get_project and get_distribution_names.
        """
        for root, dirs, files in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    yield urlunparse(('file', '',
                                      pathname2url(os.path.abspath(fn)),
                                      '', '', ''))
            if not self.recursive:
                break

    def _get_project(self, name):
        """Map each matching archive under base_dir to version data."""
        result = {'urls': {}, 'digests': {}}
        for url in self._candidate_urls():
            info = self.convert_url_to_download_info(url, name)
            if info:
                self._update_version_data(result, info)
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for url in self._candidate_urls():
            info = self.convert_url_to_download_info(url, None)
            if info:
                result.add(info['name'])
        return result
class JSONLocator(Locator):
    """
    This locator uses special extended metadata (not available on PyPI) and is
    the basis of performant dependency resolution in distlib. Other locators
    require archive downloads before dependencies can be determined! As you
    might imagine, that can be slow.
    """
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        """Build the version -> Distribution map from extended metadata."""
        result = {'urls': {}, 'digests': {}}
        data = get_project_data(name)
        for info in (data or {}).get('files', []):
            # Only plain source distributions are considered.
            if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
                continue
            # We don't store summary in project metadata as it makes
            # the data bigger for no benefit during dependency
            # resolution
            dist = make_dist(data['name'], info['version'],
                             summary=data.get('summary',
                                              'Placeholder for summary'),
                             scheme=self.scheme)
            md = dist.metadata
            md.source_url = info['url']
            # TODO SHA256 digest
            if info.get('digest'):
                dist.digest = ('md5', info['digest'])
            md.dependencies = info.get('requirements', {})
            dist.exports = info.get('exports', {})
            result[dist.version] = dist
            result['urls'].setdefault(dist.version, set()).add(info['url'])
        return result
class DistPathLocator(Locator):
    """
    This locator finds installed distributions in a path. It can be useful for
    adding to an :class:`AggregatingLocator`.
    """
    def __init__(self, distpath, **kwargs):
        """
        Initialise an instance.

        :param distpath: A :class:`DistributionPath` instance to search.
        """
        super(DistPathLocator, self).__init__(**kwargs)
        assert isinstance(distpath, DistributionPath)
        self.distpath = distpath

    def _get_project(self, name):
        # Delegate the lookup to the wrapped DistributionPath.  A hit is
        # reported under its version, with a 'urls' side table mirroring
        # the shape other locators return.
        found = self.distpath.get_distribution(name)
        if found is None:
            return {}
        return {
            found.version: found,
            'urls': {found.version: set([found.source_url])}
        }
class AggregatingLocator(Locator):
    """
    This class allows you to chain and/or merge a list of locators.
    """
    def __init__(self, *locators, **kwargs):
        """
        Initialise an instance.
        :param locators: The list of locators to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * merge - if False (the default), the first successful
                         search from any of the locators is returned. If True,
                         the results from all locators are merged (this can be
                         slow).
        """
        self.merge = kwargs.pop('merge', False)
        self.locators = locators
        super(AggregatingLocator, self).__init__(**kwargs)
    def clear_cache(self):
        # Clear our own cache and cascade to every wrapped locator.
        super(AggregatingLocator, self).clear_cache()
        for locator in self.locators:
            locator.clear_cache()
    def _set_scheme(self, value):
        # Setting the scheme must propagate to all wrapped locators so they
        # parse/match versions consistently.
        self._scheme = value
        for locator in self.locators:
            locator.scheme = value
    # Re-use the base class getter but override the setter to propagate.
    scheme = property(Locator.scheme.fget, _set_scheme)
    def _get_project(self, name):
        result = {}
        for locator in self.locators:
            d = locator.get_project(name)
            if d:
                if self.merge:
                    # Preserve previously accumulated 'urls'/'digests'
                    # before update() replaces them wholesale, then fold
                    # the saved entries back in.
                    files = result.get('urls', {})
                    digests = result.get('digests', {})
                    # next line could overwrite result['urls'], result['digests']
                    result.update(d)
                    df = result.get('urls')
                    if files and df:
                        for k, v in files.items():
                            if k in df:
                                df[k] |= v
                            else:
                                df[k] = v
                    dd = result.get('digests')
                    if digests and dd:
                        dd.update(digests)
                else:
                    # See issue #18. If any dists are found and we're looking
                    # for specific constraints, we only return something if
                    # a match is found. For example, if a DirectoryLocator
                    # returns just foo (1.0) while we're looking for
                    # foo (>= 2.0), we'll pretend there was nothing there so
                    # that subsequent locators can be queried. Otherwise we
                    # would just return foo (1.0) which would then lead to a
                    # failure to find foo (>= 2.0), because other locators
                    # weren't searched. Note that this only matters when
                    # merge=False.
                    if self.matcher is None:
                        found = True
                    else:
                        found = False
                        for k in d:
                            if self.matcher.match(k):
                                found = True
                                break
                    if found:
                        result = d
                        break
        return result
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for locator in self.locators:
            try:
                result |= locator.get_distribution_names()
            except NotImplementedError:
                # Some locators (e.g. JSONLocator) can't enumerate names.
                pass
        return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
                    JSONLocator(),
                    SimpleScrapingLocator('https://pypi.python.org/simple/',
                                          timeout=3.0),
                    scheme='legacy')
# Module-level convenience alias for the default locator's locate().
locate = default_locator.locate
# Matches legacy requirement strings of the form ``name (version)`` or
# ``name (== version)``; groups: 'name' and 'ver'.
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
    """
    Locate dependencies for distributions.
    """
    def __init__(self, locator=None):
        """
        Initialise an instance, using the specified locator
        to locate distributions.
        """
        self.locator = locator or default_locator
        # Version scheme used to parse/match requirement strings.
        self.scheme = get_scheme(self.locator.scheme)
    def add_distribution(self, dist):
        """
        Add a distribution to the finder. This will update internal information
        about who provides what.
        :param dist: The distribution to add.
        """
        logger.debug('adding distribution %s', dist)
        name = dist.key
        self.dists_by_name[name] = dist
        self.dists[(name, dist.version)] = dist
        # Register every name this dist "provides" so requirements on those
        # names can be satisfied by it.
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            self.provided.setdefault(name, set()).add((version, dist))
    def remove_distribution(self, dist):
        """
        Remove a distribution from the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to remove.
        """
        logger.debug('removing distribution %s', dist)
        name = dist.key
        del self.dists_by_name[name]
        del self.dists[(name, dist.version)]
        # Unregister provided names, dropping the entry entirely when no
        # provider remains.
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
            s = self.provided[name]
            s.remove((version, dist))
            if not s:
                del self.provided[name]
    def get_matcher(self, reqt):
        """
        Get a version matcher for a requirement.
        :param reqt: The requirement
        :type reqt: str
        :return: A version matcher (an instance of
                 :class:`distlib.version.Matcher`).
        """
        try:
            matcher = self.scheme.matcher(reqt)
        except UnsupportedVersionError:
            # XXX compat-mode if cannot read the version
            name = reqt.split()[0]
            matcher = self.scheme.matcher(name)
        return matcher
    def find_providers(self, reqt):
        """
        Find the distributions which can fulfill a requirement.
        :param reqt: The requirement.
        :type reqt: str
        :return: A set of distribution which can fulfill the requirement.
        """
        matcher = self.get_matcher(reqt)
        name = matcher.key   # case-insensitive
        result = set()
        provided = self.provided
        if name in provided:
            for version, provider in provided[name]:
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    match = False
                if match:
                    # First matching provider wins.
                    result.add(provider)
                    break
        return result
    def try_to_replace(self, provider, other, problems):
        """
        Attempt to replace one provider with another. This is typically used
        when resolving dependencies from multiple sources, e.g. A requires
        (B >= 1.0) while C requires (B >= 1.1).
        For successful replacement, ``provider`` must meet all the requirements
        which ``other`` fulfills.
        :param provider: The provider we are trying to replace with.
        :param other: The provider we're trying to replace.
        :param problems: If False is returned, this will contain what
                         problems prevented replacement. This is currently
                         a tuple of the literal string 'cantreplace',
                         ``provider``, ``other``  and the set of requirements
                         that ``provider`` couldn't fulfill.
        :return: True if we can replace ``other`` with ``provider``, else
                 False.
        """
        # Every requirement currently satisfied by `other` must also be
        # satisfied by `provider`'s version.
        rlist = self.reqts[other]
        unmatched = set()
        for s in rlist:
            matcher = self.get_matcher(s)
            if not matcher.match(provider.version):
                unmatched.add(s)
        if unmatched:
            # can't replace other with provider
            problems.add(('cantreplace', provider, other,
                          frozenset(unmatched)))
            result = False
        else:
            # can replace other with provider
            self.remove_distribution(other)
            del self.reqts[other]
            for s in rlist:
                self.reqts.setdefault(provider, set()).add(s)
            self.add_distribution(provider)
            result = True
        return result
    def find(self, requirement, meta_extras=None, prereleases=False):
        """
        Find a distribution and all distributions it depends on.
        :param requirement: The requirement specifying the distribution to
                            find, or a Distribution instance.
        :param meta_extras: A list of meta extras such as :test:, :build: and
                            so on.
        :param prereleases: If ``True``, allow pre-release versions to be
                            returned - otherwise, don't return prereleases
                            unless they're all that's available.
        Return a set of :class:`Distribution` instances and a set of
        problems.
        The distributions returned should be such that they have the
        :attr:`required` attribute set to ``True`` if they were
        from the ``requirement`` passed to ``find()``, and they have the
        :attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.
        The problems should be a tuple consisting of the string
        ``'unsatisfied'`` and the requirement which couldn't be satisfied
        by any distribution known to the locator.
        """
        # (Re)initialise per-call state: this finder is not re-entrant.
        self.provided = {}
        self.dists = {}
        self.dists_by_name = {}
        self.reqts = {}
        meta_extras = set(meta_extras or [])
        if ':*:' in meta_extras:
            meta_extras.remove(':*:')
            # :meta: and :run: are implicitly included
            meta_extras |= set([':test:', ':build:', ':dev:'])
        if isinstance(requirement, Distribution):
            dist = odist = requirement
            logger.debug('passed %s as requirement', odist)
        else:
            dist = odist = self.locator.locate(requirement,
                                               prereleases=prereleases)
            if dist is None:
                raise DistlibException('Unable to locate %r' % requirement)
            logger.debug('located %s', odist)
        dist.requested = True
        problems = set()
        # Work-list algorithm: `todo` holds dists whose requirements still
        # need to be expanded; `install_dists` tracks run-time (as opposed
        # to build-time-only) distributions.
        todo = set([dist])
        install_dists = set([odist])
        while todo:
            dist = todo.pop()
            name = dist.key  # case-insensitive
            if name not in self.dists_by_name:
                self.add_distribution(dist)
            else:
                #import pdb; pdb.set_trace()
                other = self.dists_by_name[name]
                if other != dist:
                    self.try_to_replace(dist, other, problems)
            # ireqts: install/run-time; sreqts: build-time; ereqts: extras
            # requested via meta_extras (only for dists being installed).
            ireqts = dist.run_requires | dist.meta_requires
            sreqts = dist.build_requires
            ereqts = set()
            if dist in install_dists:
                for key in ('test', 'build', 'dev'):
                    e = ':%s:' % key
                    if e in meta_extras:
                        ereqts |= getattr(dist, '%s_requires' % key)
            all_reqts = ireqts | sreqts | ereqts
            for r in all_reqts:
                providers = self.find_providers(r)
                if not providers:
                    logger.debug('No providers found for %r', r)
                    provider = self.locator.locate(r, prereleases=prereleases)
                    # If no provider is found and we didn't consider
                    # prereleases, consider them now.
                    if provider is None and not prereleases:
                        provider = self.locator.locate(r, prereleases=True)
                    if provider is None:
                        logger.debug('Cannot satisfy %r', r)
                        problems.add(('unsatisfied', r))
                    else:
                        n, v = provider.key, provider.version
                        if (n, v) not in self.dists:
                            todo.add(provider)
                        providers.add(provider)
                        if r in ireqts and dist in install_dists:
                            install_dists.add(provider)
                            logger.debug('Adding %s to install_dists',
                                         provider.name_and_version)
                for p in providers:
                    name = p.key
                    if name not in self.dists_by_name:
                        # Remember which requirement pulled p in, so a later
                        # try_to_replace() can re-validate it.
                        self.reqts.setdefault(p, set()).add(r)
                    else:
                        other = self.dists_by_name[name]
                        if other != p:
                            # see if other can be replaced by p
                            self.try_to_replace(p, other, problems)
        dists = set(self.dists.values())
        for dist in dists:
            dist.build_time_dependency = dist not in install_dists
            if dist.build_time_dependency:
                logger.debug('%s is a build-time dependency only.',
                             dist.name_and_version)
        logger.debug('find done for %s', odist)
        return dists, problems
| mit |
oberlin/django | django/core/serializers/json.py | 320 | 3782 | """
Serialize data to/from JSON
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import, unicode_literals
import datetime
import decimal
import json
import sys
import uuid
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.utils import six
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.

    Emits a JSON array, writing one object per model instance and letting
    :class:`DjangoJSONEncoder` handle date/time, Decimal and UUID values.
    """
    internal_use_only = False
    def _init_options(self):
        # Derive json.dump() kwargs from the serializer options.
        # NOTE(review): this lexicographic string-list comparison is not a
        # reliable version check (e.g. ['10'] < ['2']); it dates from when
        # ``json`` could be simplejson — confirm before relying on it.
        if json.__version__.split('.') >= ['2', '1', '3']:
            # Use JS strings to represent Python Decimal instances (ticket #16850)
            self.options.update({'use_decimal': False})
        self._current = None
        self.json_kwargs = self.options.copy()
        # 'stream' and 'fields' are serializer-level options, not valid
        # json.dump() keyword arguments.
        self.json_kwargs.pop('stream', None)
        self.json_kwargs.pop('fields', None)
        if self.options.get('indent'):
            # Prevent trailing spaces
            self.json_kwargs['separators'] = (',', ': ')
    def start_serialization(self):
        # Open the top-level JSON array.
        self._init_options()
        self.stream.write("[")
    def end_serialization(self):
        # Close the top-level JSON array, matching the indent style.
        if self.options.get("indent"):
            self.stream.write("\n")
        self.stream.write("]")
        if self.options.get("indent"):
            self.stream.write("\n")
    def end_object(self, obj):
        # self._current has the field data
        indent = self.options.get("indent")
        if not self.first:
            # Separate from the previous object; json.dump itself never
            # writes the inter-object comma.
            self.stream.write(",")
            if not indent:
                self.stream.write(" ")
        if indent:
            self.stream.write("\n")
        json.dump(self.get_dump_object(obj), self.stream,
                  cls=DjangoJSONEncoder, **self.json_kwargs)
        self._current = None
    def getvalue(self):
        # Grand-parent super
        return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.

    Accepts a file-like object, ``bytes`` or text; yields deserialized
    objects via the Python deserializer. Any failure is re-raised as
    :class:`DeserializationError` with the original traceback preserved.
    """
    if not isinstance(stream_or_string, (bytes, six.string_types)):
        # File-like input: read it all up front.
        stream_or_string = stream_or_string.read()
    if isinstance(stream_or_string, bytes):
        stream_or_string = stream_or_string.decode('utf-8')
    try:
        objects = json.loads(stream_or_string)
        for obj in PythonDeserializer(objects, **options):
            yield obj
    except GeneratorExit:
        # Generator close() must not be converted into a
        # DeserializationError.
        raise
    except Exception as e:
        # Map to deserializer error
        six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
class DjangoJSONEncoder(json.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time, decimal types and UUIDs.
    """
    def default(self, o):
        # See "Date Time String Format" in the ECMA-262 specification.
        # Note: the datetime check must precede the date check, since
        # datetime is a subclass of date.
        if isinstance(o, datetime.datetime):
            encoded = o.isoformat()
            if o.microsecond:
                # ECMA-262 allows at most millisecond precision: drop the
                # last three microsecond digits.
                encoded = encoded[:23] + encoded[26:]
            if encoded.endswith('+00:00'):
                encoded = encoded[:-6] + 'Z'
            return encoded
        if isinstance(o, datetime.date):
            return o.isoformat()
        if isinstance(o, datetime.time):
            if is_aware(o):
                raise ValueError("JSON can't represent timezone-aware times.")
            encoded = o.isoformat()
            if o.microsecond:
                # Truncate to millisecond precision, as for datetimes.
                encoded = encoded[:12]
            return encoded
        if isinstance(o, (decimal.Decimal, uuid.UUID)):
            # Both serialize as plain strings to avoid precision loss.
            return str(o)
        return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
# New code should reference ``DjangoJSONEncoder`` directly.
DateTimeAwareJSONEncoder = DjangoJSONEncoder
| bsd-3-clause |
petemoore/git-repo | subcmds/grep.py | 89 | 7931 | #
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from color import Coloring
from command import PagedCommand
from git_command import git_require, GitCommand
class GrepColoring(Coloring):
  """Colour scheme for grep output, driven by the 'color.grep' git config."""
  def __init__(self, config):
    Coloring.__init__(self, config, 'grep')
    # 'project' prints the per-project banner/prefix in bold.
    self.project = self.printer('project', attr='bold')
class Grep(PagedCommand):
  common = True
  helpSummary = "Print lines matching a pattern"
  helpUsage = """
%prog {pattern | -e pattern} [<project>...]
"""
  helpDescription = """
Search for the specified patterns in all project files.
Boolean Options
---------------
The following options can appear as often as necessary to express
the pattern to locate:
 -e PATTERN
 --and, --or, --not, -(, -)
Further, the -r/--revision option may be specified multiple times
in order to scan multiple trees.  If the same file matches in more
than one tree, only the first result is reported, prefixed by the
revision name it was found under.
Examples
-------
Look for a line that has '#define' and either 'MAX_PATH or 'PATH_MAX':
  repo grep -e '#define' --and -\\( -e MAX_PATH -e PATH_MAX \\)
Look for a line that has 'NODE' or 'Unexpected' in files that
contain a line that matches both expressions:
  repo grep --all-match -e NODE -e Unexpected
"""
  def _Options(self, p):
    # Most flags are not interpreted by repo itself; the `carry` callback
    # just records them (in order) on parser.values.cmd_argv so they can be
    # forwarded verbatim to `git grep` in Execute().
    def carry(option,
              opt_str,
              value,
              parser):
      pt = getattr(parser.values, 'cmd_argv', None)
      if pt is None:
        pt = []
        setattr(parser.values, 'cmd_argv', pt)
      # optparse can't define literal '(' / ')' options, so '-(' / '-)'
      # are translated back into git grep's grouping tokens.
      if opt_str == '-(':
        pt.append('(')
      elif opt_str == '-)':
        pt.append(')')
      else:
        pt.append(opt_str)
      if value is not None:
        pt.append(value)
    g = p.add_option_group('Sources')
    g.add_option('--cached',
                 action='callback', callback=carry,
                 help='Search the index, instead of the work tree')
    g.add_option('-r', '--revision',
                 dest='revision', action='append', metavar='TREEish',
                 help='Search TREEish, instead of the work tree')
    g = p.add_option_group('Pattern')
    g.add_option('-e',
                 action='callback', callback=carry,
                 metavar='PATTERN', type='str',
                 help='Pattern to search for')
    g.add_option('-i', '--ignore-case',
                 action='callback', callback=carry,
                 help='Ignore case differences')
    g.add_option('-a', '--text',
                 action='callback', callback=carry,
                 help="Process binary files as if they were text")
    g.add_option('-I',
                 action='callback', callback=carry,
                 help="Don't match the pattern in binary files")
    g.add_option('-w', '--word-regexp',
                 action='callback', callback=carry,
                 help='Match the pattern only at word boundaries')
    g.add_option('-v', '--invert-match',
                 action='callback', callback=carry,
                 help='Select non-matching lines')
    g.add_option('-G', '--basic-regexp',
                 action='callback', callback=carry,
                 help='Use POSIX basic regexp for patterns (default)')
    g.add_option('-E', '--extended-regexp',
                 action='callback', callback=carry,
                 help='Use POSIX extended regexp for patterns')
    g.add_option('-F', '--fixed-strings',
                 action='callback', callback=carry,
                 help='Use fixed strings (not regexp) for pattern')
    g = p.add_option_group('Pattern Grouping')
    g.add_option('--all-match',
                 action='callback', callback=carry,
                 help='Limit match to lines that have all patterns')
    g.add_option('--and', '--or', '--not',
                 action='callback', callback=carry,
                 help='Boolean operators to combine patterns')
    g.add_option('-(', '-)',
                 action='callback', callback=carry,
                 help='Boolean operator grouping')
    g = p.add_option_group('Output')
    g.add_option('-n',
                 action='callback', callback=carry,
                 help='Prefix the line number to matching lines')
    g.add_option('-C',
                 action='callback', callback=carry,
                 metavar='CONTEXT', type='str',
                 help='Show CONTEXT lines around match')
    g.add_option('-B',
                 action='callback', callback=carry,
                 metavar='CONTEXT', type='str',
                 help='Show CONTEXT lines before match')
    g.add_option('-A',
                 action='callback', callback=carry,
                 metavar='CONTEXT', type='str',
                 help='Show CONTEXT lines after match')
    g.add_option('-l', '--name-only', '--files-with-matches',
                 action='callback', callback=carry,
                 help='Show only file names containing matching lines')
    g.add_option('-L', '--files-without-match',
                 action='callback', callback=carry,
                 help='Show only file names not containing matching lines')
  def Execute(self, opt, args):
    # Build a `git grep` argv from the carried flags, run it in every
    # selected project, and re-emit the output with per-project prefixes.
    # Exits 0 if any project matched, 1 otherwise.
    out = GrepColoring(self.manifest.manifestProject.config)
    cmd_argv = ['grep']
    if out.is_on and git_require((1, 6, 3)):
      cmd_argv.append('--color')
    cmd_argv.extend(getattr(opt, 'cmd_argv', []))
    if '-e' not in cmd_argv:
      # The first positional argument is the pattern when no -e was given.
      if not args:
        self.Usage()
      cmd_argv.append('-e')
      cmd_argv.append(args[0])
      args = args[1:]
    projects = self.GetProjects(args)
    full_name = False
    if len(projects) > 1:
      # Disambiguate output when searching more than one project.
      cmd_argv.append('--full-name')
      full_name = True
    have_rev = False
    if opt.revision:
      if '--cached' in cmd_argv:
        print('fatal: cannot combine --cached and --revision', file=sys.stderr)
        sys.exit(1)
      have_rev = True
      cmd_argv.extend(opt.revision)
    cmd_argv.append('--')
    bad_rev = False
    have_match = False
    for project in projects:
      p = GitCommand(project,
                     cmd_argv,
                     bare = False,
                     capture_stdout = True,
                     capture_stderr = True)
      if p.Wait() != 0:
        # no results
        #
        if p.stderr:
          if have_rev and 'fatal: ambiguous argument' in p.stderr:
            bad_rev = True
          else:
            out.project('--- project %s ---' % project.relpath)
            out.nl()
            out.write("%s", p.stderr)
            out.nl()
        continue
      have_match = True
      # We cut the last element, to avoid a blank line.
      #
      r = p.stdout.split('\n')
      r = r[0:-1]
      if have_rev and full_name:
        # Lines look like "rev:path:match"; re-prefix path with the
        # project's relative path.
        for line in r:
          rev, line = line.split(':', 1)
          out.write("%s", rev)
          out.write(':')
          out.project(project.relpath)
          out.write('/')
          out.write("%s", line)
          out.nl()
      elif full_name:
        for line in r:
          out.project(project.relpath)
          out.write('/')
          out.write("%s", line)
          out.nl()
      else:
        for line in r:
          print(line)
    if have_match:
      sys.exit(0)
    elif have_rev and bad_rev:
      for r in opt.revision:
        print("error: can't search revision %s" % r, file=sys.stderr)
      sys.exit(1)
    else:
      sys.exit(1)
| apache-2.0 |
anaran/kuma | vendor/packages/pyflakes/reporter.py | 52 | 2666 | """
Provide the Reporter class.
"""
import re
import sys
class Reporter(object):
    """
    Formats the results of pyflakes checks to users.
    """

    def __init__(self, warningStream, errorStream):
        """
        Construct a L{Reporter}.

        @param warningStream: A file-like object where warnings will be
            written to.  The stream's C{write} method must accept unicode.
            C{sys.stdout} is a good value.
        @param errorStream: A file-like object where error output will be
            written to.  The stream's C{write} method must accept unicode.
            C{sys.stderr} is a good value.
        """
        self._stdout = warningStream
        self._stderr = errorStream

    def unexpectedError(self, filename, msg):
        """
        An unexpected error occurred trying to process C{filename}.

        @param filename: The path to a file that we could not process.
        @ptype filename: C{unicode}
        @param msg: A message explaining the problem.
        @ptype msg: C{unicode}
        """
        self._stderr.write("%s: %s\n" % (filename, msg))

    def syntaxError(self, filename, msg, lineno, offset, text):
        """
        There was a syntax error in C{filename}.

        @param filename: The path to the file with the syntax error.
        @ptype filename: C{unicode}
        @param msg: An explanation of the syntax error.
        @ptype msg: C{unicode}
        @param lineno: The line number where the syntax error occurred.
        @ptype lineno: C{int}
        @param offset: The column on which the syntax error occurred, or None.
        @ptype offset: C{int}
        @param text: The source code containing the syntax error, or None if
            unavailable (e.g. on an unexpected EOF).
        @ptype text: C{unicode}
        """
        # `text` can legitimately be None or empty for some compile errors;
        # guard the splitlines()[-1] access, which used to raise.
        line = text.splitlines()[-1] if text else None
        if offset is not None and line is not None:
            # The compiler reports `offset` relative to the start of `text`,
            # which may span several physical lines; rebase it onto the last
            # line before printing a 1-based column.
            offset = offset - (len(text) - len(line))
            self._stderr.write('%s:%d:%d: %s\n' %
                               (filename, lineno, offset + 1, msg))
        else:
            self._stderr.write('%s:%d: %s\n' % (filename, lineno, msg))
        if line is not None:
            self._stderr.write(line)
            self._stderr.write('\n')
            if offset is not None:
                # Draw a caret under the offending column, replacing every
                # non-space character before it with a space so the caret
                # lines up regardless of the source text.
                self._stderr.write(re.sub(r'\S', ' ', line[:offset]) +
                                   "^\n")

    def flake(self, message):
        """
        pyflakes found something wrong with the code.

        @param: A L{pyflakes.messages.Message}.
        """
        self._stdout.write(str(message))
        self._stdout.write('\n')
def _makeDefaultReporter():
    """
    Make a reporter that can be used when no reporter is specified.

    Warnings go to C{sys.stdout}, errors to C{sys.stderr}.
    """
    return Reporter(sys.stdout, sys.stderr)
| mpl-2.0 |
Winand/pandas | pandas/tests/indexes/timedeltas/test_ops.py | 6 | 48590 | import pytest
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index)
from pandas._libs.tslib import iNaT
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
    def setup_method(self, method):
        """Restrict the shared Ops fixtures to TimedeltaIndex objects only."""
        super(TestTimedeltaIndexOps, self).setup_method(method)
        mask = lambda x: isinstance(x, TimedeltaIndex)
        self.is_valid_objs = [o for o in self.objs if mask(o)]
        self.not_valid_objs = []
    def test_ops_properties(self):
        """Field and object accessor properties exist on TimedeltaIndex."""
        f = lambda x: isinstance(x, TimedeltaIndex)
        self.check_ops_properties(TimedeltaIndex._field_ops, f)
        self.check_ops_properties(TimedeltaIndex._object_ops, f)
    def test_asobject_tolist(self):
        """.asobject returns an object-dtype Index of Timedeltas (NaT kept); tolist matches."""
        idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
        expected_list = [Timedelta('1 days'), Timedelta('2 days'),
                         Timedelta('3 days'), Timedelta('4 days')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.asobject
        assert isinstance(result, Index)
        assert result.dtype == object
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert idx.tolist() == expected_list
        # NaT entries must survive the conversion as pd.NaT objects.
        idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
                              timedelta(days=4)], name='idx')
        expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
                         Timedelta('4 days')]
        expected = pd.Index(expected_list, dtype=object, name='idx')
        result = idx.asobject
        assert isinstance(result, Index)
        assert result.dtype == object
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert idx.tolist() == expected_list
    def test_minmax(self):
        """min/max/argmin/argmax ignore NaT; empty or all-NaT indexes yield NaT."""
        # monotonic
        idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
        assert idx1.is_monotonic
        # non-monotonic
        idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
        assert not idx2.is_monotonic
        for idx in [idx1, idx2]:
            assert idx.min() == Timedelta('1 days')
            assert idx.max() == Timedelta('3 days')
            assert idx.argmin() == 0
            assert idx.argmax() == 2
        for op in ['min', 'max']:
            # Return NaT
            obj = TimedeltaIndex([])
            assert pd.isna(getattr(obj, op)())
            obj = TimedeltaIndex([pd.NaT])
            assert pd.isna(getattr(obj, op)())
            obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
            assert pd.isna(getattr(obj, op)())
    def test_numpy_minmax(self):
        """np.min/max/argmin/argmax work on TimedeltaIndex; 'out' kwarg is rejected."""
        dr = pd.date_range(start='2016-01-15', end='2016-01-20')
        # Reinterpreting datetimes as timedeltas-since-epoch.
        td = TimedeltaIndex(np.asarray(dr))
        assert np.min(td) == Timedelta('16815 days')
        assert np.max(td) == Timedelta('16820 days')
        errmsg = "the 'out' parameter is not supported"
        tm.assert_raises_regex(ValueError, errmsg, np.min, td, out=0)
        tm.assert_raises_regex(ValueError, errmsg, np.max, td, out=0)
        assert np.argmin(td) == 0
        assert np.argmax(td) == 5
        if not _np_version_under1p10:
            errmsg = "the 'out' parameter is not supported"
            tm.assert_raises_regex(
                ValueError, errmsg, np.argmin, td, out=0)
            tm.assert_raises_regex(
                ValueError, errmsg, np.argmax, td, out=0)
    def test_round(self):
        """round() to a fixed freq works on index and scalar; bad/non-fixed freqs raise."""
        td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
        elt = td[1]
        expected_rng = TimedeltaIndex([
            Timedelta('16801 days 00:00:00'),
            Timedelta('16801 days 00:00:00'),
            Timedelta('16801 days 01:00:00'),
            Timedelta('16801 days 02:00:00'),
            Timedelta('16801 days 02:00:00'),
        ])
        expected_elt = expected_rng[1]
        tm.assert_index_equal(td.round(freq='H'), expected_rng)
        assert elt.round(freq='H') == expected_elt
        msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
        with tm.assert_raises_regex(ValueError, msg):
            td.round(freq='foo')
        with tm.assert_raises_regex(ValueError, msg):
            elt.round(freq='foo')
        # Non-fixed frequencies (month end) cannot be rounded to.
        msg = "<MonthEnd> is a non-fixed frequency"
        tm.assert_raises_regex(ValueError, msg, td.round, freq='M')
        tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
    def test_representation(self):
        """repr/str/unicode of TimedeltaIndex match the expected canonical strings."""
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
        exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
        exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
                "freq='D')")
        exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
                "dtype='timedelta64[ns]', freq='D')")
        exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
                "dtype='timedelta64[ns]', freq='D')")
        exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
                "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
        with pd.option_context('display.width', 300):
            for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                     [exp1, exp2, exp3, exp4, exp5]):
                for func in ['__repr__', '__unicode__', '__str__']:
                    result = getattr(idx, func)()
                    assert result == expected
    def test_representation_to_series(self):
        """repr of a Series built from a TimedeltaIndex matches expected layout."""
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
        exp1 = """Series([], dtype: timedelta64[ns])"""
        exp2 = """0   1 days
dtype: timedelta64[ns]"""
        exp3 = """0   1 days
1   2 days
dtype: timedelta64[ns]"""
        exp4 = """0   1 days
1   2 days
2   3 days
dtype: timedelta64[ns]"""
        exp5 = """0   1 days 00:00:01
1   2 days 00:00:00
2   3 days 00:00:00
dtype: timedelta64[ns]"""
        with pd.option_context('display.width', 300):
            for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                     [exp1, exp2, exp3, exp4, exp5]):
                result = repr(pd.Series(idx))
                assert result == expected
    def test_summary(self):
        """summary() reports entry count, bounds and freq (GH9116)."""
        # GH9116
        idx1 = TimedeltaIndex([], freq='D')
        idx2 = TimedeltaIndex(['1 days'], freq='D')
        idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
        idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
        idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
        exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
        exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
        exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
        exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
        exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
                "00:00:00")
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                 [exp1, exp2, exp3, exp4, exp5]):
            result = idx.summary()
            assert result == expected
    def test_add_iadd(self):
        """+ and += with offset-like scalars and ints shift a TimedeltaIndex."""
        # only test adding/sub offsets as + is now numeric
        # offset
        offsets = [pd.offsets.Hour(2), timedelta(hours=2),
                   np.timedelta64(2, 'h'), Timedelta(hours=2)]
        for delta in offsets:
            rng = timedelta_range('1 days', '10 days')
            result = rng + delta
            expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
                                       freq='D')
            tm.assert_index_equal(result, expected)
            rng += delta
            tm.assert_index_equal(rng, expected)
        # int
        # Adding an int shifts by one freq unit (here: 1 hour).
        rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
        result = rng + 1
        expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
        tm.assert_index_equal(result, expected)
        rng += 1
        tm.assert_index_equal(rng, expected)
    def test_sub_isub(self):
        """- and -= with offsets/ints; subtracting a Timestamp from a tdi raises."""
        # only test adding/sub offsets as - is now numeric
        # offset
        offsets = [pd.offsets.Hour(2), timedelta(hours=2),
                   np.timedelta64(2, 'h'), Timedelta(hours=2)]
        for delta in offsets:
            rng = timedelta_range('1 days', '10 days')
            result = rng - delta
            expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
            tm.assert_index_equal(result, expected)
            rng -= delta
            tm.assert_index_equal(rng, expected)
        # int
        rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
        result = rng - 1
        expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
        tm.assert_index_equal(result, expected)
        rng -= 1
        tm.assert_index_equal(rng, expected)
        idx = TimedeltaIndex(['1 day', '2 day'])
        msg = "cannot subtract a datelike from a TimedeltaIndex"
        with tm.assert_raises_regex(TypeError, msg):
            idx - Timestamp('2011-01-01')
        # Timestamp + tdi is allowed and yields a DatetimeIndex.
        result = Timestamp('2011-01-01') + idx
        expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
        tm.assert_index_equal(result, expected)
    def test_ops_compat(self):
        """tdi / and // by offsets yields numeric indexes; * and /NaT raise."""
        offsets = [pd.offsets.Hour(2), timedelta(hours=2),
                   np.timedelta64(2, 'h'), Timedelta(hours=2)]
        rng = timedelta_range('1 days', '10 days', name='foo')
        # multiply
        for offset in offsets:
            pytest.raises(TypeError, lambda: rng * offset)
        # divide
        expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
        for offset in offsets:
            result = rng / offset
            tm.assert_index_equal(result, expected, exact=False)
        # floor divide
        expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
        for offset in offsets:
            result = rng // offset
            tm.assert_index_equal(result, expected, exact=False)
        # divide with nats
        rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        expected = Float64Index([12, np.nan, 24], name='foo')
        for offset in offsets:
            result = rng / offset
            tm.assert_index_equal(result, expected)
        # don't allow division by NaT (make could in the future)
        pytest.raises(TypeError, lambda: rng / pd.NaT)
    def test_subtraction_ops(self):
        """Subtraction combinations of tdi/dti/Timedelta/Timestamp: valid results and TypeErrors."""
        # with datetimes/timedelta and tdi/dti
        tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
        dti = date_range('20130101', periods=3, name='bar')
        td = Timedelta('1 days')
        dt = Timestamp('20130101')
        # datelike - from - timedelta-like is undefined in these directions
        pytest.raises(TypeError, lambda: tdi - dt)
        pytest.raises(TypeError, lambda: tdi - dti)
        pytest.raises(TypeError, lambda: td - dt)
        pytest.raises(TypeError, lambda: td - dti)
        result = dt - dti
        expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
        tm.assert_index_equal(result, expected)
        result = dti - dt
        expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
        tm.assert_index_equal(result, expected)
        result = tdi - td
        expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
        tm.assert_index_equal(result, expected, check_names=False)
        result = td - tdi
        expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
        tm.assert_index_equal(result, expected, check_names=False)
        result = dti - td
        expected = DatetimeIndex(
            ['20121231', '20130101', '20130102'], name='bar')
        tm.assert_index_equal(result, expected, check_names=False)
        result = dt - tdi
        expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
        tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
pytest.raises(ValueError, lambda: tdi + dti[0:1])
pytest.raises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
pytest.raises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
assert idx[0] in idx
def test_unknown_attribute(self):
# see gh-9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
assert 'foo' not in ts.__dict__.keys()
pytest.raises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, idx[::-1])
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
assert result == pd.Timedelta('1 day')
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
    def test_drop_duplicates_metadata(self):
        """drop_duplicates keeps freq when the index was already unique,
        but cannot restore it after real duplicates were removed (GH 10115)."""
        idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
        result = idx.drop_duplicates()
        tm.assert_index_equal(idx, result)
        assert idx.freq == result.freq
        # appending the index to itself introduces duplicates and drops freq
        idx_dup = idx.append(idx)
        assert idx_dup.freq is None # freq is reset
        result = idx_dup.drop_duplicates()
        tm.assert_index_equal(idx, result)
        # freq is not re-inferred after deduplication
        assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Timedelta('1 day')
result = idx.take([-1])
assert result == pd.Timedelta('31 day')
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
    def test_nat_new(self):
        """_nat_new (private index API) returns an all-NaT index of the same
        length/name, or the raw iNaT int64 values when box=False."""
        idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
        result = idx._nat_new()
        exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
        tm.assert_index_equal(result, exp)
        # box=False exposes the underlying int64 representation (iNaT sentinel)
        result = idx._nat_new(box=False)
        exp = np.array([iNaT] * 5, dtype=np.int64)
        tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_nat(self):
assert pd.TimedeltaIndex._na_value is pd.NaT
assert pd.TimedeltaIndex([])._na_value is pd.NaT
idx = pd.TimedeltaIndex(['1 days', '2 days'])
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.asobject.equals(idx2.asobject)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
class TestTimedeltas(object):
_multiprocess_can_split_ = True
    def test_ops(self):
        """Arithmetic on a Timedelta scalar: unary sign, +, -, *, /, //,
        abs, NaT propagation, and rejection of invalid operand types."""
        td = Timedelta(10, unit='d')
        assert -td == Timedelta(-10, unit='d')
        assert +td == Timedelta(10, unit='d')
        assert td - td == Timedelta(0, unit='ns')
        # NaT propagates through addition/subtraction as NaT itself...
        assert (td - pd.NaT) is pd.NaT
        assert td + td == Timedelta(20, unit='d')
        assert (td + pd.NaT) is pd.NaT
        assert td * 2 == Timedelta(20, unit='d')
        assert (td * pd.NaT) is pd.NaT
        assert td / 2 == Timedelta(5, unit='d')
        assert td // 2 == Timedelta(5, unit='d')
        assert abs(td) == td
        assert abs(-td) == td
        # timedelta / timedelta yields a plain number
        assert td / td == 1
        # ...but division by NaT yields np.nan, not NaT
        assert (td / pd.NaT) is np.nan
        assert (td // pd.NaT) is np.nan
        # invert
        assert -td == Timedelta('-10d')
        assert td * -1 == Timedelta('-10d')
        assert -1 * td == Timedelta('-10d')
        assert abs(-td) == Timedelta('10d')
        # invalid multiply with another timedelta
        pytest.raises(TypeError, lambda: td * td)
        # can't operate with integers
        pytest.raises(TypeError, lambda: td + 2)
        pytest.raises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1)
assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td
assert 240 == td / pd.offsets.Hour(1)
assert 1 / 240.0 == pd.offsets.Hour(1) / td
assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1)
assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
pytest.raises(TypeError, lambda: td * other)
pytest.raises(TypeError, lambda: other * td)
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
tm.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
assert s.dtype == object
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
assert s2.dtype == object
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
assert s.dtype == object
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
assert td.__add__(other) is NotImplemented
assert td.__sub__(other) is NotImplemented
assert td.__truediv__(other) is NotImplemented
assert td.__mul__(other) is NotImplemented
assert td.__floordiv__(other) is NotImplemented
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with pytest.raises(TypeError):
l + r
with pytest.raises(TypeError):
l > r
with pytest.raises(TypeError):
l == r
with pytest.raises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
assert result == expected
result = td.to_frame().mean()
assert result[0] == expected
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
assert result == expected
result = td.median()
expected = to_timedelta('00:00:09')
assert result == expected
result = td.to_frame().median()
assert result[0] == expected
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
assert result == expected
result = td.to_frame().sum()
assert result[0] == expected
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
assert result == expected
result = td.to_frame().std()
assert result[0] == expected
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
pytest.raises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
assert s.diff().median() == timedelta(days=4)
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
assert s.diff().median() == timedelta(days=6)
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_compare_timedelta_series(self):
# regresssion test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
class TestSlicing(object):
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4D'
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2D'
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assert_raises_regex(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assert_raises_regex(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta([_NaT]) - Timedelta('1 days')
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with tm.assert_raises_regex(OverflowError, msg):
(to_timedelta([_NaT, '5 days', '1 hours']) -
to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (to_timedelta([pd.NaT, '5 days', '1 hours']) +
to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
| bsd-3-clause |
GoSteven/Diary | django/contrib/localflavor/at/forms.py | 10 | 2336 | """
AT-specific Form helpers
"""
import re
from django.utils.translation import ugettext_lazy as _
from django.forms.fields import Field, RegexField, Select
from django.forms import ValidationError
# Format "XXXX XXXXXX" (4 digits, space, 6 digits). Anchored at BOTH ends:
# without the trailing $ a value such as '1237 0101801' passed validation
# and its extra characters were silently returned by clean().
re_ssn = re.compile(r'^\d{4} \d{6}$')
class ATZipCodeField(RegexField):
    """
    A form field that validates its input is an Austrian postcode.
    Accepts exactly 4 digits (e.g. "1010" for Vienna).
    """
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXX.'),
    }
    def __init__(self, *args, **kwargs):
        # The regex alone enforces the format, so no extra length bounds
        # are needed.
        super(ATZipCodeField, self).__init__(r'^\d{4}$',
                max_length=None, min_length=None, *args, **kwargs)
class ATStateSelect(Select):
    """
    A Select widget that uses a list of AT states as its choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the state data is only loaded when the widget
        # is actually instantiated.
        from django.contrib.localflavor.at.at_states import STATE_CHOICES
        super(ATStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class ATSocialSecurityNumberField(Field):
    """
    Austrian Social Security numbers are composed of a 4 digits and 6 digits
    field. The latter represents in most cases the person's birthdate while
    the first 4 digits represent a 3-digits counter and a one-digit checksum.
    The 6-digits field can also differ from the person's birthdate if the
    3-digits counter suffered an overflow.
    This code is based on information available on
    http://de.wikipedia.org/wiki/Sozialversicherungsnummer#.C3.96sterreich
    """
    default_error_messages = {
        'invalid': _(u'Enter a valid Austrian Social Security Number in XXXX XXXXXX format.'),
    }
    def clean(self, value):
        # A well-formed value is "SSSC DDMMYY": 3-digit serial, 1 checksum
        # digit, one space, then 6 digits.
        # NOTE(review): re_ssn is not end-anchored, so trailing characters
        # after the 6-digit field are not rejected here — confirm intent.
        if not re_ssn.search(value):
            raise ValidationError(self.error_messages['invalid'])
        sqnr, date = value.split(" ")
        # split the serial number from its trailing checksum digit
        sqnr, check = (sqnr[:3], (sqnr[3]))
        # serial numbers below 100 are not issued
        if int(sqnr) < 100:
            raise ValidationError(self.error_messages['invalid'])
        # weighted digit sum modulo 11, per the Wikipedia reference above
        res = int(sqnr[0])*3 + int(sqnr[1])*7 + int(sqnr[2])*9 \
            + int(date[0])*5 + int(date[1])*8 + int(date[2])*4 \
            + int(date[3])*2 + int(date[4])*1 + int(date[5])*6
        res = res % 11
        if res != int(check):
            raise ValidationError(self.error_messages['invalid'])
        # return in canonical "XXXX XXXXXX" form
        return u'%s%s %s'%(sqnr, check, date,)
| bsd-3-clause |
project-asap/IReS-Platform | asap-tools/kafka/kafka_file_producer.py | 1 | 2429 | #!/usr/bin/env python
from os import listdir
from os.path import isfile, isdir
from collections import Iterable
from kafka import SimpleProducer, KafkaClient
from argparse import ArgumentParser
def read_all(files, max_lines=None):
    """Yield lines from the given file path(s), up to max_lines in total.

    files: a single path or an iterable of paths. A bare string is treated
        as one path (strings are themselves Iterable, so the naive
        isinstance check would have iterated a lone path character by
        character and tried to open each character as a file).
    max_lines: stop after this many lines; None means no limit. A value of
        0 now yields nothing, instead of being treated as "unlimited".
    """
    if isinstance(files, str) or not isinstance(files, Iterable):
        files = [files]
    read_lines = 0
    for path in files:
        # close each file deterministically instead of leaking the handle
        with open(path, 'r') as fh:
            for line in fh:
                # limit the output lines
                if max_lines is not None and read_lines >= max_lines:
                    return
                yield line
                read_lines += 1
def read_all_from_file_or_dict(path, maxLines=None):
    """Yield up to maxLines lines from `path`.

    If `path` is a directory, its regular files are read in sorted name
    order; if it is a file, only that file is read; otherwise an Exception
    is raised.

    NOTE(review): the name says "dict" but the code handles a *dir*; the
    name is kept because the script below calls it by this name.
    """
    if isdir(path):
        # construct the list of files to read
        entries = map(lambda e: path+"/"+e, listdir(path))
        files = sorted(filter(isfile, entries))
    elif isfile(path):
        files = [path]
    else:
        raise Exception("The given path ({}) is neither a dir or a file".format(path))
    return read_all(files, maxLines)
def get_kafka_producer(broker, async=True):
    """Return a SimpleProducer connected to `broker` ("host:port").

    NOTE(review): `async` became a reserved keyword in Python 3.7, so this
    signature (like the print statements below) is Python 2 only.
    """
    import logging
    # only surface errors from the kafka client library
    logging.basicConfig(
        format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
        level=logging.ERROR
    )
    kafka = KafkaClient(broker)
    # To send messages asynchronously
    producer = SimpleProducer(kafka, async=async)
    return producer
# --- Command-line interface --------------------------------------------------
parser = ArgumentParser("producer for kafka that reads first L lines from file")
parser.add_argument('-f', "--file", help="the input file to read lines from", required=True)
parser.add_argument('-l', "--lines", help="the number of lines to read from input file", type=int, required=True)
parser.add_argument('-b', '--broker', default="localhost:9092")
# NOTE(review): the help text below looks copy-pasted from --async; --topic is
# the Kafka topic that lines are published to (default "test").
parser.add_argument('-t', "--topic", help="asynchronous producer", default="test")
parser.add_argument('-a', "--async", help="asynchronous producer", action='store_true')
args = parser.parse_args()
filepath = args.file
topic = args.topic
producer = get_kafka_producer(args.broker, args.async)
# method that sends messages to given topic
send_message = lambda msg: producer.send_messages(topic, msg)
# Totals for the summary printed at the end.
read_lines = 0
read_chars = 0
print "starting"
for l in read_all_from_file_or_dict(filepath, args.lines):
    read_lines += 1
    read_chars += len(l)
    responses = send_message(l)
# The generator stops early when the input runs out of lines.
if read_lines < args.lines:
    print "Not enough lines in file"
print "stopping"
# Flush pending (async) messages and release the connection.
producer.stop()
print "stopped"
print "Read", read_lines, "lines"
print "Read", read_chars, "chars"
| apache-2.0 |
13xforever/webserver | admin/plugins/fcgi.py | 5 | 1725 | # -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import Cherokee
import validations
import CgiBase
import Balancer
from util import *
from consts import *
HELPS = CgiBase.HELPS + [('modules_handlers_fcgi', "FastCGI")]
class Plugin_fcgi (CgiBase.PluginHandlerCGI):
    """Cherokee-admin configuration plugin for the FastCGI handler.

    Builds on the generic CGI handler page and appends a balancer selector
    for choosing how requests are spread over FastCGI back-ends.
    """
    def __init__ (self, key, **kwargs):
        # Generic CGI options relevant to FastCGI handlers.
        kwargs['show_script_alias'] = True
        kwargs['show_change_uid'] = False
        kwargs['show_document_root'] = True

        # CGI Generic
        CgiBase.PluginHandlerCGI.__init__ (self, key, **kwargs)
        CgiBase.PluginHandlerCGI.AddCommon (self)

        # Balancer selection widget (only balancers supported by this build).
        balancer_selector = CTK.PluginSelector(
            '%s!balancer' % (key),
            trans_options(Cherokee.support.filter_available(BALANCERS)))

        props = CTK.PropsTable()
        props.Add(_("Balancer"), balancer_selector.selector_widget, _(Balancer.NOTE_BALANCER))

        # Assemble the page: section header, the table, then the selector body.
        self += CTK.RawHTML('<h2>%s</h2>' % (_('FastCGI Specific')))
        self += CTK.Indenter(props)
        self += balancer_selector
| gpl-2.0 |
joram/sickbeard-orange | lib/enzyme/mkv.py | 163 | 30439 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
# Copyright 2003-2006 Jason Tackaberry <tack@urandom.ca>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from exceptions import ParseError
from struct import unpack
import core
import logging
import re
__all__ = ['Parser']
# get logging object
log = logging.getLogger(__name__)
# Main IDs for the Matroska streams
MATROSKA_VIDEO_TRACK = 0x01
MATROSKA_AUDIO_TRACK = 0x02
MATROSKA_SUBTITLES_TRACK = 0x11
MATROSKA_HEADER_ID = 0x1A45DFA3
MATROSKA_TRACKS_ID = 0x1654AE6B
MATROSKA_CUES_ID = 0x1C53BB6B
MATROSKA_SEGMENT_ID = 0x18538067
MATROSKA_SEGMENT_INFO_ID = 0x1549A966
MATROSKA_CLUSTER_ID = 0x1F43B675
MATROSKA_VOID_ID = 0xEC
MATROSKA_CRC_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_DURATION_ID = 0x4489
MATROSKA_CRC32_ID = 0xBF
MATROSKA_TIMECODESCALE_ID = 0x2AD7B1
MATROSKA_MUXING_APP_ID = 0x4D80
MATROSKA_WRITING_APP_ID = 0x5741
MATROSKA_CODEC_ID = 0x86
MATROSKA_CODEC_PRIVATE_ID = 0x63A2
MATROSKA_FRAME_DURATION_ID = 0x23E383
MATROSKA_VIDEO_SETTINGS_ID = 0xE0
MATROSKA_VIDEO_WIDTH_ID = 0xB0
MATROSKA_VIDEO_HEIGHT_ID = 0xBA
MATROSKA_VIDEO_INTERLACED_ID = 0x9A
MATROSKA_VIDEO_DISPLAY_WIDTH_ID = 0x54B0
MATROSKA_VIDEO_DISPLAY_HEIGHT_ID = 0x54BA
MATROSKA_AUDIO_SETTINGS_ID = 0xE1
MATROSKA_AUDIO_SAMPLERATE_ID = 0xB5
MATROSKA_AUDIO_CHANNELS_ID = 0x9F
MATROSKA_TRACK_UID_ID = 0x73C5
MATROSKA_TRACK_NUMBER_ID = 0xD7
MATROSKA_TRACK_TYPE_ID = 0x83
MATROSKA_TRACK_LANGUAGE_ID = 0x22B59C
MATROSKA_TRACK_OFFSET = 0x537F
MATROSKA_TRACK_FLAG_DEFAULT_ID = 0x88
MATROSKA_TRACK_FLAG_ENABLED_ID = 0xB9
MATROSKA_TITLE_ID = 0x7BA9
MATROSKA_DATE_UTC_ID = 0x4461
MATROSKA_NAME_ID = 0x536E
MATROSKA_CHAPTERS_ID = 0x1043A770
MATROSKA_CHAPTER_UID_ID = 0x73C4
MATROSKA_EDITION_ENTRY_ID = 0x45B9
MATROSKA_CHAPTER_ATOM_ID = 0xB6
MATROSKA_CHAPTER_TIME_START_ID = 0x91
MATROSKA_CHAPTER_TIME_END_ID = 0x92
MATROSKA_CHAPTER_FLAG_ENABLED_ID = 0x4598
MATROSKA_CHAPTER_DISPLAY_ID = 0x80
MATROSKA_CHAPTER_LANGUAGE_ID = 0x437C
MATROSKA_CHAPTER_STRING_ID = 0x85
MATROSKA_ATTACHMENTS_ID = 0x1941A469
MATROSKA_ATTACHED_FILE_ID = 0x61A7
MATROSKA_FILE_DESC_ID = 0x467E
MATROSKA_FILE_NAME_ID = 0x466E
MATROSKA_FILE_MIME_TYPE_ID = 0x4660
MATROSKA_FILE_DATA_ID = 0x465C
MATROSKA_SEEKHEAD_ID = 0x114D9B74
MATROSKA_SEEK_ID = 0x4DBB
MATROSKA_SEEKID_ID = 0x53AB
MATROSKA_SEEK_POSITION_ID = 0x53AC
MATROSKA_TAGS_ID = 0x1254C367
MATROSKA_TAG_ID = 0x7373
MATROSKA_TARGETS_ID = 0x63C0
MATROSKA_TARGET_TYPE_VALUE_ID = 0x68CA
MATROSKA_TARGET_TYPE_ID = 0x63CA
MATRSOKA_TAGS_TRACK_UID_ID = 0x63C5
MATRSOKA_TAGS_EDITION_UID_ID = 0x63C9
MATRSOKA_TAGS_CHAPTER_UID_ID = 0x63C4
MATRSOKA_TAGS_ATTACHMENT_UID_ID = 0x63C6
MATROSKA_SIMPLE_TAG_ID = 0x67C8
MATROSKA_TAG_NAME_ID = 0x45A3
MATROSKA_TAG_LANGUAGE_ID = 0x447A
MATROSKA_TAG_STRING_ID = 0x4487
MATROSKA_TAG_BINARY_ID = 0x4485
# See mkv spec for details:
# http://www.matroska.org/technical/specs/index.html
# Map to convert to well known codes
# http://haali.cs.msu.ru/mkv/codecs.pdf
FOURCCMap = {
'V_THEORA': 'THEO',
'V_SNOW': 'SNOW',
'V_MPEG4/ISO/ASP': 'MP4V',
'V_MPEG4/ISO/AVC': 'AVC1',
'A_AC3': 0x2000,
'A_MPEG/L3': 0x0055,
'A_MPEG/L2': 0x0050,
'A_MPEG/L1': 0x0050,
'A_DTS': 0x2001,
'A_PCM/INT/LIT': 0x0001,
'A_PCM/FLOAT/IEEE': 0x003,
'A_TTA1': 0x77a1,
'A_WAVPACK4': 0x5756,
'A_VORBIS': 0x6750,
'A_FLAC': 0xF1AC,
'A_AAC': 0x00ff,
'A_AAC/': 0x00ff
}
def matroska_date_to_datetime(date):
    """
    Convert a date string in Matroska's date format to a datetime object.

    Matroska dates are "YYYY-MM-DD HH:MM:SS.MSS" with precision dropped from
    the right (so "2004" and "2003-05-01" are both valid).  The original
    string is returned unchanged when no precision level matches.
    """
    # Split the full format string on its separators so that one
    # (separator, value) pair can be dropped per failed attempt.
    parts = re.split(r'([-:. ])', '%Y-%m-%d %H:%M:%S.%f')
    while parts:
        try:
            return datetime.strptime(date, ''.join(parts))
        except ValueError:
            # Drop the least-significant component and retry.
            del parts[-2:]
    return date
def matroska_bps_to_bitrate(bps):
    """
    Try to convert a free-form bps string into a bitrate (bits per second).

    Returns None when the string cannot be interpreted at all.
    """
    match = re.search(r'([\d.]+)\s*(\D.*)', bps)
    if match:
        # Keep only the numeric part; unknown units fall through to the
        # bare-number heuristic below.
        bps, unit = match.groups()
        # Ordered so specific units win ('kbit' before 'bit', 'kbyte' before 'byte').
        for keyword, factor in (('kbit', 1024), ('kbyte', 1024 * 8), ('byte', 8)):
            if keyword in unit:
                return float(bps) * factor
        if 'bps' in unit or 'bit' in unit:
            return float(bps)
    if bps.replace('.', '').isdigit():
        number = float(bps)
        # Bare values under 30000 are assumed to be kilobits.
        return number * 1024 if number < 30000 else number
# Used to convert the official matroska tag names (only lower-cased) to core
# attributes. tag name -> attr, filter
TAGS_MAP = {
# From Media core
u'title': ('title', None),
u'subtitle': ('caption', None),
u'comment': ('comment', None),
u'url': ('url', None),
u'artist': ('artist', None),
u'keywords': ('keywords', lambda s: [word.strip() for word in s.split(',')]),
u'composer_nationality': ('country', None),
u'date_released': ('datetime', None),
u'date_recorded': ('datetime', None),
u'date_written': ('datetime', None),
# From Video core
u'encoder': ('encoder', None),
u'bps': ('bitrate', matroska_bps_to_bitrate),
u'part_number': ('trackno', int),
u'total_parts': ('trackof', int),
u'copyright': ('copyright', None),
u'genre': ('genre', None),
u'actor': ('actors', None),
u'written_by': ('writer', None),
u'producer': ('producer', None),
u'production_studio': ('studio', None),
u'law_rating': ('rating', None),
u'summary': ('summary', None),
u'synopsis': ('synopsis', None),
}
class EbmlEntity:
    """
    This is class that is responsible to handle one Ebml entity as described in
    the Matroska/Ebml spec.

    An entity is a (variable-length id, variable-length size, payload) triple;
    leading CRC32 elements are transparently skipped but their size is kept so
    callers can still advance through the buffer correctly.
    """
    def __init__(self, inbuf):
        # Compute the EBML id
        # Set the CRC len to zero
        self.crc_len = 0
        # Now loop until we find an entity without CRC
        try:
            self.build_entity(inbuf)
        except IndexError:
            raise ParseError()
        while self.get_id() == MATROSKA_CRC32_ID:
            # Remember the skipped CRC element's size (see get_crc_len()).
            self.crc_len += self.get_total_len()
            inbuf = inbuf[self.get_total_len():]
            self.build_entity(inbuf)

    def build_entity(self, inbuf):
        # Parse one entity header (id + size descriptor) and slice its payload.
        self.compute_id(inbuf)
        if self.id_len == 0:
            log.error(u'EBML entity not found, bad file format')
            raise ParseError()
        self.entity_len, self.len_size = self.compute_len(inbuf[self.id_len:])
        self.entity_data = inbuf[self.get_header_len() : self.get_total_len()]
        # ebml_length keeps the declared payload size; entity_len is clamped
        # to the bytes actually present in the buffer.
        self.ebml_length = self.entity_len
        self.entity_len = min(len(self.entity_data), self.entity_len)
        # if the data size is 8 or less, it could be a numeric value
        self.value = 0
        if self.entity_len <= 8:
            # Accumulate the payload big-endian into an unsigned integer.
            for pos, shift in zip(range(self.entity_len), range((self.entity_len - 1) * 8, -1, -8)):
                self.value |= ord(self.entity_data[pos]) << shift

    def add_data(self, data):
        # Append more payload bytes, never past the declared EBML length.
        maxlen = self.ebml_length - len(self.entity_data)
        if maxlen <= 0:
            return
        self.entity_data += data[:maxlen]
        self.entity_len = len(self.entity_data)

    def compute_id(self, inbuf):
        # Decode the variable-length (1-4 byte) EBML id; the position of the
        # first set bit in the leading byte gives the id width.  Leaves
        # id_len at 0 when the buffer is too short.
        self.id_len = 0
        if len(inbuf) < 1:
            return 0
        first = ord(inbuf[0])
        if first & 0x80:
            self.id_len = 1
            self.entity_id = first
        elif first & 0x40:
            if len(inbuf) < 2:
                return 0
            self.id_len = 2
            self.entity_id = ord(inbuf[0]) << 8 | ord(inbuf[1])
        elif first & 0x20:
            if len(inbuf) < 3:
                return 0
            self.id_len = 3
            self.entity_id = (ord(inbuf[0]) << 16) | (ord(inbuf[1]) << 8) | \
                             (ord(inbuf[2]))
        elif first & 0x10:
            if len(inbuf) < 4:
                return 0
            self.id_len = 4
            self.entity_id = (ord(inbuf[0]) << 24) | (ord(inbuf[1]) << 16) | \
                             (ord(inbuf[2]) << 8) | (ord(inbuf[3]))
        self.entity_str = inbuf[0:self.id_len]

    def compute_len(self, inbuf):
        # Decode the EBML variable-length size descriptor.  Returns a tuple
        # (payload length, descriptor size in bytes).
        # NOTE(review): the local `len` shadows the builtin within this method.
        if not inbuf:
            return 0, 0
        i = num_ffs = 0
        len_mask = 0x80
        len = ord(inbuf[0])
        while not len & len_mask:
            i += 1
            len_mask >>= 1
            if i >= 8:
                return 0, 0
        len &= len_mask - 1
        if len == len_mask - 1:
            num_ffs += 1
        for p in range(i):
            len = (len << 8) | ord(inbuf[p + 1])
            if len & 0xff == 0xff:
                num_ffs += 1
        if num_ffs == i + 1:
            # All value bits set means "unknown size" in EBML; treat as 0.
            len = 0
        return len, i + 1

    def get_crc_len(self):
        # Total size of the CRC32 elements skipped in front of this entity.
        return self.crc_len

    def get_value(self):
        # Payload interpreted as an unsigned big-endian integer (<= 8 bytes).
        return self.value

    def get_float_value(self):
        # Payload interpreted as IEEE 754 float (4 bytes) or double (8 bytes);
        # any other payload length yields 0.0.
        if len(self.entity_data) == 4:
            return unpack('!f', self.entity_data)[0]
        elif len(self.entity_data) == 8:
            return unpack('!d', self.entity_data)[0]
        return 0.0

    def get_data(self):
        # Raw payload bytes.
        return self.entity_data

    def get_utf8(self):
        # Payload decoded as UTF-8, invalid bytes replaced.
        return unicode(self.entity_data, 'utf-8', 'replace')

    def get_str(self):
        # Payload decoded as ASCII, invalid bytes replaced.
        return unicode(self.entity_data, 'ascii', 'replace')

    def get_id(self):
        # Numeric EBML id of this entity.
        return self.entity_id

    def get_str_id(self):
        # The raw id bytes as they appear in the stream.
        return self.entity_str

    def get_len(self):
        # Payload length actually available in the buffer.
        return self.entity_len

    def get_total_len(self):
        # Full on-disk size of the entity: payload + id + size descriptor.
        return self.entity_len + self.id_len + self.len_size

    def get_header_len(self):
        # Size of the header only (id + size descriptor).
        return self.id_len + self.len_size
class Matroska(core.AVContainer):
"""
Matroska video and audio parser. If at least one video stream is
detected it will set the type to MEDIA_AV.
"""
    def __init__(self, file):
        """Parse Matroska metadata from the open *file* object.

        Raises ParseError when the buffer is empty or the EBML header is
        missing; a missing segment only logs and leaves the object partly
        populated.
        """
        core.AVContainer.__init__(self)
        self.samplerate = 1
        self.file = file
        # Read enough that we're likely to get the full seekhead (FIXME: kludge)
        buffer = file.read(2000)
        if len(buffer) == 0:
            # Regular File end
            raise ParseError()
        # Check the Matroska header
        header = EbmlEntity(buffer)
        if header.get_id() != MATROSKA_HEADER_ID:
            raise ParseError()
        log.debug(u'HEADER ID found %08X' % header.get_id())
        self.mime = 'video/x-matroska'
        self.type = 'Matroska'
        self.has_idx = False
        self.objects_by_uid = {}
        # Now get the segment
        self.segment = segment = EbmlEntity(buffer[header.get_total_len():])
        # Record file offset of segment data for seekheads
        self.segment.offset = header.get_total_len() + segment.get_header_len()
        if segment.get_id() != MATROSKA_SEGMENT_ID:
            log.debug(u'SEGMENT ID not found %08X' % segment.get_id())
            return
        log.debug(u'SEGMENT ID found %08X' % segment.get_id())
        try:
            # Only the seekhead is walked here; it points to the remaining
            # top-level elements (tracks, tags, chapters, ...).
            for elem in self.process_one_level(segment):
                if elem.get_id() == MATROSKA_SEEKHEAD_ID:
                    self.process_elem(elem)
        except ParseError:
            pass
        if not self.has_idx:
            # No cues element found: flag the file as possibly corrupt.
            log.warning(u'File has no index')
            self._set('corrupt', True)
def process_elem(self, elem):
elem_id = elem.get_id()
log.debug(u'BEGIN: process element %r' % hex(elem_id))
if elem_id == MATROSKA_SEGMENT_INFO_ID:
duration = 0
scalecode = 1000000.0
for ielem in self.process_one_level(elem):
ielem_id = ielem.get_id()
if ielem_id == MATROSKA_TIMECODESCALE_ID:
scalecode = ielem.get_value()
elif ielem_id == MATROSKA_DURATION_ID:
duration = ielem.get_float_value()
elif ielem_id == MATROSKA_TITLE_ID:
self.title = ielem.get_utf8()
elif ielem_id == MATROSKA_DATE_UTC_ID:
timestamp = unpack('!q', ielem.get_data())[0] / 10.0 ** 9
# Date is offset 2001-01-01 00:00:00 (timestamp 978307200.0)
self.timestamp = int(timestamp + 978307200)
self.length = duration * scalecode / 1000000000.0
elif elem_id == MATROSKA_TRACKS_ID:
self.process_tracks(elem)
elif elem_id == MATROSKA_CHAPTERS_ID:
self.process_chapters(elem)
elif elem_id == MATROSKA_ATTACHMENTS_ID:
self.process_attachments(elem)
elif elem_id == MATROSKA_SEEKHEAD_ID:
self.process_seekhead(elem)
elif elem_id == MATROSKA_TAGS_ID:
self.process_tags(elem)
elif elem_id == MATROSKA_CUES_ID:
self.has_idx = True
log.debug(u'END: process element %r' % hex(elem_id))
return True
def process_seekhead(self, elem):
for seek_elem in self.process_one_level(elem):
if seek_elem.get_id() != MATROSKA_SEEK_ID:
continue
for sub_elem in self.process_one_level(seek_elem):
if sub_elem.get_id() == MATROSKA_SEEKID_ID:
if sub_elem.get_value() == MATROSKA_CLUSTER_ID:
# Not interested in these.
return
elif sub_elem.get_id() == MATROSKA_SEEK_POSITION_ID:
self.file.seek(self.segment.offset + sub_elem.get_value())
buffer = self.file.read(100)
try:
elem = EbmlEntity(buffer)
except ParseError:
continue
# Fetch all data necessary for this element.
elem.add_data(self.file.read(elem.ebml_length))
self.process_elem(elem)
def process_tracks(self, tracks):
tracksbuf = tracks.get_data()
index = 0
while index < tracks.get_len():
trackelem = EbmlEntity(tracksbuf[index:])
log.debug (u'ELEMENT %X found' % trackelem.get_id())
self.process_track(trackelem)
index += trackelem.get_total_len() + trackelem.get_crc_len()
    def process_one_level(self, item):
        """Generator yielding each child EBML element directly under *item*."""
        buf = item.get_data()
        index = 0
        while index < item.get_len():
            if len(buf[index:]) == 0:
                break
            elem = EbmlEntity(buf[index:])
            yield elem
            # Advance past the element plus any CRC elements it skipped.
            index += elem.get_total_len() + elem.get_crc_len()
def set_track_defaults(self, track):
track.language = 'eng'
def process_track(self, track):
# Collapse generator into a list since we need to iterate over it
# twice.
elements = [x for x in self.process_one_level(track)]
track_type = [x.get_value() for x in elements if x.get_id() == MATROSKA_TRACK_TYPE_ID]
if not track_type:
log.debug(u'Bad track: no type id found')
return
track_type = track_type[0]
track = None
if track_type == MATROSKA_VIDEO_TRACK:
log.debug(u'Video track found')
track = self.process_video_track(elements)
elif track_type == MATROSKA_AUDIO_TRACK:
log.debug(u'Audio track found')
track = self.process_audio_track(elements)
elif track_type == MATROSKA_SUBTITLES_TRACK:
log.debug(u'Subtitle track found')
track = core.Subtitle()
self.set_track_defaults(track)
track.id = len(self.subtitles)
self.subtitles.append(track)
for elem in elements:
self.process_track_common(elem, track)
def process_track_common(self, elem, track):
elem_id = elem.get_id()
if elem_id == MATROSKA_TRACK_LANGUAGE_ID:
track.language = elem.get_str()
log.debug(u'Track language found: %r' % track.language)
elif elem_id == MATROSKA_NAME_ID:
track.title = elem.get_utf8()
elif elem_id == MATROSKA_TRACK_NUMBER_ID:
track.trackno = elem.get_value()
elif elem_id == MATROSKA_TRACK_FLAG_ENABLED_ID:
track.enabled = bool(elem.get_value())
elif elem_id == MATROSKA_TRACK_FLAG_DEFAULT_ID:
track.default = bool(elem.get_value())
elif elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_CODEC_PRIVATE_ID:
track.codec_private = elem.get_data()
elif elem_id == MATROSKA_TRACK_UID_ID:
self.objects_by_uid[elem.get_value()] = track
def process_video_track(self, elements):
track = core.VideoStream()
# Defaults
track.codec = u'Unknown'
track.fps = 0
self.set_track_defaults(track)
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_FRAME_DURATION_ID:
try:
track.fps = 1 / (pow(10, -9) * (elem.get_value()))
except ZeroDivisionError:
pass
elif elem_id == MATROSKA_VIDEO_SETTINGS_ID:
d_width = d_height = None
for settings_elem in self.process_one_level(elem):
settings_elem_id = settings_elem.get_id()
if settings_elem_id == MATROSKA_VIDEO_WIDTH_ID:
track.width = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_HEIGHT_ID:
track.height = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_WIDTH_ID:
d_width = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_DISPLAY_HEIGHT_ID:
d_height = settings_elem.get_value()
elif settings_elem_id == MATROSKA_VIDEO_INTERLACED_ID:
value = int(settings_elem.get_value())
self._set('interlaced', value)
if None not in [d_width, d_height]:
track.aspect = float(d_width) / d_height
else:
self.process_track_common(elem, track)
# convert codec information
# http://haali.cs.msu.ru/mkv/codecs.pdf
if track.codec in FOURCCMap:
track.codec = FOURCCMap[track.codec]
elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
elif track.codec.endswith('FOURCC') and len(track.codec_private or '') == 40:
track.codec = track.codec_private[16:20]
elif track.codec.startswith('V_REAL/'):
track.codec = track.codec[7:]
elif track.codec.startswith('V_'):
# FIXME: add more video codecs here
track.codec = track.codec[2:]
track.id = len(self.video)
self.video.append(track)
return track
def process_audio_track(self, elements):
track = core.AudioStream()
track.codec = u'Unknown'
self.set_track_defaults(track)
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CODEC_ID:
track.codec = elem.get_str()
elif elem_id == MATROSKA_AUDIO_SETTINGS_ID:
for settings_elem in self.process_one_level(elem):
settings_elem_id = settings_elem.get_id()
if settings_elem_id == MATROSKA_AUDIO_SAMPLERATE_ID:
track.samplerate = settings_elem.get_float_value()
elif settings_elem_id == MATROSKA_AUDIO_CHANNELS_ID:
track.channels = settings_elem.get_value()
else:
self.process_track_common(elem, track)
if track.codec in FOURCCMap:
track.codec = FOURCCMap[track.codec]
elif '/' in track.codec and track.codec.split('/')[0] + '/' in FOURCCMap:
track.codec = FOURCCMap[track.codec.split('/')[0] + '/']
elif track.codec.startswith('A_'):
track.codec = track.codec[2:]
track.id = len(self.audio)
self.audio.append(track)
return track
def process_chapters(self, chapters):
elements = self.process_one_level(chapters)
for elem in elements:
if elem.get_id() == MATROSKA_EDITION_ENTRY_ID:
buf = elem.get_data()
index = 0
while index < elem.get_len():
sub_elem = EbmlEntity(buf[index:])
if sub_elem.get_id() == MATROSKA_CHAPTER_ATOM_ID:
self.process_chapter_atom(sub_elem)
index += sub_elem.get_total_len() + sub_elem.get_crc_len()
def process_chapter_atom(self, atom):
elements = self.process_one_level(atom)
chap = core.Chapter()
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_CHAPTER_TIME_START_ID:
# Scale timecode to seconds (float)
chap.pos = elem.get_value() / 1000000 / 1000.0
elif elem_id == MATROSKA_CHAPTER_FLAG_ENABLED_ID:
chap.enabled = elem.get_value()
elif elem_id == MATROSKA_CHAPTER_DISPLAY_ID:
# Matroska supports multiple (chapter name, language) pairs for
# each chapter, so chapter names can be internationalized. This
# logic will only take the last one in the list.
for display_elem in self.process_one_level(elem):
if display_elem.get_id() == MATROSKA_CHAPTER_STRING_ID:
chap.name = display_elem.get_utf8()
elif elem_id == MATROSKA_CHAPTER_UID_ID:
self.objects_by_uid[elem.get_value()] = chap
log.debug(u'Chapter %r found', chap.name)
chap.id = len(self.chapters)
self.chapters.append(chap)
def process_attachments(self, attachments):
buf = attachments.get_data()
index = 0
while index < attachments.get_len():
elem = EbmlEntity(buf[index:])
if elem.get_id() == MATROSKA_ATTACHED_FILE_ID:
self.process_attachment(elem)
index += elem.get_total_len() + elem.get_crc_len()
def process_attachment(self, attachment):
elements = self.process_one_level(attachment)
name = desc = mimetype = ""
data = None
for elem in elements:
elem_id = elem.get_id()
if elem_id == MATROSKA_FILE_NAME_ID:
name = elem.get_utf8()
elif elem_id == MATROSKA_FILE_DESC_ID:
desc = elem.get_utf8()
elif elem_id == MATROSKA_FILE_MIME_TYPE_ID:
mimetype = elem.get_data()
elif elem_id == MATROSKA_FILE_DATA_ID:
data = elem.get_data()
# Right now we only support attachments that could be cover images.
# Make a guess to see if this attachment is a cover image.
if mimetype.startswith("image/") and u"cover" in (name + desc).lower() and data:
self.thumbnail = data
log.debug(u'Attachment %r found' % name)
def process_tags(self, tags):
# Tags spec: http://www.matroska.org/technical/specs/tagging/index.html
# Iterate over Tags children. Tags element children is a
# Tag element (whose children are SimpleTags) and a Targets element
# whose children specific what objects the tags apply to.
for tag_elem in self.process_one_level(tags):
# Start a new dict to hold all SimpleTag elements.
tags_dict = core.Tags()
# A list of target uids this tags dict applies too. If empty,
# tags are global.
targets = []
for sub_elem in self.process_one_level(tag_elem):
if sub_elem.get_id() == MATROSKA_SIMPLE_TAG_ID:
self.process_simple_tag(sub_elem, tags_dict)
elif sub_elem.get_id() == MATROSKA_TARGETS_ID:
# Targets element: if there is no uid child (track uid,
# chapter uid, etc.) then the tags dict applies to the
# whole file (top-level Media object).
for target_elem in self.process_one_level(sub_elem):
target_elem_id = target_elem.get_id()
if target_elem_id in (MATRSOKA_TAGS_TRACK_UID_ID, MATRSOKA_TAGS_EDITION_UID_ID,
MATRSOKA_TAGS_CHAPTER_UID_ID, MATRSOKA_TAGS_ATTACHMENT_UID_ID):
targets.append(target_elem.get_value())
elif target_elem_id == MATROSKA_TARGET_TYPE_VALUE_ID:
# Target types not supported for now. (Unclear how this
# would fit with kaa.metadata.)
pass
if targets:
# Assign tags to all listed uids
for target in targets:
try:
self.objects_by_uid[target].tags.update(tags_dict)
self.tags_to_attributes(self.objects_by_uid[target], tags_dict)
except KeyError:
log.warning(u'Tags assigned to unknown/unsupported target uid %d', target)
else:
self.tags.update(tags_dict)
self.tags_to_attributes(self, tags_dict)
def process_simple_tag(self, simple_tag_elem, tags_dict):
"""
Returns a dict representing the Tag element.
"""
name = lang = value = children = None
binary = False
for elem in self.process_one_level(simple_tag_elem):
elem_id = elem.get_id()
if elem_id == MATROSKA_TAG_NAME_ID:
name = elem.get_utf8().lower()
elif elem_id == MATROSKA_TAG_STRING_ID:
value = elem.get_utf8()
elif elem_id == MATROSKA_TAG_BINARY_ID:
value = elem.get_data()
binary = True
elif elem_id == MATROSKA_TAG_LANGUAGE_ID:
lang = elem.get_utf8()
elif elem_id == MATROSKA_SIMPLE_TAG_ID:
if children is None:
children = core.Tags()
self.process_simple_tag(elem, children)
if children:
# Convert ourselves to a Tags object.
children.value = value
children.langcode = lang
value = children
else:
if name.startswith('date_'):
# Try to convert date to a datetime object.
value = matroska_date_to_datetime(value)
value = core.Tag(value, lang, binary)
if name in tags_dict:
# Multiple items of this tag name.
if not isinstance(tags_dict[name], list):
# Convert to a list
tags_dict[name] = [tags_dict[name]]
# Append to list
tags_dict[name].append(value)
else:
tags_dict[name] = value
    def tags_to_attributes(self, obj, tags):
        """Map entries of *tags* onto core attributes of *obj* (or of self).

        Uses TAGS_MAP to translate official Matroska tag names into core
        attribute names, applying the optional per-tag filter function.
        """
        # Convert tags to core attributes.
        for name, tag in tags.items():
            if isinstance(tag, dict):
                # Nested tags dict, recurse.
                self.tags_to_attributes(obj, tag)
                continue
            elif name not in TAGS_MAP:
                continue
            attr, filter = TAGS_MAP[name]
            if attr not in obj._keys and attr not in self._keys:
                # Tag is not in any core attribute for this object or global,
                # so skip.
                continue
            # Pull value out of Tag object or list of Tag objects.
            value = [item.value for item in tag] if isinstance(tag, list) else tag.value
            if filter:
                try:
                    value = [filter(item) for item in value] if isinstance(value, list) else filter(value)
                except Exception, e:
                    # Best effort: keep the unfiltered value on conversion error.
                    log.warning(u'Failed to convert tag to core attribute: %r', e)
            # Special handling for tv series recordings. The 'title' tag
            # can be used for both the series and the episode name. The
            # same is true for trackno which may refer to the season
            # and the episode number. Therefore, if we find these
            # attributes already set we try some guessing.
            if attr == 'trackno' and getattr(self, attr) is not None:
                # delete trackno and save season and episode
                self.season = self.trackno
                self.episode = value
                self.trackno = None
                continue
            if attr == 'title' and getattr(self, attr) is not None:
                # store current value of title as series and use current
                # value of title as title
                self.series = self.title
            # Prefer the target object's attribute; fall back to the container.
            if attr in obj._keys:
                setattr(obj, attr, value)
            else:
                setattr(self, attr, value)
Parser = Matroska
| gpl-3.0 |
FusionSP/android_external_chromium_org | tools/ipc_fuzzer/play_testcase.py | 56 | 3131 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper around chrome.
Replaces all the child processes (renderer, GPU, plugins and utility) with the
IPC fuzzer. The fuzzer will then play back a specified testcase.
Depends on ipc_fuzzer being available on the same directory as chrome.
"""
import argparse
import os
import platform
import subprocess
import sys
def main():
    """Build and run a chrome command line whose child processes are replaced
    by the IPC fuzzer, replaying the testcase given on the command line.

    Returns 1 when the chrome or fuzzer binary is missing, otherwise chrome's
    exit status.
    """
    desc = 'Wrapper to run chrome with child processes replaced by IPC fuzzers'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--out-dir', dest='out_dir', default='out',
                        help='output directory under src/ directory')
    parser.add_argument('--build-type', dest='build_type', default='Release',
                        help='Debug vs. Release build')
    parser.add_argument('--gdb-browser', dest='gdb_browser', default=False,
                        action='store_true',
                        help='run browser process inside gdb')
    parser.add_argument('testcase',
                        help='IPC file to be replayed')
    parser.add_argument('chrome_args',
                        nargs=argparse.REMAINDER,
                        help='any additional arguments are passed to chrome')
    args = parser.parse_args()

    chrome_binary = 'chrome'
    fuzzer_binary = 'ipc_fuzzer_replay'

    # Locate the build directory relative to this script's location
    # (the script lives two levels below src/).
    script_path = os.path.realpath(__file__)
    ipc_fuzzer_dir = os.path.dirname(script_path)
    src_dir = os.path.abspath(os.path.join(ipc_fuzzer_dir, os.pardir, os.pardir))
    out_dir = os.path.join(src_dir, args.out_dir)
    build_dir = os.path.join(out_dir, args.build_type)

    chrome_path = os.path.join(build_dir, chrome_binary)
    if not os.path.exists(chrome_path):
        print 'chrome executable not found at ', chrome_path
        return 1

    fuzzer_path = os.path.join(build_dir, fuzzer_binary)
    if not os.path.exists(fuzzer_path):
        print 'fuzzer executable not found at ', fuzzer_path
        print ('ensure GYP_DEFINES="enable_ipc_fuzzer=1" and build target ' +
               fuzzer_binary + '.')
        return 1

    # Chrome switches that set a launcher/prefix for each child process type;
    # each one will be pointed at the fuzzer binary.
    prefixes = {
        '--renderer-cmd-prefix',
        '--gpu-launcher',
        '--plugin-launcher',
        '--ppapi-plugin-launcher',
        '--utility-cmd-prefix',
    }

    chrome_command = [
        chrome_path,
        '--ipc-fuzzer-testcase=' + args.testcase,
        '--no-sandbox',
        '--disable-kill-after-bad-ipc',
    ]

    if args.gdb_browser:
        chrome_command = ['gdb', '--args'] + chrome_command

    launchers = {}
    for prefix in prefixes:
        launchers[prefix] = fuzzer_path

    # Forward extra chrome args; when the caller already supplies one of the
    # launcher switches, prepend its value to the fuzzer launcher instead of
    # passing it straight through.
    for arg in args.chrome_args:
        if arg.find('=') != -1:
            switch, value = arg.split('=', 1)
            if switch in prefixes:
                launchers[switch] = value + ' ' + launchers[switch]
                continue
        chrome_command.append(arg)

    for switch, value in launchers.items():
        chrome_command.append(switch + '=' + value)

    command_line = ' '.join(['\'' + arg + '\'' for arg in chrome_command])
    print 'Executing: ' + command_line

    return subprocess.call(chrome_command)
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
campbe13/openhatch | vendor/packages/mechanize/test-tools/twisted-ftpserver.py | 22 | 2670 | import optparse
import sys
import twisted.cred.checkers
import twisted.cred.credentials
import twisted.cred.portal
import twisted.internet
import twisted.protocols.ftp
from twisted.python import filepath, log
from zope.interface import implements
def make_ftp_shell(avatar_id, root_path):
    """Return an FTP shell rooted at *root_path* for the given avatar.

    Anonymous logins get a read-only anonymous shell; any other avatar gets
    a full read/write shell.
    """
    if avatar_id is twisted.cred.checkers.ANONYMOUS:
        return twisted.protocols.ftp.FTPAnonymousShell(root_path)
    else:
        return twisted.protocols.ftp.FTPShell(root_path)
class FTPRealm(object):
    """twisted.cred realm that hands out FTP shells rooted at *root_path*."""
    implements(twisted.cred.portal.IRealm)

    def __init__(self, root_path):
        self._root_path = filepath.FilePath(root_path)

    def requestAvatar(self, avatarId, mind, *interfaces):
        # Only the IFTPShell interface is supported; anything else is refused.
        for iface in interfaces:
            if iface is twisted.protocols.ftp.IFTPShell:
                avatar = make_ftp_shell(avatarId, self._root_path)
                # (interface, avatar, logout callable) per IRealm's contract;
                # shells without a logout method get a no-op.
                return (twisted.protocols.ftp.IFTPShell,
                        avatar,
                        getattr(avatar, "logout", lambda: None))
        raise NotImplementedError()
class FtpServerFactory(object):
    """Test-fixture FTP server bound to localhost.

    port = FtpServerFactory("/tmp", 2121).makeListener()
    self.addCleanup(port.stopListening)
    """
    def __init__(self, root_path, port):
        factory = twisted.protocols.ftp.FTPFactory()
        realm = FTPRealm(root_path)
        portal = twisted.cred.portal.Portal(realm)
        # Allow anonymous access plus one hard-coded test account
        # (john/john); fine here because this server is a test fixture.
        portal.registerChecker(twisted.cred.checkers.AllowAnonymousAccess(),
                               twisted.cred.credentials.IAnonymous)
        checker = twisted.cred.checkers.\
            InMemoryUsernamePasswordDatabaseDontUse()
        checker.addUser("john", "john")
        portal.registerChecker(checker)
        factory.tld = root_path
        factory.userAnonymous = "anon"
        factory.portal = portal
        factory.protocol = twisted.protocols.ftp.FTP
        self._factory = factory
        self._port = port

    def makeListener(self):
        # XXX use 0 instead of self._port?
        # Listen on localhost only so the fixture is never externally reachable.
        return twisted.internet.reactor.listenTCP(
            self._port, self._factory, interface="127.0.0.1")
def parse_options(args):
    """Parse command-line arguments of the form: [--log] [--port N] ROOT_PATH.

    The first positional argument is stored on the result as `root_path`.
    """
    parser = optparse.OptionParser()
    for flag, extra in (("--log", dict(action="store_true")),
                        ("--port", dict(type="int", default=2121))):
        parser.add_option(flag, **extra)
    options, positional = parser.parse_args(args)
    options.root_path = positional[0]
    return options
def main(argv):
    """Run an FTP server rooted at the given path until the reactor stops."""
    options = parse_options(argv[1:])
    if options.log:
        # Twisted logging goes to stdout when --log is passed.
        log.startLogging(sys.stdout)
    factory = FtpServerFactory(options.root_path, options.port)
    factory.makeListener()
    # Blocks until the reactor is stopped (e.g. by SIGINT).
    twisted.internet.reactor.run()
if __name__ == "__main__":
main(sys.argv)
| agpl-3.0 |
vincepandolfo/django | django/contrib/gis/sitemaps/views.py | 144 | 2365 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.functions import AsKML, Transform
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
from django.core.exceptions import FieldDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connections
from django.http import Http404
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
    """
    This view generates KML for the given app label, model, and field name.
    The field name must be that of a geographic field.

    Raises Http404 for an unknown model or a non-geographic field; returns
    an HttpResponse produced by render_to_kml/render_to_kmz.
    """
    placemarks = []
    try:
        klass = apps.get_model(label, model)
    except LookupError:
        raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))
    if field_name:
        try:
            field = klass._meta.get_field(field_name)
            if not isinstance(field, GeometryField):
                # Funnel non-geometry fields into the same 404 as genuinely
                # missing fields.
                raise FieldDoesNotExist
        except FieldDoesNotExist:
            raise Http404('Invalid geometry field.')
    # NOTE(review): the code below assumes field_name was supplied; with
    # field_name=None, AsKML(None) / getattr(mod, None) would fail --
    # confirm all callers pass a field name.
    connection = connections[using]
    if connection.features.has_AsKML_function:
        # Database will take care of transformation.
        placemarks = klass._default_manager.using(using).annotate(kml=AsKML(field_name))
    else:
        # If the database offers no KML method, we use the `kml`
        # attribute of the lazy geometry instead.
        placemarks = []
        if connection.features.has_Transform_function:
            # Transform to WGS84 (SRID 4326) in the database, exposing the
            # result under a '<field>_4326' annotation.
            qs = klass._default_manager.using(using).annotate(
                **{'%s_4326' % field_name: Transform(field_name, 4326)})
            field_name += '_4326'
        else:
            qs = klass._default_manager.using(using).all()
        for mod in qs:
            mod.kml = getattr(mod, field_name).kml
            placemarks.append(mod)
    # Getting the render function and rendering to the correct.
    if compress:
        render = render_to_kmz
    else:
        render = render_to_kml
    return render('gis/kml/placemarks.kml', {'places': placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
    """
    This view returns KMZ for the given app label, model, and field name.

    KMZ is the zip-compressed variant of KML; this simply delegates to
    kml() with compress=True.
    """
    return kml(request, label, model, field_name, compress=True, using=using)
| bsd-3-clause |
woutersmet/Zeosummer | share/plugins/builder/tube.py | 2 | 11813 | # Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2009 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from zeobuilder import context
from zeobuilder.actions.composed import ImmediateWithMemory, Parameters, UserError
from zeobuilder.actions.collections.menu import MenuInfo
from zeobuilder.moltools import yield_atoms
from zeobuilder.undefined import Undefined
from zeobuilder.gui.fields_dialogs import FieldsDialogSimple
import zeobuilder.actions.primitive as primitive
import zeobuilder.gui.fields as fields
import zeobuilder.authors as authors
from molmod.transformations import Translation
from molmod.units import angstrom
from molmod.unit_cell import UnitCell
import numpy, gtk, copy
class CreateTube(ImmediateWithMemory):
    """Zeobuilder action that rolls the current 2D-periodic model into a tube.

    The active universe must have exactly two active cell vectors (a 2D
    periodic sheet).  The sheet is repeated according to the chiral indices
    (n, m) and then either kept flat or folded onto a cylinder, replacing
    the universe contents with the result.
    """
    description = "Create Tube"
    menu_info = MenuInfo("default/_Object:tools/_Builder:generate", "_Create tube", order=(0, 4, 1, 6, 1, 0))
    authors = [authors.toon_verstraelen]
    @staticmethod
    def analyze_selection(parameters=None):
        """Return True when this action is applicable to the current model."""
        if not ImmediateWithMemory.analyze_selection(parameters): return False
        # The model must be periodic in exactly two directions.
        if context.application.model.universe.cell_active.sum() != 2: return False
        return True
    # Dialog definition for the tube parameters: chiral indices (n, m), a
    # flat/folded toggle, and mutually exclusive periodic / fixed-length
    # length specifications.
    parameters_dialog = FieldsDialogSimple(
        "Tube parameters",
        fields.group.Table(fields=[
            fields.faulty.Int(
                label_text="n",
                attribute_name="n",
                minimum=1,
                maximum=100,
            ),
            fields.faulty.Int(
                label_text="m",
                attribute_name="m",
                minimum=0,
                maximum=100,
            ),
            fields.edit.CheckButton(
                label_text="Flat",
                attribute_name="flat",
            ),
            fields.optional.RadioOptional(
                fields.group.Table(
                    label_text="Periodic (along tube axis)",
                    fields=[
                        fields.faulty.Length(
                            label_text="Maximum tube length",
                            attribute_name="max_length",
                            low=0.0,
                            low_inclusive=False,
                        ),
                        fields.faulty.Length(
                            label_text="Maximum mismatch",
                            attribute_name="max_error",
                            low=0.0,
                            low_inclusive=False,
                        ),
                    ]
                )
            ),
            fields.optional.RadioOptional(
                fields.group.Table(
                    label_text="Not periodic",
                    fields=[
                        fields.faulty.Length(
                            label_text="Tube length",
                            attribute_name="tube_length",
                            low=0.0,
                            low_inclusive=False,
                        ),
                    ]
                )
            ),
        ]),
        ((gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL), (gtk.STOCK_OK, gtk.RESPONSE_OK)),
    )
    @classmethod
    def default_parameters(cls):
        """Return the default dialog parameters (periodic-tube mode)."""
        result = Parameters()
        result.n = 5
        result.m = 1
        result.flat = False
        result.max_length = 50*angstrom
        result.max_error = 0.01*angstrom
        # Wrapping tube_length in Undefined selects the "periodic" option.
        result.tube_length = Undefined(50*angstrom)
        return result
    def do(self):
        """Build the tube, replacing the current universe contents."""
        # the indices (n,m) that define the tube, see e.g. the wikipedia page
        # about nanotubes for the interpretation of these indices:
        # http://en.wikipedia.org/wiki/Carbon_nanotube
        n = self.parameters.n
        m = self.parameters.m
        periodic_tube = isinstance(self.parameters.tube_length, Undefined)
        universe = context.application.model.universe
        def define_flat():
            "Reads and converts the unit cell vectors from the current model."
            # some parts of the algorithm have been arranged sub functions like
            # these, to reduce the number of local variables in self.do. This
            # should also clarify the code.
            active, inactive = universe.get_active_inactive()
            lengths, angles = universe.get_parameters()
            a = lengths[active[0]]
            b = lengths[active[1]]
            theta = angles[inactive[0]]
            return (
                numpy.array([a, 0], float),
                numpy.array([b*numpy.cos(theta), b*numpy.sin(theta)], float)
            )
        flat_a, flat_b = define_flat()
        def create_pattern():
            "Read the atom positions and transform them to the flat coordinates"
            active, inactive = universe.get_active_inactive()
            tmp_cell = UnitCell()
            tmp_cell.add_cell_vector(universe.cell[:,active[0]])
            tmp_cell.add_cell_vector(universe.cell[:,active[1]])
            r = tmp_cell.calc_align_rotation_matrix()
            return [
                (atom.number, numpy.dot(r, atom.get_absolute_frame().t))
                for atom in yield_atoms([universe])
            ]
        pattern = create_pattern()
        def define_big_periodic():
            "Based on (n,m) calculate the size of the periodic sheet (that will be folded into a tube)."
            big_a = n*flat_a - m*flat_b
            norm_a = numpy.linalg.norm(big_a)
            radius = norm_a/(2*numpy.pi)
            big_x = big_a/norm_a
            big_y = numpy.array([-big_x[1], big_x[0]], float)
            big_b = None
            stack_vector = flat_b - flat_a*numpy.dot(big_x, flat_b)/numpy.dot(big_x, flat_a)
            stack_length = numpy.linalg.norm(stack_vector)
            nominator = numpy.linalg.norm(stack_vector - flat_b)
            denominator = numpy.linalg.norm(flat_a)
            fraction = nominator/denominator
            stack_size = 1
            # Search for the smallest number of stacked rows that closes the
            # period within max_error, giving up beyond max_length.
            while True:
                repeat = fraction*stack_size
                if stack_length*stack_size > self.parameters.max_length:
                    break
                if abs(repeat - round(repeat))*denominator < self.parameters.max_error:
                    big_b = stack_vector*stack_size
                    break
                stack_size += 1
            if big_b is None:
                raise UserError("Could not create a periodic tube shorter than the given maximum length.")
            rotation = numpy.array([big_x, big_y], float)
            return big_a, big_b, rotation, stack_vector, stack_size, radius
        def define_big_not_periodic():
            "Based on (n,m) calculate the size of the non-periodic sheet (that will be folded into a tube)."
            big_a = n*flat_a - m*flat_b
            norm_a = numpy.linalg.norm(big_a)
            radius = norm_a/(2*numpy.pi)
            big_x = big_a/norm_a
            big_y = numpy.array([-big_x[1], big_x[0]], float)
            stack_vector = flat_b - flat_a*numpy.dot(big_x, flat_b)/numpy.dot(big_x, flat_a)
            stack_length = numpy.linalg.norm(stack_vector)
            stack_size = int(self.parameters.tube_length/stack_length)
            big_b = stack_vector*stack_size
            rotation = numpy.array([big_x, big_y], float)
            return big_a, big_b, rotation, stack_vector, stack_size, radius
        if periodic_tube:
            big_a, big_b, rotation, stack_vector, stack_size, radius = define_big_periodic()
        else:
            big_a, big_b, rotation, stack_vector, stack_size, radius = define_big_not_periodic()
        def yield_translations():
            "Yields the indices of the periodic images that are part of the tube."
            to_fractional = numpy.linalg.inv(numpy.array([big_a, big_b]).transpose())
            col_len = int(numpy.linalg.norm(big_a + m*stack_vector)/numpy.linalg.norm(flat_a))+4
            shift = numpy.dot(stack_vector - flat_b, flat_a)/numpy.linalg.norm(flat_a)**2
            for row in xrange(-m-1, stack_size+1):
                col_start = int(numpy.floor(row*shift))-1
                for col in xrange(col_start, col_start+col_len):
                    p = col*flat_a + row*flat_b
                    i = numpy.dot(to_fractional, p)
                    # Keep only images strictly inside the big sheet (small
                    # tolerance at the upper boundary avoids duplicates).
                    if (i >= 0).all() and (i < 1-1e-15).all():
                        yield p
                    #yield p, (i >= 0).all() and (i < 1).all()
        def yield_pattern():
            # Copies so that the per-image in-place updates below do not
            # corrupt the shared pattern.
            for number, coordinate in pattern:
                yield number, coordinate.copy()
        # first delete everything the universe:
        for child in copy.copy(universe.children):
            primitive.Delete(child)
        # add the new atoms
        Atom = context.application.plugins.get_node("Atom")
        if self.parameters.flat:
            rot_a = numpy.dot(rotation, big_a)
            rot_b = numpy.dot(rotation, big_b)
            big_cell = numpy.array([
                [rot_a[0], rot_b[0], 0],
                [rot_a[1], rot_b[1], 0],
                [0, 0, 10*angstrom],
            ], float)
            primitive.SetProperty(universe, "cell", big_cell)
            primitive.SetProperty(universe, "cell_active", numpy.array([True, periodic_tube, False], bool))
            for p in yield_translations():
                for number, coordinate in yield_pattern():
                    coordinate[:2] += p
                    coordinate[:2] = numpy.dot(rotation, coordinate[:2])
                    translation = Translation()
                    translation.t[:] = coordinate
                    primitive.Add(Atom(number=number, transformation=translation), universe)
        else:
            tube_length = numpy.linalg.norm(big_b)
            primitive.SetProperty(universe, "cell", numpy.diag([radius*2, radius*2, tube_length]))
            primitive.SetProperty(universe, "cell_active", numpy.array([False, False, periodic_tube], bool))
            for p in yield_translations():
                for number, coordinate in yield_pattern():
                    coordinate[:2] += p
                    coordinate[:2] = numpy.dot(rotation, coordinate[:2])
                    # Roll the flat sheet onto a cylinder of the computed
                    # radius; the sheet x coordinate becomes the arc angle.
                    translation = Translation()
                    translation.t[0] = (radius+coordinate[2])*numpy.cos(coordinate[0]/radius)
                    translation.t[1] = (radius+coordinate[2])*numpy.sin(coordinate[0]/radius)
                    translation.t[2] = coordinate[1]
                    primitive.Add(Atom(number=number, transformation=translation), universe)
# Zeobuilder plugin registry: maps action names to their action classes.
actions = {
    "CreateTube": CreateTube,
}
| gpl-3.0 |
amyvmiwei/kbengine | kbe/src/lib/python/Lib/distutils/tests/test_config_cmd.py | 95 | 2602 | """Tests for distutils.command.config."""
import unittest
import os
import sys
from test.support import run_unittest
from distutils.command.config import dump_file, config
from distutils.tests import support
from distutils import log
class ConfigTestCase(support.LoggingSilencer,
                     support.TempdirManager,
                     unittest.TestCase):
    """Tests for distutils.command.config."""
    def _info(self, msg, *args):
        # Replacement for distutils.log.info that records each output line.
        for line in msg.splitlines():
            self._logs.append(line)
    def setUp(self):
        super(ConfigTestCase, self).setUp()
        # Capture log output in self._logs for assertions; the original
        # log.info is restored in tearDown().
        self._logs = []
        self.old_log = log.info
        log.info = self._info
    def tearDown(self):
        log.info = self.old_log
        super(ConfigTestCase, self).tearDown()
    def test_dump_file(self):
        # dump_file() should log the header line plus every line of the file.
        this_file = os.path.splitext(__file__)[0] + '.py'
        f = open(this_file)
        try:
            numlines = len(f.readlines())
        finally:
            f.close()
        dump_file(this_file, 'I am the header')
        self.assertEqual(len(self._logs), numlines+1)
    @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
    def test_search_cpp(self):
        pkg_dir, dist = self.create_dist()
        cmd = config(dist)
        # simple pattern searches: 0 when the pattern matches the
        # preprocessor output, 1 when it does not.
        match = cmd.search_cpp(pattern='xxx', body='/* xxx */')
        self.assertEqual(match, 0)
        match = cmd.search_cpp(pattern='_configtest', body='/* xxx */')
        self.assertEqual(match, 1)
    def test_finalize_options(self):
        # finalize_options does a bit of transformation
        # on options
        pkg_dir, dist = self.create_dist()
        cmd = config(dist)
        cmd.include_dirs = 'one%stwo' % os.pathsep
        cmd.libraries = 'one'
        cmd.library_dirs = 'three%sfour' % os.pathsep
        cmd.ensure_finalized()
        # os.pathsep-separated strings become lists; scalars are wrapped.
        self.assertEqual(cmd.include_dirs, ['one', 'two'])
        self.assertEqual(cmd.libraries, ['one'])
        self.assertEqual(cmd.library_dirs, ['three', 'four'])
    def test_clean(self):
        # _clean removes files
        tmp_dir = self.mkdtemp()
        f1 = os.path.join(tmp_dir, 'one')
        f2 = os.path.join(tmp_dir, 'two')
        self.write_file(f1, 'xxx')
        self.write_file(f2, 'xxx')
        for f in (f1, f2):
            self.assertTrue(os.path.exists(f))
        pkg_dir, dist = self.create_dist()
        cmd = config(dist)
        cmd._clean(f1, f2)
        for f in (f1, f2):
            self.assertFalse(os.path.exists(f))
def test_suite():
    """Return the suite of all ConfigTestCase tests.

    Uses unittest.TestLoader instead of unittest.makeSuite, which is
    deprecated since Python 3.11 and removed in 3.13.
    """
    return unittest.TestLoader().loadTestsFromTestCase(ConfigTestCase)
if __name__ == "__main__":
    run_unittest(test_suite())
| lgpl-3.0 |
GeoscienceAustralia/PyRate | tests/test_algorithm.py | 1 | 8635 | # This Python module is part of the PyRate software package.
#
# Copyright 2020 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module contains tests for the algorithm.py PyRate module.
"""
from datetime import date
from math import pi, cos, sin, radians
from numpy import array, reshape, squeeze
from os.path import join
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from pyrate.core.algorithm import (least_squares_covariance,
is_square,
unit_vector,
ifg_date_lookup,
get_all_epochs,
get_epochs,
first_second_ids,
factorise_integer,
)
from pyrate.core.config import parse_namelist
from pyrate.core.shared import Ifg, convert_radians_to_mm
from tests.common import small5_mock_ifgs, SML_TEST_TIF, UnitTestAdaptation
class TestLeastSquaresTests(UnitTestAdaptation):
    """
    Unit tests for the PyRate least_squares_covariance() implementation.
    """
    @staticmethod
    def test_least_squares_covariance():
        # Overdetermined system: 3 observations, 2 parameters, unit weights.
        b = array([[13, 7.2, 5.7]]).T
        A = array([[1, 0.4, 0.3], [1, 1, 1]]).T
        v = array([[1, 1, 1]]).T
        r = least_squares_covariance(A, b, v)
        # Pre-computed expected solution for this fixture.
        exp = [10.1628, 2.8744]
        assert_array_almost_equal(r.T.squeeze(), exp, decimal=4)
    def test_least_squares_covariance_overdetermined(self):
        # must be overdetermined, ie. more observations than params
        b = array([[10]]).T
        A = array([[1]]).T
        v = array([[1]]).T
        self.assertRaises(ValueError, least_squares_covariance, A, b, v)
        # try non transposed style
        b = array([[10]])
        A = array([[1]])
        v = array([[1]])
        self.assertRaises(ValueError, least_squares_covariance, A, b, v)
class TestAlgorithmTests(UnitTestAdaptation):
    """
    Misc unittests for functions in the algorithm module.
    """
    def test_factorise(self):
        # factorise_integer() returns a pair of integer factors; a float
        # argument appears to be truncated (76.5 behaves like 76).
        self.assertEqual(factorise_integer(1), (1, 1))
        self.assertEqual(factorise_integer(2), (2, 1))
        self.assertEqual(factorise_integer(4), (2, 2))
        self.assertEqual(factorise_integer(9), (3, 3))
        self.assertEqual(factorise_integer(76), (4, 19))
        self.assertEqual(factorise_integer(76.5), (4, 19))
        a, b = factorise_integer(12)
        self.assertEqual(type(a), int)
        self.assertEqual(type(b), int)
    def test_is_square(self):
        self.assertTrue(is_square(np.empty((2, 2))))
    def test_is_not_square(self):
        for shape in [(3, 2), (2, 3)]:
            self.assertFalse(is_square(np.empty(shape)))
    @staticmethod
    def test_phase_conversion():
        # ROIPAC interferograms in units of radians, verify conversion to mm
        xs, ys = 5, 7
        data = (np.arange(xs * ys) - 1.7) * 0.1  # fake a range of values
        data = np.where(data == 0, np.nan, data)
        wavelen = 0.0562356424
        exp = (data * wavelen * 1000) / (4 * pi)
        act = convert_radians_to_mm(data, wavelen)
        assert_allclose(exp, act)
    def test_unit_vector(self):
        # last values here simulate a descending pass
        incidence = [radians(x) for x in (34.3, 39.3, 29.3, 34.3)]
        azimuth = [radians(x) for x in (77.8, 77.9, 80.0, 282.2)]
        # Build the expected unit-vector components by hand.
        vert, ns, ew = [], [], []
        for i, a in zip(incidence, azimuth):
            vert.append(cos(i))
            ns.append(sin(i) * cos(a))
            ew.append(sin(i) * sin(a))
        sh = 4
        unitv = [array(ew), array(ns), array(vert)]
        unitv = [a.reshape(sh) for a in unitv]
        # NB: assumes radian inputs
        act = unit_vector(reshape(incidence, sh), reshape(azimuth, sh))
        for a, e in zip(act, unitv):
            assert_array_almost_equal(squeeze(a), e)
        # check unit vec components have correct signs
        E, N, V = act
        # test E/W component of ascending is +ve
        # NOTE(review): (E[:-2]).all() > 0 compares a bool to 0; this was
        # probably meant to be (E[:-2] > 0).all() -- confirm.
        self.assertTrue((E[:-2]).all() > 0)
        self.assertTrue(E[-1] < 0)  # test E/W component of descending is -ve
        self.assertTrue((N > 0).all())  # ensure all north values are positive
        # check unit vec components have correct magnitudes
        self.assertTrue((abs(V) > abs(E)).all())
        self.assertTrue((abs(V) > abs(N)).all())
        self.assertTrue((abs(E) > abs(N)).all())
class TestDateLookup(UnitTestAdaptation):
    """
    Tests for the algorithm.ifg_date_lookup() function.
    """
    @classmethod
    def setup_class(cls):
        # Shared mock interferograms for all tests in this class.
        cls.ifgs = small5_mock_ifgs()
    def test_ifg_date_lookup(self):
        # check reverse lookup of ifg given a first and second date tuple
        date_pair = (date(2006, 8, 28), date(2006, 12, 11))
        i = ifg_date_lookup(self.ifgs, date_pair)
        self.assertEqual(self.ifgs[0], i)
        # test with reversed date tuple, should reorder it according to age
        date_pair = (date(2006, 12, 11), date(2006, 11, 6))
        i = ifg_date_lookup(self.ifgs, date_pair)
        self.assertEqual(self.ifgs[1], i)
    def test_ifg_date_lookup_failure(self):
        # error when lookup cannot find an ifg given a date pair
        dates = (date(2006, 12, 11), date(2007, 3, 26))
        self.assertRaises(ValueError, ifg_date_lookup, self.ifgs, dates)
    def test_date_lookup_bad_inputs(self):
        # test some bad inputs to date lookup
        inputs = [(None, None), (1, 10), (34.56, 345.93),
                  (date(2007, 3, 26), ""), (date(2007, 3, 26), None)]
        for d in inputs:
            self.assertRaises(ValueError, ifg_date_lookup, self.ifgs, d)
# TODO: InitialModelTests
#class InitialModelTests(unittest.TestCase):
# def test_initial_model(self):
# 1. fake an RSC file with coords
# 2. fake a ones(shape) # could also make a ramp etc
# data is single band of DISPLACEMENT
#raise NotImplementedError
class TestEpochs(UnitTestAdaptation):
    """
    Unittests for the EpochList class.
    """
    def test_get_epochs(self):
        # get_epochs() must return the unique epoch dates, each epoch's
        # repeat count and its span in (decimal) years.
        def str2date(s):
            segs = s[:4], s[4:6], s[6:]  # year, month, day
            return date(*[int(sg) for sg in segs])
        raw_date = ['20060619', '20060828', '20061002', '20061106', '20061211',
                    '20070115', '20070219', '20070326', '20070430', '20070604',
                    '20070709', '20070813', '20070917']
        exp_dates = [str2date(d) for d in raw_date]
        exp_repeat = [1, 1, 3, 3, 4, 3, 3, 3, 3, 3, 3, 2, 2]
        exp_spans = [0, 0.1916, 0.2875, 0.3833, 0.4791, 0.5749, 0.6708, 0.7666,
                     0.8624, 0.9582, 1.0541, 1.1499, 1.2457]
        ifms = join(SML_TEST_TIF, "ifms_17")
        ifgs = [Ifg(join(SML_TEST_TIF, p)) for p in parse_namelist(ifms)]
        for i in ifgs:
            i.open()
        epochs = get_epochs(ifgs)[0]
        self.assertTrue((exp_dates == epochs.dates).all())
        self.assertTrue((exp_repeat == epochs.repeat).all())
        assert_array_almost_equal(exp_spans, epochs.spans, decimal=4)
    def test_get_all_epochs(self):
        # test function to extract all dates from sequence of ifgs
        ifgs = small5_mock_ifgs()
        for i in ifgs:
            i.nodata_value = 0
        dates = [date(2006, 8, 28), date(2006, 11, 6), date(2006, 12, 11),
                 date(2007, 1, 15), date(2007, 3, 26), date(2007, 9, 17)]
        self.assertEqual(dates, sorted(set(get_all_epochs(ifgs))))
    def test_get_epoch_count(self):
        # six unique dates exist across the five mock interferograms
        self.assertEqual(6, len(set(get_all_epochs(small5_mock_ifgs()))))
    def test_first_second_ids(self):
        # ids are assigned in chronological order, regardless of input order
        d0 = date(2006, 6, 19)
        d1 = date(2006, 8, 28)
        d2 = date(2006, 10, 2)
        d3 = date(2006, 11, 6)
        exp = {d0: 0, d1: 1, d2: 2, d3: 3}
        # test unordered and with duplicates
        self.assertEqual(exp, first_second_ids([d3, d0, d2, d1]))
        self.assertEqual(exp, first_second_ids([d3, d0, d2, d1, d3, d0]))
| apache-2.0 |
commshare/testLiveSRS | trunk/objs/CherryPy-3.2.4/cherrypy/process/plugins.py | 37 | 25462 | """Site services for use with a Web Site Process Bus."""
import os
import re
import signal as _signal
import sys
import time
import threading
from cherrypy._cpcompat import basestring, get_daemon, get_thread_ident, ntob, set, Timer, SetDaemonProperty
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file has
# "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
# Captured once at import time (see the comment block above): Autoreload
# joins this base onto any relative filenames it finds in sys.modules.
_module__file__base = os.getcwd()
class SimplePlugin(object):
    """Plugin base class which auto-subscribes methods for known channels.

    For every channel known to the bus, a method on the subclass with the
    same name (e.g. ``start``, ``stop``, ``exit``) is registered as that
    channel's listener by subscribe(), and removed by unsubscribe().
    """

    bus = None
    """A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine."""

    def __init__(self, bus):
        self.bus = bus

    def subscribe(self):
        """Register this object as a (multi-channel) listener on the bus."""
        for channel in self.bus.listeners:
            # Subscribe self.start, self.exit, etc. when defined.
            callback = getattr(self, channel, None)
            if callback is None:
                continue
            self.bus.subscribe(channel, callback)

    def unsubscribe(self):
        """Unregister this object as a listener on the bus."""
        for channel in self.bus.listeners:
            # Unsubscribe self.start, self.exit, etc. when defined.
            callback = getattr(self, channel, None)
            if callback is None:
                continue
            self.bus.unsubscribe(channel, callback)
class SignalHandler(object):
    """Register bus channels (and listeners) for system signals.

    You can modify what signals your application listens for, and what it does
    when it receives signals, by modifying :attr:`SignalHandler.handlers`,
    a dict of {signal name: callback} pairs. The default set is::

        handlers = {'SIGTERM': self.bus.exit,
                    'SIGHUP': self.handle_SIGHUP,
                    'SIGUSR1': self.bus.graceful,
                   }

    The :func:`SignalHandler.handle_SIGHUP` method calls
    :func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>`
    if the process is daemonized, but
    :func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>`
    if the process is attached to a TTY. This is because Unix window
    managers tend to send SIGHUP to terminal windows when the user closes them.

    Feel free to add signals which are not available on every platform. The
    :class:`SignalHandler` will ignore errors raised from attempting to register
    handlers for unknown signals.
    """
    handlers = {}
    """A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""
    signals = {}
    """A map from signal numbers to names."""
    # Build the number -> name map from the signal module, skipping the
    # SIG_* special values (SIG_DFL, SIG_IGN).
    for k, v in vars(_signal).items():
        if k.startswith('SIG') and not k.startswith('SIG_'):
            signals[v] = k
    del k, v
    def __init__(self, bus):
        self.bus = bus
        # Set default handlers
        self.handlers = {'SIGTERM': self.bus.exit,
                         'SIGHUP': self.handle_SIGHUP,
                         'SIGUSR1': self.bus.graceful,
                         }
        if sys.platform[:4] == 'java':
            # Jython cannot register SIGUSR1; fall back to SIGUSR2.
            del self.handlers['SIGUSR1']
            self.handlers['SIGUSR2'] = self.bus.graceful
            self.bus.log("SIGUSR1 cannot be set on the JVM platform. "
                         "Using SIGUSR2 instead.")
            self.handlers['SIGINT'] = self._jython_SIGINT_handler
        # Filled by set_handler() so unsubscribe() can restore state.
        self._previous_handlers = {}
    def _jython_SIGINT_handler(self, signum=None, frame=None):
        # See http://bugs.jython.org/issue1313
        self.bus.log('Keyboard Interrupt: shutting down bus')
        self.bus.exit()
    def subscribe(self):
        """Subscribe self.handlers to signals."""
        for sig, func in self.handlers.items():
            try:
                self.set_handler(sig, func)
            except ValueError:
                # Signal not available on this platform; ignore it.
                pass
    def unsubscribe(self):
        """Unsubscribe self.handlers from signals."""
        for signum, handler in self._previous_handlers.items():
            signame = self.signals[signum]
            if handler is None:
                self.bus.log("Restoring %s handler to SIG_DFL." % signame)
                handler = _signal.SIG_DFL
            else:
                self.bus.log("Restoring %s handler %r." % (signame, handler))
            try:
                our_handler = _signal.signal(signum, handler)
                if our_handler is None:
                    self.bus.log("Restored old %s handler %r, but our "
                                 "handler was not registered." %
                                 (signame, handler), level=30)
            except ValueError:
                self.bus.log("Unable to restore %s handler %r." %
                             (signame, handler), level=40, traceback=True)
    def set_handler(self, signal, listener=None):
        """Subscribe a handler for the given signal (number or name).

        If the optional 'listener' argument is provided, it will be
        subscribed as a listener for the given signal's channel.

        If the given signal name or number is not available on the current
        platform, ValueError is raised.
        """
        if isinstance(signal, basestring):
            signum = getattr(_signal, signal, None)
            if signum is None:
                raise ValueError("No such signal: %r" % signal)
            signame = signal
        else:
            try:
                signame = self.signals[signal]
            except KeyError:
                raise ValueError("No such signal: %r" % signal)
            signum = signal
        # Remember the previous handler so unsubscribe() can restore it.
        prev = _signal.signal(signum, self._handle_signal)
        self._previous_handlers[signum] = prev
        if listener is not None:
            self.bus.log("Listening for %s." % signame)
            self.bus.subscribe(signame, listener)
    def _handle_signal(self, signum=None, frame=None):
        """Python signal handler (self.set_handler subscribes it for you)."""
        signame = self.signals[signum]
        self.bus.log("Caught signal %s." % signame)
        # Publish on the channel named after the signal (e.g. 'SIGTERM').
        self.bus.publish(signame)
    def handle_SIGHUP(self):
        """Restart if daemonized, else exit."""
        if os.isatty(sys.stdin.fileno()):
            # not daemonized (may be foreground or background)
            self.bus.log("SIGHUP caught but not daemonized. Exiting.")
            self.bus.exit()
        else:
            self.bus.log("SIGHUP caught while daemonized. Restarting.")
            self.bus.restart()
try:
import pwd, grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
    """Drop privileges. uid/gid arguments not available on Windows.

    Special thanks to Gavin Baker: http://antonym.org/node/100.
    """
    def __init__(self, bus, umask=None, uid=None, gid=None):
        SimplePlugin.__init__(self, bus)
        # finalized is set after the first start(); on a bus restart the
        # privileges are not dropped a second time.
        self.finalized = False
        self.uid = uid
        self.gid = gid
        self.umask = umask
    def _get_uid(self):
        return self._uid
    def _set_uid(self, val):
        if val is not None:
            if pwd is None:
                self.bus.log("pwd module not available; ignoring uid.",
                             level=30)
                val = None
            elif isinstance(val, basestring):
                # Resolve a user name to its numeric uid.
                val = pwd.getpwnam(val)[2]
        self._uid = val
    uid = property(_get_uid, _set_uid,
        doc="The uid under which to run. Availability: Unix.")
    def _get_gid(self):
        return self._gid
    def _set_gid(self, val):
        if val is not None:
            if grp is None:
                self.bus.log("grp module not available; ignoring gid.",
                             level=30)
                val = None
            elif isinstance(val, basestring):
                # Resolve a group name to its numeric gid.
                val = grp.getgrnam(val)[2]
        self._gid = val
    gid = property(_get_gid, _set_gid,
        doc="The gid under which to run. Availability: Unix.")
    def _get_umask(self):
        return self._umask
    def _set_umask(self, val):
        if val is not None:
            try:
                os.umask
            except AttributeError:
                self.bus.log("umask function not available; ignoring umask.",
                             level=30)
                val = None
        self._umask = val
    umask = property(_get_umask, _set_umask,
        doc="""The default permission mode for newly created files and directories.

        Usually expressed in octal format, for example, ``0644``.
        Availability: Unix, Windows.
        """)
    def start(self):
        # uid/gid
        def current_ids():
            """Return the current (uid, gid) if available."""
            name, group = None, None
            if pwd:
                name = pwd.getpwuid(os.getuid())[0]
            if grp:
                group = grp.getgrgid(os.getgid())[0]
            return name, group
        if self.finalized:
            if not (self.uid is None and self.gid is None):
                self.bus.log('Already running as uid: %r gid: %r' %
                             current_ids())
        else:
            if self.uid is None and self.gid is None:
                if pwd or grp:
                    self.bus.log('uid/gid not set', level=30)
            else:
                self.bus.log('Started as uid: %r gid: %r' % current_ids())
            # Drop the group first: setgid/setgroups would fail after setuid.
            if self.gid is not None:
                os.setgid(self.gid)
                os.setgroups([])
            if self.uid is not None:
                os.setuid(self.uid)
            self.bus.log('Running as uid: %r gid: %r' % current_ids())
        # umask
        if self.finalized:
            if self.umask is not None:
                self.bus.log('umask already set to: %03o' % self.umask)
        else:
            if self.umask is None:
                self.bus.log('umask not set', level=30)
            else:
                old_umask = os.umask(self.umask)
                self.bus.log('umask old: %03o, new: %03o' %
                             (old_umask, self.umask))
        self.finalized = True
    # This is slightly higher than the priority for server.start
    # in order to facilitate the most common use: starting on a low
    # port (which requires root) and then dropping to another user.
    start.priority = 77
class Daemonizer(SimplePlugin):
    """Daemonize the running script.

    Use this with a Web Site Process Bus via::

        Daemonizer(bus).subscribe()

    When this component finishes, the process is completely decoupled from
    the parent environment. Please note that when this component is used,
    the return code from the parent process will still be 0 if a startup
    error occurs in the forked children. Errors in the initial daemonizing
    process still return proper exit codes. Therefore, if you use this
    plugin to daemonize, don't use the return code as an accurate indicator
    of whether the process fully started. In fact, that return code only
    indicates if the process succesfully finished the first fork.
    """
    def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
                 stderr='/dev/null'):
        SimplePlugin.__init__(self, bus)
        # Files that replace the standard streams after daemonizing.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.finalized = False
    def start(self):
        if self.finalized:
            self.bus.log('Already deamonized.')
        # forking has issues with threads:
        # http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
        # "The general problem with making fork() work in a multi-threaded
        # world is what to do with all of the threads..."
        # So we check for active threads:
        if threading.activeCount() != 1:
            self.bus.log('There are %r active threads. '
                         'Daemonizing now may cause strange failures.' %
                         threading.enumerate(), level=30)
        # Classic Unix double-fork sequence; references:
        # See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        # (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
        # and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
        # Finish up with the current stdout/stderr
        sys.stdout.flush()
        sys.stderr.flush()
        # Do first fork.
        try:
            pid = os.fork()
            if pid == 0:
                # This is the child process. Continue.
                pass
            else:
                # This is the first parent. Exit, now that we've forked.
                self.bus.log('Forking once.')
                os._exit(0)
        except OSError:
            # Python raises OSError rather than returning negative numbers.
            exc = sys.exc_info()[1]
            sys.exit("%s: fork #1 failed: (%d) %s\n"
                     % (sys.argv[0], exc.errno, exc.strerror))
        # Become session leader, detaching from the controlling terminal.
        os.setsid()
        # Do second fork
        try:
            pid = os.fork()
            if pid > 0:
                self.bus.log('Forking twice.')
                os._exit(0)  # Exit second parent
        except OSError:
            exc = sys.exc_info()[1]
            sys.exit("%s: fork #2 failed: (%d) %s\n"
                     % (sys.argv[0], exc.errno, exc.strerror))
        # Decouple from the parent environment.
        os.chdir("/")
        os.umask(0)
        si = open(self.stdin, "r")
        so = open(self.stdout, "a+")
        se = open(self.stderr, "a+")
        # os.dup2(fd, fd2) will close fd2 if necessary,
        # so we don't explicitly close stdin/out/err.
        # See http://docs.python.org/lib/os-fd-ops.html
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        self.bus.log('Daemonized to PID: %s' % os.getpid())
        self.finalized = True
    start.priority = 65
class PIDFile(SimplePlugin):
    """Maintain a PID file via a WSPBus.

    On 'start', writes the current process id to ``pidfile`` (once); on
    'exit', best-effort removes the file.
    """

    def __init__(self, bus, pidfile):
        SimplePlugin.__init__(self, bus)
        # Path of the PID file to maintain.
        self.pidfile = pidfile
        # True once the PID has been written; prevents rewriting on restarts.
        self.finalized = False

    def start(self):
        """Write os.getpid() to the PID file (only on the first start)."""
        pid = os.getpid()
        if self.finalized:
            self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
        else:
            # Use a context manager so the file is closed deterministically
            # (the original left the handle open until garbage collection).
            with open(self.pidfile, "wb") as f:
                f.write(ntob("%s" % pid, 'utf8'))
            self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
            self.finalized = True
    # Run after most other 'start' listeners so the PID reflects a live bus.
    start.priority = 70

    def exit(self):
        """Best-effort removal of the PID file; never mask shutdown signals."""
        try:
            os.remove(self.pidfile)
            self.bus.log('PID file removed: %r.' % self.pidfile)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Narrowed from a bare except: still best-effort (e.g. the file
            # may already be gone), but no longer swallows BaseExceptions.
            pass
class PerpetualTimer(Timer):
    """A responsive subclass of threading.Timer whose run() method repeats.

    Use this timer only when you really need a very interruptible timer;
    this checks its 'finished' condition up to 20 times a second, which can
    result in pretty high CPU usage.
    """

    def __init__(self, *args, **kwargs):
        "Override parent constructor to allow 'bus' to be provided."
        self.bus = kwargs.pop('bus', None)
        super(PerpetualTimer, self).__init__(*args, **kwargs)

    def run(self):
        while True:
            # Event.wait returns True once 'finished' has been set, which is
            # the signal to stop repeating.
            if self.finished.wait(self.interval):
                return
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                if self.bus:
                    self.bus.log(
                        "Error in perpetual timer thread function %r." %
                        self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise
class BackgroundTask(SetDaemonProperty, threading.Thread):
    """A subclass of threading.Thread whose run() method repeats.

    Use this class for most repeating tasks. It uses time.sleep() to wait
    for each interval, which isn't very responsive; that is, even if you call
    self.cancel(), you'll have to wait until the sleep() call finishes before
    the thread stops. To compensate, it defaults to being daemonic, which means
    it won't delay stopping the whole process.
    """

    def __init__(self, interval, function, args=None, kwargs=None, bus=None):
        """Create (but do not start) the repeating task.

        :param interval: seconds to sleep between calls to ``function``.
        :param function: callable invoked each period.
        :param args: positional arguments for ``function`` (defaults to none).
        :param kwargs: keyword arguments for ``function`` (defaults to none).
        :param bus: optional WSPBus used for error logging.
        """
        threading.Thread.__init__(self)
        self.interval = interval
        self.function = function
        # Fixed: the original used mutable default arguments (args=[],
        # kwargs={}), which are shared between all instances.
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.running = False
        self.bus = bus
        # default to daemonic
        self.daemon = True

    def cancel(self):
        """Ask the loop to stop after the current sleep finishes."""
        self.running = False

    def run(self):
        self.running = True
        while self.running:
            time.sleep(self.interval)
            # Re-check after sleeping: cancel() may have fired meanwhile.
            if not self.running:
                return
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                if self.bus:
                    self.bus.log("Error in background task thread function %r."
                                 % self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise
class Monitor(SimplePlugin):
    """WSPBus listener to periodically run a callback in its own thread."""

    callback = None
    """The function to call at intervals."""

    frequency = 60
    """The time in seconds between callback runs."""

    thread = None
    """A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>` thread."""

    def __init__(self, bus, callback, frequency=60, name=None):
        SimplePlugin.__init__(self, bus)
        self.callback = callback
        self.frequency = frequency
        self.thread = None
        self.name = name

    def start(self):
        """Start our callback in its own background thread."""
        if self.frequency > 0:
            threadname = self.name or self.__class__.__name__
            if self.thread is None:
                self.thread = BackgroundTask(self.frequency, self.callback,
                                             bus=self.bus)
                self.thread.setName(threadname)
                self.thread.start()
                self.bus.log("Started monitor thread %r." % threadname)
            else:
                self.bus.log("Monitor thread %r already started." % threadname)
    start.priority = 70

    def stop(self):
        """Stop our callback's background task thread."""
        if self.thread is None:
            # Fixed precedence bug: '%' binds tighter than 'or', so the
            # original expression was ("..." % self.name) or classname and the
            # class-name fallback was never used when self.name was None.
            self.bus.log("No thread running for %s." %
                         (self.name or self.__class__.__name__))
        else:
            if self.thread is not threading.currentThread():
                name = self.thread.getName()
                self.thread.cancel()
                # Daemonic tasks are abandoned; only join non-daemonic ones.
                if not get_daemon(self.thread):
                    self.bus.log("Joining %r" % name)
                    self.thread.join()
                self.bus.log("Stopped thread %r." % name)
            self.thread = None

    def graceful(self):
        """Stop the callback's background task thread and restart it."""
        self.stop()
        self.start()
class Autoreloader(Monitor):
    """Monitor which re-executes the process when files change.

    This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
    if any of the files it monitors change (or is deleted). By default, the
    autoreloader monitors all imported modules; you can add to the
    set by adding to ``autoreload.files``::

        cherrypy.engine.autoreload.files.add(myFile)

    If there are imported files you do *not* wish to monitor, you can adjust the
    ``match`` attribute, a regular expression. For example, to stop monitoring
    cherrypy itself::

        cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'

    Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
    the autoreload plugin takes a ``frequency`` argument. The default is
    1 second; that is, the autoreloader will examine files once each second.
    """

    files = None
    """The set of files to poll for modifications."""

    frequency = 1
    """The interval in seconds at which to poll for modified files."""

    match = '.*'
    """A regular expression by which to match filenames."""

    def __init__(self, bus, frequency=1, match='.*'):
        # filename -> last seen mtime (or None for modules with no .py file).
        self.mtimes = {}
        self.files = set()
        self.match = match
        # The Monitor drives self.run once per `frequency` seconds.
        Monitor.__init__(self, bus, self.run, frequency)

    def start(self):
        """Start our own background task thread for self.run."""
        if self.thread is None:
            # Fresh baseline of mtimes on each (re)start.
            self.mtimes = {}
        Monitor.start(self)
    start.priority = 70

    def sysfiles(self):
        """Return a Set of sys.modules filenames to monitor."""
        files = set()
        for k, m in sys.modules.items():
            if re.match(self.match, k):
                # Zipped packages are tracked via their archive file.
                if hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive'):
                    f = m.__loader__.archive
                else:
                    f = getattr(m, '__file__', None)
                    if f is not None and not os.path.isabs(f):
                        # ensure absolute paths so a os.chdir() in the app doesn't break me
                        f = os.path.normpath(os.path.join(_module__file__base, f))
                files.add(f)
        return files

    def run(self):
        """Reload the process if registered files have been modified."""
        for filename in self.sysfiles() | self.files:
            if filename:
                if filename.endswith('.pyc'):
                    # Watch the source file, not the compiled one.
                    filename = filename[:-1]

                oldtime = self.mtimes.get(filename, 0)
                if oldtime is None:
                    # Module with no .py file. Skip it.
                    continue

                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # Either a module with no .py file, or it's been deleted.
                    mtime = None

                if filename not in self.mtimes:
                    # First sighting: record the baseline.
                    # If a module has no .py file, this will be None.
                    self.mtimes[filename] = mtime
                else:
                    if mtime is None or mtime > oldtime:
                        # The file has been deleted or modified.
                        self.bus.log("Restarting because %s changed." % filename)
                        self.thread.cancel()
                        self.bus.log("Stopped thread %r." % self.thread.getName())
                        self.bus.restart()
                        return
class ThreadManager(SimplePlugin):
    """Manager for HTTP request threads.

    If you have control over thread creation and destruction, publish to
    the 'acquire_thread' and 'release_thread' channels (for each thread).
    This will register/unregister the current thread and publish to
    'start_thread' and 'stop_thread' listeners in the bus as needed.

    If threads are created and destroyed by code you do not control
    (e.g., Apache), then, at the beginning of every HTTP request,
    publish to 'acquire_thread' only. You should not publish to
    'release_thread' in this case, since you do not know whether
    the thread will be re-used or not. The bus will call
    'stop_thread' listeners for you when it stops.
    """

    threads = None
    """A map of {thread ident: index number} pairs."""

    def __init__(self, bus):
        self.threads = {}
        SimplePlugin.__init__(self, bus)
        # Make sure all four channels exist even before anyone subscribes.
        for channel in ('acquire_thread', 'start_thread',
                        'release_thread', 'stop_thread'):
            self.bus.listeners.setdefault(channel, set())

    def acquire_thread(self):
        """Run 'start_thread' listeners for the current thread.

        If the current thread has already been seen, any 'start_thread'
        listeners will not be run again.
        """
        ident = get_thread_ident()
        if ident in self.threads:
            return
        # We can't just use get_ident as the thread ID
        # because some platforms reuse thread ID's.
        index = len(self.threads) + 1
        self.threads[ident] = index
        self.bus.publish('start_thread', index)

    def release_thread(self):
        """Release the current thread and run 'stop_thread' listeners."""
        index = self.threads.pop(get_thread_ident(), None)
        if index is not None:
            self.bus.publish('stop_thread', index)

    def stop(self):
        """Release all threads and run all 'stop_thread' listeners."""
        for index in list(self.threads.values()):
            self.bus.publish('stop_thread', index)
        self.threads.clear()
    graceful = stop
| mit |
jiangzhuo/kbengine | kbe/tools/xlsx2py/xlsx2py/xlsx2py.py | 29 | 15312 | # -*- coding: gb2312 -*-
import sys
import re
import os
import string
import signal
import time
import codecs
import json
from ExcelTool import ExcelTool
from config import *
import functions
try:
import character
except:
character = functions
import xlsxtool
import xlsxError
import copy
SYS_CODE = sys.getdefaultencoding()
def siginit(sigNum, sigHandler):
    """Signal handler: print a farewell and terminate with exit status 1."""
    print("byebye")
    sys.exit(1)


# Install the handler for Ctrl-C (SIGINT).
signal.signal(signal.SIGINT, siginit)
def hasFunc(funcName):
    """Return True when funcName is defined in either the character or
    functions converter module."""
    return any(hasattr(module, funcName) for module in (character, functions))
def getFunc(funcName):
    """Look up the converter callable named funcName, preferring the
    character module over the functions module."""
    source = character if hasattr(character, funcName) else functions
    return getattr(source, funcName)
# Accumulated export results shared across all processed workbooks:
# data-name -> {key: row-dict}.
g_dctDatas = {}
# Shared output fragments (e.g. "globalDefs", "allDataDefs") collected while
# writing, appended across sheets/workbooks.
g_fdatas = {}
class xlsx2py(object):
    """
    Export excel data to a .py data file; encoding conversion is performed
    during the process.  (Original comments were GB2312 Chinese; translated
    to English here.  Runtime strings are kept byte-for-byte.)
    """
    def __init__(self, infile, outfile):
        # Custom traceback hook so error output can be localized.
        sys.excepthook = xlsxError.except_hook
        self.infile = os.path.abspath(infile)    # source excel file name
        self.outfile = os.path.abspath(outfile)  # output data file name
        return

    def __initXlsx(self):
        # Open the workbook; keep prompting (retry menu) until it opens.
        self.xbook = ExcelTool(self.infile)
        while not self.xbook.getWorkbook(forcedClose = True):
            xlsxtool.exportMenu(EXPORT_INFO_RTEXCEL, OCallback = self.resetXlsx)

    def resetXlsx(self):
        """
        Callback for the "O(ther)" menu choice: close the already-open excel
        workbook, then reopen it.
        """
        self.xbook.getWorkbook(forcedClose = True)

    def __initInfo(self):
        self.__exportSheetIndex = []  # indices of exportable sheets
        self.headerDict = {}          # first (definition) row of each export sheet, parsed to a dict
        self.mapDict = {}             # dict built from the mapping sheet (its first row is a description and is ignored)

    #####################main execution##########################
    def run(self):
        """
        Columns whose sign contains '$' need the mapping sheet, so the
        mapping dict is built first.
        """
        self.__initXlsx()        # initialize excel handling
        self.__initInfo()        # initialize export bookkeeping
        self.openFile()
        self.sth4Nth()           # move on to the next stage
        self.constructMapDict()  # build the mapping dict
        self.__onRun()

    def __onRun(self):
        self.writeLines = 0      # number of excel rows already written
        self.parseDefineLine()   # parse the file

    ###############find the mapping sheet and mark export sheets##################
    def sth4Nth(self):
        """
        something for nothing: both the mapping sheet and at least one
        export sheet must exist.
        """
        for index in range(1, self.xbook.getSheetCount() + 1):
            sheetName = self.xbook.getSheetNameByIndex(index)
            if sheetName == EXPORT_MAP_SHEET:
                self.__onFindMapSheet(index)
            if sheetName.startswith(EXPORT_PREFIX_CHAR):
                self.__onFindExportSheet(index)
        self.onSth4Nth()

    def onSth4Nth(self):
        """
        Validate that the mapping sheet and export sheets were found.
        """
        if not hasattr(self, 'mapIndex'):
            self.xlsxClear(EXPORT_ERROR_NOMAP)
        if len(self.__exportSheetIndex) == 0:
            xlsxError.error_input(EXPORT_ERROR_NOSHEET)
        return

    def __onFindMapSheet(self, mapIndex):
        self.mapIndex = mapIndex
        return

    def __onFindExportSheet(self, Eindex):
        """
        Record an exportable sheet's index.
        """
        self.__exportSheetIndex.append(Eindex)

    def constructMapDict(self):
        """
        Build the mapping dict; there is only one mapping sheet.
        """
        mapDict = {}
        sheet = self.xbook.getSheetByIndex(self.mapIndex)
        if not sheet:
            return

        for col in range(0, self.xbook.getRowCount(self.mapIndex)):
            colValues = self.xbook.getColValues(sheet, col)
            if colValues:
                for v in [e for e in colValues[1:] if e[0] and isinstance(e[0], str) and e[0].strip()]:
                    print (v)
                    # Normalize the full-width colon to an ASCII ':'.
                    mapStr = v[0].replace('£º', ":")
                    try:
                        k, v = mapStr.split(":")
                        k = str.strip(k)
                        v = str.strip(v)
                        mapDict[k] = v
                    except Exception as errstr:
                        # warning: the mapping sheet column needs checking
                        print( "waring£ºÐèÒª¼ì²é´ú¶Ô±í µÚ%dÁÐ, err=%s"%(col , errstr))

        self.__onConstruct(mapDict)
        return

    def __onConstruct(self, mapDict):
        """
        The mapping dict has been built.
        """
        self.mapDict = mapDict
        return

    #####################header (first row) checks#######################
    def parseDefineLine(self):
        self.__checkDefine()  # check the definitions are correct
        self.__checkData()    # check the data follows the rules

    def __reCheck(self, head):
        # Split a header cell of the form name[signs][func] into its parts.
        pattern = "(\w+)(\[.*])(\[\w+\])"
        reGroups = re.compile(pattern).match(head)
        if not reGroups:
            return ()
        return reGroups.groups()

    def __convertKeyName(self, name):
        # Turn numeric-looking key names into real int/float keys.
        try:
            tname = eval(name)
        except:
            pass
        else:
            if type(tname) == int or type(tname) == float:
                return tname
        return name

    def __checkDefine(self):
        """
        Check that every element of the first row matches the definition
        format "name[signs][func]" and that the keys follow the rules.
        """
        # "checking whether the header (first row) is correct"
        print( "¼ì²âÎļþÍ·(µÚÒ»ÐÐ)ÊÇ·ñÕýÈ·" )
        for index in self.__exportSheetIndex:
            self.sheetKeys = []
            headList = self.xbook.getRowValues(self.xbook.getSheetByIndex(index), EXPORT_DEFINE_ROW -1 )
            enName = []    # temp list used to detect duplicate names
            reTuples = []  # NOTE(review): unused variable
            self.headerDict[index] = {}
            for c, head in enumerate(headList):
                if head is None or head.strip() == '':
                    # A None/blank header cell means the whole column is ignored.
                    self.__onCheckSheetHeader(self.headerDict[index], c, None)
                    continue
                reTuple = self.__reCheck(head)
                if len(reTuple) == 3:
                    # The definition splits into three parts: name, signs,
                    # func; signs may be empty.
                    name, signs, funcName = reTuple[0], reTuple[1][1:-1], reTuple[2][1:-1]
                    name = self.__convertKeyName(name)
                    for s in signs:
                        # every sign must be one of the allowed signs
                        if s not in EXPORT_ALL_SIGNS:
                            self.xlsxClear(EXPORT_ERROR_NOSIGN, (EXPORT_DEFINE_ROW, c+1))
                    if EXPORT_SIGN_GTH in signs:
                        # this column is a key
                        self.sheetKeys.append(c)
                        if len(self.sheetKeys) > EXPORT_KEY_NUMS:
                            # too many key columns
                            self.xlsxClear(EXPORT_ERROR_NUMKEY, (EXPORT_DEFINE_ROW, c+1))
                    if name not in enName:
                        # names must not repeat
                        enName.append(name)
                    else:
                        self.xlsxClear(EXPORT_ERROR_REPEAT, \
                            (self.xbook.getSheetNameByIndex(index).encode(FILE_CODE), EXPORT_DEFINE_ROW, c+1))
                    if not hasFunc(funcName):
                        # the converter function must exist
                        self.xlsxClear(EXPORT_ERROR_NOFUNC, (xlsxtool.toGBK(funcName), c+1))
                else:
                    self.xlsxClear(EXPORT_ERROR_HEADER, (self.xbook.getSheetNameByIndex(index).encode(FILE_CODE), EXPORT_DEFINE_ROW, c+1))
                # Store the parsed header — it is used frequently later.
                self.__onCheckSheetHeader(self.headerDict[index], c, (name, signs, funcName))
        self.__onCheckDefine()
        return

    def __onCheckSheetHeader(self, DataDict, col, headerInfo):
        DataDict[col] = headerInfo

    def __onCheckDefine(self):
        if len(self.sheetKeys) != EXPORT_KEY_NUMS:
            # too few key columns is also an error
            self.xlsxClear(EXPORT_ERROR_NOKEY, ("ÐèÒª%d¶øÖ»ÓÐ%d"%(EXPORT_KEY_NUMS,len(self.sheetKeys))))
        # "header check passed"
        print( "ÎļþÍ·¼ì²âÕýÈ·", time.ctime(time.time()) )

    def sheetIndex2Data(self):
        # Group export-sheet indices by the data name they map to.
        self.sheet2Data = {}
        for index in self.__exportSheetIndex:
            SheetName = self.xbook.getSheetNameByIndex(index)
            sheetName = SheetName[SheetName.find(EXPORT_PREFIX_CHAR)+1:]
            if sheetName in self.mapDict:
                dataName = self.mapDict[sheetName]
                if dataName in self.sheet2Data:
                    self.sheet2Data[dataName].append(index)
                else:
                    self.sheet2Data[dataName] = [index]

    def __checkData(self):
        """
        Check that the column data follows the naming rules, and build the
        required dicts.
        """
        self.sheetIndex2Data()
        self.dctDatas = g_dctDatas
        self.hasExportedSheet = []
        for dataName, indexList in self.sheet2Data.items():
            self.curIndexMax = len(indexList)
            self.curProIndex = []
            for index in indexList:
                sheet = self.xbook.getSheetByIndex(index)
                self.curProIndex.append(index)
                cols = self.xbook.getRowCount(index)
                rows = self.xbook.getColCount(index)
                if dataName not in self.dctDatas:
                    self.dctDatas[dataName] = {}
                self.dctData = self.dctDatas[dataName]
                for row in range(3, rows + 1):
                    childDict = {}
                    for col in range(1, cols + 1):
                        val = (self.xbook.getText(sheet, row, col),)
                        if self.headerDict[index][col-1] is None:
                            # ignored column (blank header)
                            continue
                        name, sign, funcName = self.headerDict[index][col-1]
                        if '$' in sign and len(val[0]) > 0:
                            self.needReplace({'v':val[0], "pos":(row, col)})
                            # mapDict keys are unicode; convert before lookup.
                            v = self.mapDict[xlsxtool.GTOUC(xlsxtool.val2Str(val[0]))]
                        else:
                            v = val[0]
                        if EXPORT_SIGN_DOT in sign and v is None:
                            self.xlsxClear(EXPORT_ERROR_NOTNULL, (col, row))
                        try:
                            sv = v#xlsxtool.toGBK(v)
                        except:
                            sv = v
                        func = getFunc(funcName)
                        try:
                            v = func(self.mapDict, self.dctData, childDict, sv)
                        except Exception as errstr:
                            self.xlsxClear(EXPORT_ERROR_FUNC, (errstr, funcName, sv, row, col))
                        for ss in sign.replace('$',''):
                            # run the registered handler for each remaining sign
                            EXPORT_SIGN[ss](self,{"v":v,"pos":(row, col)})
                        #if isinstance(v, (isinstance, unicode)):
                        # try:
                        # v = v.decode("gb2312").encode("utf-8")
                        # except:
                        # pass
                        childDict[name] = v
                    # progress: "current row/total rows"
                    print( "µ±Ç°:%i/%i" % (row, rows) )
                    # tempKeys[-1] is this row's key (recorded by isKey).
                    self.dctData[self.tempKeys[-1]] = copy.deepcopy(childDict)
                self.writeHead()
            overFunc = self.mapDict.get('overFunc')
            if overFunc is not None:
                func = getFunc(overFunc)
                self.dctData = func(self.mapDict, self.dctDatas, self.dctData, dataName)
            self.dctDatas[dataName] = self.dctData
            g_dctDatas.update(self.dctDatas)
            self.__onCheckSheet()
            self.__onCheckData()
            self.writeFoot()

    def __onCheckSheet(self):
        # Reset the per-data-table key list.
        if hasattr(self, "tempKeys"):
            del self.tempKeys
        return

    def __onCheckData(self):
        self.exportSheet()

    ##############sign handlers registered in EXPORT_SIGN###################
    def isNotEmpty(self, cellData):
        # '.' sign: the cell must not be empty.
        if cellData['v'] is None:
            self.xlsxClear(EXPORT_ERROR_NOTNULL, (cellData['pos'], ))

    def needReplace(self, cellData):
        """'$' sign: macro substitution via the mapping sheet."""
        v = cellData["v"].strip()
        # Guard against numeric cells (1 vs "1"); mapDict keys are strings.
        if isinstance(v, float):
            v = str(int(v))
        if v not in self.mapDict:
            # check only — the actual replacement happens elsewhere
            self.xlsxClear(EXPORT_ERROR_NOTMAP, (cellData['pos'], v))

    def isKey(self, cellData):
        # '!' sign: the value is a key and must be unique.
        if not hasattr(self, "tempKeys"):
            self.tempKeys = []
        if cellData['v'] not in self.tempKeys:
            self.tempKeys.append(cellData['v'])
        else:
            self.xlsxClear(EXPORT_ERROR_REPKEY, (cellData['pos'], \
                (self.tempKeys.index(cellData['v'])+3, cellData['pos'][1] ), cellData['v']) )

    ###############export to py part######################
    def exportSheet(self):
        """
        Export.
        """
        self.__onExportSheet()
        return

    def __onExportSheet(self):
        """
        Turn the data into a py file.
        """
        self.writeXLSX2PY()
        return

    def openFile(self):
        """
        Create the output directory and open the output file.
        """
        dirPath = os.path.split(self.outfile)[0]
        if not os.path.isdir(dirPath):
            try:
                xlsxtool.createDir(dirPath)
            except:
                self.xlsxClear(EXPORT_ERROR_CPATH, (dirPath, ))
        try:
            fileHandler = codecs.open(self.outfile, "w+",'utf-8')
            #fileHandler = open(self.outfile, "w+")
        except:
            self.xlsxClear(EXPORT_ERROR_FILEOPEN, (self.outfile, ))
        # Directory created and file opened successfully.
        self.__onOpenFile(fileHandler)
        return

    def __onOpenFile(self, fileHandler):
        """
        The py file is open; we can write to it now.
        """
        self.fileName = self.outfile
        self.fileHandler = fileHandler
        del self.outfile

    def xlsxWrite(self, stream):
        """
        Write to the data file.
        """
        if not hasattr(self, "fileHandler"):
            self.xlsxClear(EXPORT_ERROR_FILEOPEN, ())
        try:
            self.fileHandler.write(stream)
        except Exception as errstr:
            self.xlsxClear(EXPORT_ERROR_IOOP, (errstr))

    def writeXLSX2PY(self):
        """
        Write the leading lines of the file.
        """
        self.writeBody()
        return

    def writeHead(self):
        # "start writing the file"
        print( "¿ªÊ¼Ð´ÈëÎļþ:", time.ctime(time.time()) )
        try:
            SheetName = self.xbook.getSheetNameByIndex(self.curProIndex[-1])
        except:
            # "error fetching the sheet name"
            print( "»ñÈ¡±íµÄÃû×Ö³ö´í" )
        sheetName = SheetName[SheetName.find(EXPORT_PREFIX_CHAR)+1:]
        if sheetName in self.mapDict:
            dataName = self.mapDict[sheetName]
            self.hasExportedSheet.append(self.curProIndex[-1])
        else:
            self.xlsxClear(2, (sheetName.encode(FILE_CODE),))
        stream = ""
        dataFileInfo = (self.infile + '.' + SheetName).encode("UTF-8")
        if len(self.hasExportedSheet) <= 1:
            stream = EXPORT_DATA_HEAD
        globalDefs = self.mapDict.get('globalDefs', '')
        if len(globalDefs) > 0:
            func = getFunc(globalDefs)
            globalDefs = func(self.dctData)
            if len(globalDefs) > 0:
                globalDefs += "\n"
        if "globalDefs" in g_fdatas:
            g_fdatas["globalDefs"] += globalDefs
        else:
            g_fdatas["globalDefs"] = globalDefs

    def writeBody(self):
        #for index in self.curProIndex:
        # xlsxError.info_input(EXPORT_INFO_ING, (self.xbook.getSheetNameByIndex(index).encode(FILE_CODE), ))
        self.xlsxWrite(EXPORT_DATA_HEAD)
        if "globalDefs" in g_fdatas:
            self.xlsxWrite(g_fdatas["globalDefs"])
        for dataName, datas in g_dctDatas.items():
            stream = dataName + "="
            #stream += xlsxtool.dict_to_text(datas) + "\n"
            stream += "%s\n" % (datas)
            self.xlsxWrite(stream)
            # Also emit a side-car .json file next to the .py output.
            jsonhandle = codecs.open(self.fileHandler.stream.name + "." + dataName + ".json", "w+",'utf-8')
            s = json.dumps(datas)
            jsonhandle.write("{%s}" % (s[1:-1]))
            jsonhandle.close()

    def writeFoot(self):
        """
        Write the file footer (only once all export sheets are written).
        """
        if len(self.hasExportedSheet) < len(self.__exportSheetIndex):
            return
        allDataDefs = self.mapDict.get('allDataDefs', '')
        if len(allDataDefs) > 0:
            func = getFunc(allDataDefs)
            allDataDefs = func(self.dctData)
            if "allDataDefs" in g_fdatas:
                g_fdatas["allDataDefs"] += allDataDefs
            else:
                g_fdatas["allDataDefs"] = allDataDefs
        stream = "\nallDatas = {\n"
        for dataName, indexList in self.sheet2Data.items():
            for index in indexList:
                SheetName = self.xbook.getSheetNameByIndex(index)
                sheetName = SheetName[SheetName.find(EXPORT_PREFIX_CHAR)+1:]
                stream += "\t'" + sheetName
                stream += "':"
                stream += dataName
                stream += ",\n"
        if len(allDataDefs) > 0:
            stream += "\t" + g_fdatas["allDataDefs"] + ",\n"
        stream +="}"
        self.xlsxWrite(stream)
        self.xlsxbyebye()
        # "finished writing"
        print( "дÍêÁËtime:", time.ctime(time.time()) )

    ##############misc##################
    def xlsxClose(self):
        """
        Close the documents.
        """
        if hasattr(self, "fileHandler"):
            self.fileHandler.close()
        self.xbook.close()
        return

    def xlsxClear(self, errno = 0, msg = ''):
        """
        Abnormal exit: clean up the opened Excel, then raise/exit.
        """
        self.xlsxClose()
        if errno > 0:
            raise xlsxError.xe(errno, msg)
        else:
            sys.exit(1)

    def xlsxbyebye(self):
        """
        Normal exit.
        """
        self.xlsxClose()
        return

    def getSheetsCounts(self):
        # NOTE(review): `reduce` is not a builtin in Python 3 — this needs
        # functools.reduce; confirm which interpreter runs this tool.
        return reduce(lambda x,y:x+y, \
            [self.xbook.getColCount(index) for index in self.__exportSheetIndex])
# Register the cell-sign handlers in the (config-defined) EXPORT_SIGN table:
# '.' = the cell must not be empty, '$' = value is substituted via the mapping
# sheet, '!' = the column is a key (uniqueness tracked in tempKeys).
EXPORT_SIGN['.'] = xlsx2py.isNotEmpty
EXPORT_SIGN['$'] = xlsx2py.needReplace
EXPORT_SIGN['!'] = xlsx2py.isKey
def main():
    """
    Usage:
        python xlsx2py.py data.py excelName1.xls(x) [excelName2.xls(x) ...]

    The first argument is the output .py data file; every following
    argument is an input excel workbook exported into it.
    (Fixed: the original usage text listed the arguments in the wrong
    order — the code reads argv[1] as the output and argv[2:] as inputs.)
    """
    try:
        outfile = sys.argv[1]
    except IndexError:
        # Narrowed from a bare except: only a missing argument should
        # trigger the usage text.
        print( main.__doc__ )
        return

    for infile in sys.argv[2:]:
        print( "¿ªÊ¼µ¼±í:[%s] max=%i" % (infile, len(sys.argv[2:])) )
        if os.path.isfile(infile):
            a = xlsx2py(infile, outfile)
            xlsxtool.exportMenu(EXPORT_INFO_OK)
            a.run()
        else:
            xlsxError.error_input(EXPORT_ERROR_NOEXISTFILE, (infile,))
    print( '-------------------------------THE END------------------------------------------------' )
    sys.exit()


if __name__ == '__main__':
    main()
| lgpl-3.0 |
MihaiMoldovanu/ansible | lib/ansible/module_utils/facts/system/chroot.py | 40 | 1029 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.collector import BaseFactCollector
def is_chroot():
    """Best-effort detection of whether this process runs inside a chroot.

    Checks the Debian ``debian_chroot`` environment convention first, then
    compares '/' with /proc/1/root, and finally falls back to the classic
    "root is inode #2" heuristic.
    """
    is_chroot = None

    if os.environ.get('debian_chroot', False):
        is_chroot = True
    else:
        my_root = os.stat('/')
        try:
            # check if my file system is the root one
            proc_root = os.stat('/proc/1/root/.')
            is_chroot = my_root.st_ino != proc_root.st_ino or my_root.st_dev != proc_root.st_dev
        except OSError:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit): os.stat only raises OSError here.
            # I'm not root or no proc, fallback to checking it is inode #2
            is_chroot = (my_root.st_ino != 2)

    return is_chroot
class ChrootFactCollector(BaseFactCollector):
    """Fact collector reporting whether the host runs inside a chroot."""
    name = 'chroot'
    _fact_ids = set(['is_chroot'])

    def collect(self, module=None, collected_facts=None):
        """Return the single 'is_chroot' fact."""
        facts = {'is_chroot': is_chroot()}
        return facts
| gpl-3.0 |
UCSBarchlab/PyRTL | tests/rtllib/test_matrix.py | 1 | 99917 | import random
import math
import unittest
import pyrtl
import pyrtl.rtllib.matrix as Matrix
class MatrixTestBase(unittest.TestCase):

    def check_against_expected(self, result, expected_output, floored=False):
        """Simulate `result` and compare it against `expected_output`.

        :param Matrix result: matrix that is the result of some operation
            under test (may also be a bare WireVector)
        :param list[list] expected_output: list of lists (or a plain int for
            scalar results) to compare against the simulated value
        :param bool floored: set when checking a matrix subtraction, so that
            negative expected entries are clamped to zero first
            (defaults to False)
        """
        output = pyrtl.Output(name='output')
        is_wire = isinstance(result, pyrtl.WireVector)
        output <<= result if is_wire else result.to_wirevector()

        sim = pyrtl.Simulation()
        sim.step({})
        raw = sim.inspect("output")
        if is_wire:
            actual = raw
        else:
            actual = Matrix.matrix_wv_to_list(
                raw, result.rows, result.columns, result.bits
            )

        if isinstance(actual, int):
            self.assertEqual(actual, expected_output)
            return

        num_rows = len(expected_output)
        num_cols = len(expected_output[0])
        for r in range(num_rows):
            for c in range(num_cols):
                want = expected_output[r][c]
                if floored and want < 0:
                    want = 0
                self.assertEqual(actual[r][c], want)
class TestMatrixInit(MatrixTestBase):
    """Tests for Matrix construction from lists and WireVectors."""

    def setUp(self):
        pyrtl.reset_working_block()

    @staticmethod
    def _random_matrix(rows, columns, bits):
        # Shared helper (deduplicates the two random tests below): build a
        # rows x columns list of random values in [1, 2**bits - 1].
        return [[random.randint(1, 2 ** bits - 1) for _ in range(columns)]
                for _ in range(rows)]

    def test_init_basic(self):
        self.init_int_matrix([[0]], 1, 1, 2)

    def test_init_basic_wirevector(self):
        self.init_wirevector([[0]], 1, 1, 2)

    def test_init_three_by_three(self):
        self.init_int_matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4)

    def test_init_three_by_three_wirevector(self):
        self.init_wirevector([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4)

    def test_init_one_by_four(self):
        self.init_int_matrix([[0, 0, 0, 0]], 1, 4, 4)

    def test_init_one_by_four_wirevector(self):
        self.init_wirevector([[0, 0, 0, 0]], 1, 4, 4)

    def test_init_fail_int_instead_of_matrix(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix(0, 1, 1, 2)

    def test_init_fail_zero_row(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0], [0], [0], [0]], 0, 4, 4)

    def test_init_fail_zero_row_wirevector(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_wirevector([[0], [0], [0], [0]], 0, 4, 4)

    def test_init_fail_bad_number_of_rows(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0], [0]], 1, 1, 4)

    def test_init_fail_bad_number_of_columns(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0, 0], [0], [0, 0]], 3, 2, 4)

    def test_init_fail_string_row(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0]], "1", 1, 2)

    def test_init_fail_negative_row(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0]], -1, 1, 2)

    def test_init_fail_zero_column(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0, 0, 0, 0]], 4, 0, 4)

    def test_init_fail_zero_column_wirevector(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_wirevector([[0, 0, 0, 0]], 4, 0, 4)

    def test_init_fail_string_column(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0]], 1, "1", 2)

    def test_init_fail_negative_column(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0]], 1, -1, 2)

    def test_init_fail_zero_bits(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0]], 1, 1, 0)

    def test_init_fail_zero_bits_wirevector(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_wirevector([[0]], 1, 1, 0)

    def test_init_fail_string_bit(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0]], 1, 1, "2")

    def test_init_fail_negative_bits(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.init_int_matrix([[0]], 1, 1, -1)

    def test_init_wirevector_mismatch(self):
        # The provided WireVector is too narrow for rows*columns*bits.
        matrix_input = pyrtl.Input(1 * 1, 'matrix_input')
        with self.assertRaises(pyrtl.PyrtlError):
            _matrix = Matrix.Matrix(1, 1, 3, value=matrix_input)

    def test_init_random(self):
        rows, columns, bits = random.randint(
            1, 20), random.randint(1, 20), random.randint(1, 20)
        self.init_int_matrix(self._random_matrix(rows, columns, bits),
                             rows, columns, bits)

    def test_init_random_wirevector(self):
        rows, columns, bits = random.randint(
            1, 20), random.randint(1, 20), random.randint(1, 20)
        self.init_wirevector(self._random_matrix(rows, columns, bits),
                             rows, columns, bits)

    def init_wirevector(self, matrix_value, rows, columns, bits):
        # Build the matrix from a packed Const wire and verify its metadata
        # and simulated contents.
        matrix_input = pyrtl.Const(Matrix.list_to_int(matrix_value, bits), rows * columns * bits)
        matrix = Matrix.Matrix(rows, columns, bits, value=matrix_input)

        self.assertEqual(rows, matrix.rows)
        self.assertEqual(columns, matrix.columns)
        self.assertEqual(bits, matrix.bits)
        self.assertEqual(len(matrix), (rows * columns * bits))

        self.check_against_expected(matrix, matrix_value)

    def init_int_matrix(self, int_matrix, rows, columns, bits):
        # Build the matrix from a list-of-lists and verify its metadata and
        # simulated contents.
        matrix = Matrix.Matrix(rows, columns, bits, value=int_matrix)

        self.assertEqual(rows, matrix.rows)
        self.assertEqual(columns, matrix.columns)
        self.assertEqual(bits, matrix.bits)
        self.assertEqual(len(matrix), (rows * columns * bits))

        self.check_against_expected(matrix, int_matrix)
class TestMatrixBits(MatrixTestBase):
    """Tests for reading and (re)assigning a Matrix's per-element bit width."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_bits_no_change(self):
        values = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        matrix = Matrix.Matrix(3, 3, 4, value=values)
        self.assertEqual(matrix.bits, 4)
        self.check_against_expected(matrix, values)

    def test_bits_basic_change_bits(self):
        values = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        matrix = Matrix.Matrix(3, 3, 4, value=values)
        # Widening preserves every stored value.
        matrix.bits = 5
        self.assertEqual(matrix.bits, 5)
        self.check_against_expected(matrix, values)

    def test_bits_basic_change_bits_trunicate(self):
        values = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        matrix = Matrix.Matrix(3, 3, 4, value=values)
        # Narrowing to 2 bits truncates each element modulo 4.
        matrix.bits = 2
        truncated = [[v % 4 for v in row] for row in values]
        self.assertEqual(matrix.bits, 2)
        self.check_against_expected(matrix, truncated)

    def test_bits_fail_change_bits_zero(self):
        matrix = Matrix.Matrix(3, 3, 4, value=[[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        with self.assertRaises(pyrtl.PyrtlError):
            matrix.bits = 0

    def test_bits_fail_change_bits_negative(self):
        matrix = Matrix.Matrix(3, 3, 4, value=[[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        with self.assertRaises(pyrtl.PyrtlError):
            matrix.bits = -1

    def test_bits_fail_change_bits_string(self):
        matrix = Matrix.Matrix(3, 3, 4, value=[[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        with self.assertRaises(pyrtl.PyrtlError):
            matrix.bits = "1"
class TestMatrixGetItem(MatrixTestBase):
def setUp(self):
pyrtl.reset_working_block()
def test_getitem_basic_case(self):
self.get_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3,
3, 4, slice(0, 1, None), slice(0, 1, None), 0)
def test_getitem_2_by_2_slice(self):
self.get_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3,
3, 4, slice(0, 2, None), slice(0, 2, None), [[0, 1], [3, 4]])
def test_getitem_3_by_3_slice(self):
self.get_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3,
3, 4, slice(0, 3, None), slice(0, 3, None), [[0, 1, 2], [3, 4, 5], [6, 7, 8]])
def test_getitem_2_3_slice(self):
self.get_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3,
3, 4, slice(0, 2, None), slice(0, 3, None), [[0, 1, 2], [3, 4, 5]])
def test_getitem_3_2_slice(self):
self.get_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3,
3, 4, slice(0, 3, None), slice(0, 2, None), [[0, 1], [3, 4], [6, 7]])
'''
def test_getitem_random(self):
x_start = random.randint(0, 2)
x_stop = random.randint(x_start + 1, 3)
y_start = random.randint(0, 2)
y_stop = random.randint(y_start + 1, 3)
self.get_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3,
3, 4, slice(x_start, x_stop, None),
slice(y_start, y_stop, None), )
'''
def test_getitem_fail_string_rows(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix["2", 3]
def test_getitem_fail_string_columns(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix[2, "2"]
def test_getitem_fail_out_of_bounds_rows(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix[4, 2]
def test_getitem_fail_out_of_bounds_row_negative(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix[-4]
def test_getitem_fail_out_of_bounds_rows_negative(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix[-4, 2]
def test_getitem_fail_out_of_bounds_columns(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix[1, 4]
def test_getitem_fail_out_of_bounds_columns_negative(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix[1, -4]
def test_getitem_fail_out_of_bounds_rows_slice(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix[1:4, 1]
def test_getitem_fail_out_of_bounds_columns_slice(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix[1, 1:4]
def test_getitem_fail_string_column_only(self):
matrix = Matrix.Matrix(3, 3, 3)
with self.assertRaises(pyrtl.PyrtlError):
_output = matrix["1"]
def get_item(self, value_array, rows, columns, bits, x_slice, y_slice, expected_output):
matrix = Matrix.Matrix(rows, columns, bits, value=value_array)
item = matrix[x_slice, y_slice]
out_rows, out_columns = x_slice.stop - x_slice.start, y_slice.stop - y_slice.start
if isinstance(item, Matrix.Matrix):
self.assertEqual(out_rows, item.rows)
self.assertEqual(out_columns, item.columns)
self.assertEqual(bits, item.bits)
self.assertEqual(len(item), out_rows * out_columns * bits)
self.check_against_expected(item, expected_output)
def test_getitem_with_tuple_indices(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[2, 0]
self.check_against_expected(item, 6)
def test_getitem_with_slice_indices_raw(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[slice(0, 2), slice(0, 3)]
self.check_against_expected(item, [[0, 1, 2], [3, 4, 5]])
def test_getitem_with_slice_indices_shorthand(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[0:2, 0:3]
self.check_against_expected(item, [[0, 1, 2], [3, 4, 5]])
def test_getitem_full(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[:, :]
self.check_against_expected(item, int_matrix)
def test_getitem_full_row(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[1]
self.check_against_expected(item, [[3, 4, 5]])
def test_getitem_full_rows_with_slice_front(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[:2]
self.check_against_expected(item, [[0, 1, 2], [3, 4, 5]])
def test_getitem_full_rows_with_slice_back(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[1:]
self.check_against_expected(item, [[3, 4, 5], [6, 7, 8]])
def test_getitem_negative_returns_single(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[-2:-1, -2:-1]
self.check_against_expected(item, 4)
def test_getitem_negative_returns_row_v1(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[-1]
self.check_against_expected(item, [[6, 7, 8]])
def test_getitem_negative_returns_row_v2(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[-2]
self.check_against_expected(item, [[3, 4, 5]])
def test_getitem_negative_returns_rows(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[-2:]
self.check_against_expected(item, [[3, 4, 5], [6, 7, 8]])
def test_getitem_negative_in_tuple_with_slice_returns_row_1(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[-3, :]
self.check_against_expected(item, [[0, 1, 2]])
def test_getitem_negative_in_tuple_with_slice_returns_row_2(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[-2, :]
self.check_against_expected(item, [[3, 4, 5]])
def test_getitem_negative_in_tuple_with_slice_returns_row_3(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[-1, :]
self.check_against_expected(item, [[6, 7, 8]])
def test_getitem_negative_in_tuple_with_slice_returns_column_1(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[:, -3]
self.check_against_expected(item, [[0], [3], [6]])
def test_getitem_negative_in_tuple_with_slice_returns_column_2(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[:, -2]
self.check_against_expected(item, [[1], [4], [7]])
def test_getitem_negative_in_tuple_with_slice_returns_column_3(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[:, -1]
self.check_against_expected(item, [[2], [5], [8]])
def test_getitem_negative_in_tuple_returns_single(self):
int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
item = matrix[0, -1]
self.check_against_expected(item, 2)
class TestMatrixSetItem(MatrixTestBase):
    """Tests for Matrix.__setitem__: slice and tuple assignment, negative
    indices, and the error cases for bad or out-of-range indices/values."""
    def setUp(self):
        pyrtl.reset_working_block()
    def test_setitem_basic_case(self):
        self.set_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                      slice(0, 1, None), slice(0, 1, None), [[1]],
                      [[1, 1, 2], [3, 4, 5], [6, 7, 8]])
    def test_setitem_2_by_2(self):
        self.set_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                      slice(0, 2, None), slice(0, 2, None),
                      [[1, 0], [1, 0]],
                      [[1, 0, 2], [1, 0, 5], [6, 7, 8]])
    def test_setitem_3_by_3(self):
        self.set_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                      slice(0, 3, None), slice(0, 3, None),
                      [[8, 7, 6], [5, 4, 3], [2, 1, 0]],
                      [[8, 7, 6], [5, 4, 3], [2, 1, 0]])
    def test_setitem_2_by_3(self):
        self.set_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                      slice(0, 2, None), slice(0, 3, None),
                      [[8, 7, 6], [5, 4, 3]],
                      [[8, 7, 6], [5, 4, 3], [6, 7, 8]])
    def test_setitem_3_by_2(self):
        self.set_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                      slice(0, 3, None), slice(0, 2, None),
                      [[8, 7], [5, 4], [2, 1]],
                      [[8, 7, 2], [5, 4, 5], [2, 1, 8]])
    '''
    def test_setitem_random_case(self):
        x_start = random.randint(0, 2)
        x_stop = random.randint(x_start + 1, 3)
        y_start = random.randint(0, 2)
        y_stop = random.randint(y_start + 1, 3)
        value = [[0 for _ in range(y_stop - y_start)]
                 for _ in range(x_stop - x_start)]
        for i in range(len(value)):
            for j in range(len(value[0])):
                value[i][j] = random.randint(0, 2**4-1)
        self.set_item([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3,
                      3, 4, slice(x_start, x_stop, None),
                      slice(y_start, y_stop, None), value)
    '''
    def test_setitem_fail_string_row(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix["2", 3] = pyrtl.Const(0)
    def test_setitem_fail_string_column(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix[2, "2"] = pyrtl.Const(0)
    def test_setitem_fail_out_of_bounds_rows(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix[4, 2] = pyrtl.Const(0)
    def test_setitem_fail_out_of_bounds_rows_negative(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix[-4, 2] = pyrtl.Const(0)
    def test_setitem_fail_out_of_bounds_columns(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix[1, 4] = pyrtl.Const(0)
    def test_setitem_fail_out_of_bounds_columns_negative(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix[1, -4] = pyrtl.Const(0)
    def test_setitem_fail_out_of_bounds_rows_slice(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix[1:4, 2] = pyrtl.Const(0, bitwidth=9)
    def test_setitem_fail_out_of_bounds_columns_slice(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            # Fixed: was a copy of the rows-slice test (matrix[1:4, 2]); a
            # columns-slice test must slice the column axis, mirroring
            # test_getitem_fail_out_of_bounds_columns_slice.
            matrix[1, 1:4] = pyrtl.Const(0, bitwidth=9)
    def test_setitem_fail_string_rows_only(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix["1"] = pyrtl.Const(0, bitwidth=9)
    def test_setitem_fail_wire_for_matrix(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix[1, 0:2] = pyrtl.Const(0, bitwidth=3)
    def test_setitem_fail_value_matrix_incorrect_rows(self):
        matrix = Matrix.Matrix(3, 3, 3)
        value_matrix = Matrix.Matrix(2, 1, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix[0:1, 0:1] = value_matrix
    def test_setitem_fail_value_matrix_incorrect_columns(self):
        matrix = Matrix.Matrix(3, 3, 3)
        value_matrix = Matrix.Matrix(1, 2, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            matrix[0:1, 0:1] = value_matrix
    def set_item(self, int_matrix, rows, columns, bits,
                 x_slice, y_slice, value, expected_output):
        """Assign `value` into the (x_slice, y_slice) region of a matrix built
        from `int_matrix`, then compare the whole matrix to expected_output."""
        matrix = Matrix.Matrix(rows, columns, bits, value=int_matrix)
        value_matrix = Matrix.Matrix(
            x_slice.stop - x_slice.start, y_slice.stop - y_slice.start,
            bits, value=value)
        matrix[x_slice, y_slice] = value_matrix
        self.check_against_expected(matrix, expected_output)
    def test_setitem_with_tuple_indices(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        expected_output = [[0, 1, 2], [3, 4, 5], [9, 7, 8]]
        matrix[2, 0] = 9
        self.check_against_expected(matrix, expected_output)
    def test_setitem_with_slice_indices_raw(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        value_int_matrix = [[9, 8, 7], [6, 5, 4]]
        value_matrix = Matrix.Matrix(2, 3, 4, value=value_int_matrix)
        matrix[slice(0, 2), slice(0, 3)] = value_matrix
        expected_output = [[9, 8, 7], [6, 5, 4], [6, 7, 8]]
        self.check_against_expected(matrix, expected_output)
    def test_setitem_with_slice_indices_shorthand(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        value_int_matrix = [[9, 8, 7], [6, 5, 4]]
        value_matrix = Matrix.Matrix(2, 3, 4, value=value_int_matrix)
        matrix[:2, :3] = value_matrix
        expected_output = [[9, 8, 7], [6, 5, 4], [6, 7, 8]]
        self.check_against_expected(matrix, expected_output)
    def test_setitem_negative(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        matrix[-2:-1, -2:-1] = pyrtl.Const(0)
        expected_output = [[0, 1, 2], [3, 0, 5], [6, 7, 8]]
        self.check_against_expected(matrix, expected_output)
    def test_setitem_raw_int(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        matrix[-2:-1, -2:-1] = 9
        expected_output = [[0, 1, 2], [3, 9, 5], [6, 7, 8]]
        self.check_against_expected(matrix, expected_output)
    def test_setitem_full(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        value_int_matrix = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        value_matrix = Matrix.Matrix(3, 3, 4, value=value_int_matrix)
        matrix[:, :] = value_matrix
        self.check_against_expected(matrix, value_int_matrix)
    def test_setitem_full_rows_with_slice_front(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        value_int_matrix = [[9, 8, 7], [6, 5, 4]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        value_matrix = Matrix.Matrix(2, 3, 4, value=value_int_matrix)
        matrix[:2] = value_matrix
        expected_output = [[9, 8, 7], [6, 5, 4], [6, 7, 8]]
        self.check_against_expected(matrix, expected_output)
    def test_setitem_full_rows_with_slice_back(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        value_int_matrix = [[9, 8, 7], [6, 5, 4]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        value_matrix = Matrix.Matrix(2, 3, 4, value=value_int_matrix)
        matrix[1:] = value_matrix
        expected_output = [[0, 1, 2], [9, 8, 7], [6, 5, 4]]
        self.check_against_expected(matrix, expected_output)
    def test_setitem_full_row_item(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        value_int_matrix = [[8, 7, 6]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        value_matrix = Matrix.Matrix(1, 3, 4, value=value_int_matrix)
        matrix[1] = value_matrix
        expected_output = [[0, 1, 2], [8, 7, 6], [6, 7, 8]]
        self.check_against_expected(matrix, expected_output)
    def test_setitem_row_with_negative_index_v1(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        value_int_matrix = [[9, 8, 7]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        value_matrix = Matrix.Matrix(1, 3, 4, value=value_int_matrix)
        matrix[-1] = value_matrix
        expected_output = [[0, 1, 2], [3, 4, 5], [9, 8, 7]]
        self.check_against_expected(matrix, expected_output)
    def test_setitem_row_with_negative_index_v2(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        value_int_matrix = [[9, 8, 7]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        value_matrix = Matrix.Matrix(1, 3, 4, value=value_int_matrix)
        matrix[-2] = value_matrix
        expected_output = [[0, 1, 2], [9, 8, 7], [6, 7, 8]]
        self.check_against_expected(matrix, expected_output)
    def test_setitem_rows_with_negative_index_slice(self):
        int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        value_int_matrix = [[9, 8, 7], [6, 5, 4]]
        matrix = Matrix.Matrix(3, 3, 4, value=int_matrix)
        value_matrix = Matrix.Matrix(2, 3, 4, value=value_int_matrix)
        matrix[-2:] = value_matrix
        expected_output = [[0, 1, 2], [9, 8, 7], [6, 5, 4]]
        self.check_against_expected(matrix, expected_output)
class TestMatrixCopy(unittest.TestCase):
    """Tests that Matrix.copy() produces an equal but independent matrix."""
    def setUp(self):
        pyrtl.reset_working_block()
    def test_copy_basic(self):
        self.copy([[0]], 1, 1, 2, [[1]])
    def test_copy_three_by_three(self):
        self.copy([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                  [[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    def test_copy_one_by_four(self):
        self.copy([[0, 0, 0, 0]], 1, 4, 4, [[1, 1, 1, 1]])
    def copy(self, first_value, rows, columns, bits, second_value):
        """Copy a matrix, then overwrite the copy with second_value and check
        the original is unaffected.  second_value must differ from first_value
        element-wise (asserted below) for the independence check to be valid.
        """
        matrix = Matrix.Matrix(rows, columns, bits, value=first_value)
        change_matrix = Matrix.Matrix(rows, columns, bits, value=second_value)
        copy_matrix = matrix.copy()
        self.assertEqual(copy_matrix.rows, matrix.rows)
        self.assertEqual(copy_matrix.columns, matrix.columns)
        self.assertEqual(copy_matrix.bits, matrix.bits)
        self.assertEqual(len(copy_matrix), len(matrix))
        # Snapshot both matrices *before* mutating the copy; the ordering of
        # these Output hookups relative to the assignment below is what the
        # test relies on.
        copy_output = pyrtl.Output(name="copy_output", bitwidth=len(copy_matrix))
        copy_output <<= copy_matrix.to_wirevector()
        matrix_output = pyrtl.Output(name="matrix_output", bitwidth=len(matrix))
        matrix_output <<= matrix.to_wirevector()
        # Overwrite every element of the copy only.
        copy_matrix[:, :] = change_matrix[:, :]
        matrix_output_1 = pyrtl.Output(name="matrix_output_1", bitwidth=len(matrix))
        matrix_output_1 <<= matrix.to_wirevector()
        copy_output_1 = pyrtl.Output(name="copy_output_1", bitwidth=len(copy_matrix))
        copy_output_1 <<= copy_matrix.to_wirevector()
        sim_trace = pyrtl.SimulationTrace()
        sim = pyrtl.Simulation(tracer=sim_trace)
        sim.step({})
        # Pre-mutation: the copy matched the original element-for-element.
        given_output = Matrix.matrix_wv_to_list(sim.inspect("matrix_output"), rows, columns, bits)
        expected_output = Matrix.matrix_wv_to_list(sim.inspect("copy_output"), rows, columns, bits)
        for i in range(rows):
            for j in range(columns):
                self.assertEqual(given_output[i][j], expected_output[i][j])
        # Post-mutation: the original and the (rewritten) copy now differ
        # everywhere, proving copy() did not alias the original.
        given_output = Matrix.matrix_wv_to_list(sim.inspect("matrix_output_1"),
                                                rows, columns, bits)
        expected_output = Matrix.matrix_wv_to_list(sim.inspect("copy_output_1"),
                                                   rows, columns, bits)
        for i in range(rows):
            for j in range(columns):
                self.assertNotEqual(first_value[i][j], second_value[i][j])
                self.assertNotEqual(given_output[i][j], expected_output[i][j])
class TestMatrixTranspose(MatrixTestBase):
    """Tests for Matrix.transpose()."""
    def setUp(self):
        pyrtl.reset_working_block()
    def test_transpose_basic(self):
        self.transpose([[0]], 1, 1, 2, [[0]])
    def test_transpose_3_by_3(self):
        self.transpose([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                       [[0, 3, 6], [1, 4, 7], [2, 5, 8]])
    def test_transpose_1_by_4(self):
        self.transpose([[0, 1, 0, 2]], 1, 4, 4, [[0], [1], [0], [2]])
    def test_transpose_4_by_1(self):
        self.transpose([[0], [1], [0], [2]], 4, 1, 4, [[0, 1, 0, 2]])
    '''
    def test_transpose_random_case(self):
        rows, columns, bits = random.randint(
            1, 20), random.randint(1, 20), random.randint(1, 20)
        matrix = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                matrix[i][j] = random.randint(1, 2**bits - 1)
        self.transpose(matrix, rows, columns, bits)
    '''
    def transpose(self, int_matrix, rows, columns, bits, expected_output):
        """Transpose a matrix and verify its swapped shape, bitwidth, length,
        and contents."""
        original = Matrix.Matrix(rows, columns, bits, value=int_matrix)
        flipped = original.transpose()
        self.assertEqual(columns, flipped.rows)
        self.assertEqual(rows, flipped.columns)
        self.assertEqual(bits, flipped.bits)
        self.assertEqual(len(flipped), rows * columns * bits)
        self.check_against_expected(flipped, expected_output)
class TestMatrixReverse(MatrixTestBase):
    """Tests for reversed(Matrix), which flips element order end-to-end."""
    def setUp(self):
        pyrtl.reset_working_block()
    def test_reverse_basic(self):
        self.reverse([[0]], 1, 1, 2, [[0]])
    def test_reverse_3_by_3(self):
        self.reverse([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                     [[8, 7, 6], [5, 4, 3], [2, 1, 0]])
    def test_reverse_1_by_4(self):
        self.reverse([[0, 1, 3, 2]], 1, 4, 4, [[2, 3, 1, 0]])
    def test_reverse_4_by_1(self):
        self.reverse([[0], [1], [3], [2]], 4, 1, 4,
                     [[2], [3], [1], [0]])
    '''
    def test_reverse_random(self):
        rows, columns, bits = random.randint(
            1, 20), random.randint(1, 20), random.randint(1, 20)
        matrix = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                matrix[i][j] = random.randint(1, 2**bits - 1)
        self.reverse(matrix, rows, columns, bits)
    '''
    def reverse(self, int_matrix, rows, columns, bits, expected_output):
        """Reverse a matrix and verify its unchanged shape plus reversed
        contents."""
        original = Matrix.Matrix(rows, columns, bits, value=int_matrix)
        flipped = reversed(original)
        self.assertEqual(rows, flipped.rows)
        self.assertEqual(columns, flipped.columns)
        self.assertEqual(bits, flipped.bits)
        self.assertEqual(len(flipped), rows * columns * bits)
        self.check_against_expected(flipped, expected_output)
class TestMatrixAdd(MatrixTestBase):
    """Tests for element-wise matrix addition via the ``+`` operator."""
    def setUp(self):
        pyrtl.reset_working_block()
    def test_add_basic_case(self):
        self.add([[0]], 1, 1, 2, [[0]], 1, 1, 3, [[0]])
    def test_add_2_by_3(self):
        self.add([[0, 1, 2], [3, 4, 5]], 2, 3, 4,
                 [[0, 1, 2], [3, 4, 5]], 2, 3, 4, [[0, 2, 4],
                                                   [6, 8, 10]])
    def test_add_3_by_2(self):
        self.add([[2, 4], [5, 4], [2, 5]], 3, 2, 4,
                 [[0, 1], [3, 4], [6, 7]], 3, 2, 4, [[2, 5],
                                                     [8, 8],
                                                     [8, 12]])
    def test_add_3_by_3_same(self):
        self.add([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                 [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[0, 2, 4],
                                                              [6, 8, 10],
                                                              [12, 14, 16]])
    def test_add_3_by_3_different(self):
        self.add([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                 [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[2, 5, 5],
                                                              [8, 8, 12],
                                                              [8, 12, 9]])
    # NOTE(review): in the failure tests below, the second literal's shape and
    # the declared rows/columns deliberately disagree so that the operation
    # raises; the method names do not always match the literals exactly.
    def test_add_fail_2_by_2_add_3_by_3(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.add([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                     [[0, 1], [0, 1], [0, 1]], 2, 3, 4, [[]])
    def test_add_fail_3_by_3_add_2_by_3(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.add([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                     [[0, 1, 1], [0, 1, 1]], 2, 3, 4, [[]])
    def test_add_fail_3_by_3_add_3_by_2(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.add([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                     [[0, 1], [0, 1], [1, 1]], 3, 2, 4, [[]])
    def test_add_fail_add_one(self):
        # Adding a bare int (not a Matrix) is rejected.
        first_matrix = Matrix.Matrix(1, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _result = first_matrix + 1
    '''
    def test_add_random_case(self):
        rows, columns, bits1, bits2 = random.randint(
            1, 20), random.randint(1, 20), random.randint(
            1, 20), random.randint(1, 20)
        first = [[0 for _ in range(columns)]
                 for _ in range(rows)]
        second = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                first[i][j] = random.randint(1, 2**bits1 - 1)
                second[i][j] = random.randint(1, 2**bits2 - 1)
        self.add(first, rows, columns, bits1, second, rows, columns, bits2)
    '''
    def add(self, first_int_matrix, rows1, columns1, bits1, second_int_matrix,
            rows2, columns2, bits2, expected_output):
        """Add two matrices and verify the result's shape and contents."""
        first_matrix = Matrix.Matrix(rows1, columns1, bits1, value=first_int_matrix)
        second_matrix = Matrix.Matrix(rows2, columns2, bits2, value=second_int_matrix)
        result_matrix = first_matrix + second_matrix
        self.assertEqual(result_matrix.rows, rows1)
        self.assertEqual(result_matrix.columns, columns1)
        self.check_against_expected(result_matrix, expected_output)
class TestMatrixInplaceAdd(MatrixTestBase):
    """Tests for in-place element-wise addition via ``+=``."""
    def setUp(self):
        pyrtl.reset_working_block()
    def test_iadd_basic(self):
        self.iadd([[0]], 1, 1, 2, [[0]], 1, 1, 3, [[0]])
    def test_iadd_3_by_3_same(self):
        self.iadd([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                  [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[0, 2, 4], [6, 8, 10], [12, 14, 16]])
    def test_iadd_3_by_3_different(self):
        self.iadd([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                  [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[2, 5, 5], [8, 8, 12], [8, 12, 9]])
    # NOTE(review): the literal shapes below intentionally disagree with the
    # declared dims so that the operation raises a PyrtlError.
    def test_iadd_fail_3_by_3_add_2_by_3(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.iadd([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                      [[0, 1], [0, 1], [0, 1]], 2, 3, 4, [[]])
    def test_iadd_fail_3_by_3_add_3_by_2(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.iadd([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                      [[0, 1, 1], [0, 1, 1]], 3, 2, 4, [[]])
    '''
    def test_iadd_random_case(self):
        rows, columns, bits1, bits2 = random.randint(
            1, 20), random.randint(1, 20), random.randint(
            1, 20), random.randint(1, 20)
        first = [[0 for _ in range(columns)]
                 for _ in range(rows)]
        second = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                first[i][j] = random.randint(1, 2**bits1 - 1)
                second[i][j] = random.randint(1, 2**bits2 - 1)
        self.iadd(first, rows, columns, bits1, second, rows, columns, bits2)
    '''
    def iadd(self, first_int_matrix, rows1, columns1, bits1, second_int_matrix,
             rows2, columns2, bits2, expected_output):
        """Apply ``+=`` to the first matrix and verify shape and contents."""
        first_matrix = Matrix.Matrix(rows1, columns1, bits1, value=first_int_matrix)
        second_matrix = Matrix.Matrix(rows2, columns2, bits2, value=second_int_matrix)
        first_matrix += second_matrix
        self.assertEqual(first_matrix.rows, rows1)
        self.assertEqual(first_matrix.columns, columns1)
        self.check_against_expected(first_matrix, expected_output)
class TestMatrixSub(MatrixTestBase):
    """Tests for element-wise matrix subtraction via the ``-`` operator."""
    def setUp(self):
        pyrtl.reset_working_block()
    def test_sub_basic(self):
        self.sub([[0]], 1, 1, 2, [[0]], 1, 1, 3, [[0]])
    def test_sub_3_by_3_same(self):
        self.sub([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                 [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    def test_sub_3_by_3_different(self):
        # Expected output includes negative values; sub() passes floored=True
        # to check_against_expected so they are compared as signed results.
        self.sub([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                 [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[2, 3, 1], [2, 0, 2], [-4, -2, -7]])
    def test_sub_fail_int(self):
        # Subtracting a bare int (not a Matrix) is rejected.
        first_matrix = Matrix.Matrix(1, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _result = first_matrix - 1
    # NOTE(review): the literal shapes below intentionally disagree with the
    # declared dims so that the operation raises a PyrtlError.
    def test_sub_fail_3_by_3_sub_3_by_2(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.sub([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                     [[0, 1], [0, 1], [0, 1]], 3, 2, 4, [[]])
    def test_sub_fail_3_by_3_sub_2_by_3(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.sub([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                     [[0, 1, 1], [0, 1, 1]], 2, 3, 4, [[]])
    '''
    def test_sub_random_case(self):
        rows, columns, bits1, bits2 = random.randint(
            1, 20), random.randint(1, 20), random.randint(
            1, 20), random.randint(1, 20)
        first = [[0 for _ in range(columns)]
                 for _ in range(rows)]
        second = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                first[i][j] = random.randint(1, 2**bits1 - 1)
                second[i][j] = random.randint(1, 2**bits2 - 1)
        self.sub(first, rows, columns, bits1, second, rows, columns, bits2)
    '''
    def sub(self, first_int_matrix, rows1, columns1, bits1, second_int_matrix,
            rows2, columns2, bits2, expected_output):
        """Subtract the second matrix from the first and verify shape and
        contents (floored/signed comparison)."""
        first_matrix = Matrix.Matrix(rows1, columns1, bits1, value=first_int_matrix)
        second_matrix = Matrix.Matrix(rows2, columns2, bits2, value=second_int_matrix)
        result_matrix = first_matrix - second_matrix
        self.assertEqual(result_matrix.rows, rows1)
        self.assertEqual(result_matrix.columns, columns1)
        self.check_against_expected(result_matrix, expected_output, floored=True)
class TestMatrixInplaceSub(MatrixTestBase):
    """Tests for in-place element-wise subtraction via ``-=``.

    Unlike TestMatrixSub, every expected result here is non-negative, so the
    default (non-floored) comparison is used.
    """
    def setUp(self):
        pyrtl.reset_working_block()
    def test_isub_basic(self):
        self.isub([[0]], 1, 1, 2, [[0]], 1, 1, 3, [[0]])
    def test_isub_3_by_3_same(self):
        self.isub([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                  [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    def test_isub_3_by_3_different_positive_result(self):
        self.isub([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                  [[0, 1, 2], [3, 4, 5], [1, 4, 0]], 3, 3, 4, [[2, 3, 1], [2, 0, 2], [1, 1, 1]])
    # NOTE(review): the literal shapes below intentionally disagree with the
    # declared dims so that the operation raises a PyrtlError.
    def test_isub_fail_3_by_3_sub_2_by_3(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.isub([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                      [[0, 1], [0, 1], [0, 1]], 2, 3, 4, [[]])
    def test_isub_fail_3_by_3_sub_3_by_2(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.isub([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                      [[0, 1, 1], [0, 1, 1]], 3, 2, 4, [[]])
    '''
    def test_isub_random_case(self):
        rows, columns, bits1 = random.randint(
            1, 20), random.randint(1, 20), random.randint(
            1, 20)
        first = [[0 for _ in range(columns)]
                 for _ in range(rows)]
        second = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                first_num, second_num = random.randint(
                    1, 2**bits1 - 1), random.randint(1, 2**bits1 - 1)
                if first_num > second_num:
                    first[i][j] = first_num
                    second[i][j] = second_num
                else:
                    first[i][j] = second_num
                    second[i][j] = first_num
        self.isub(first, rows, columns, bits1, second, rows, columns, bits1)
    '''
    def isub(self, first_int_matrix, rows1, columns1, bits1, second_int_matrix,
             rows2, columns2, bits2, expected_output):
        """Apply ``-=`` to the first matrix and verify shape and contents."""
        first_matrix = Matrix.Matrix(rows1, columns1, bits1, value=first_int_matrix)
        second_matrix = Matrix.Matrix(rows2, columns2, bits2, value=second_int_matrix)
        first_matrix -= second_matrix
        self.assertEqual(first_matrix.rows, rows1)
        self.assertEqual(first_matrix.columns, columns1)
        self.check_against_expected(first_matrix, expected_output)
class TestMatrixMultiply(MatrixTestBase):
    """Tests for element-wise (Hadamard) multiplication via ``*`` with either
    another Matrix or a scalar WireVector."""
    def setUp(self):
        pyrtl.reset_working_block()
    def test_element_wise_multiply_basic(self):
        self.element_wise_multiply([[0]], 1, 1, 2, [[0]], 1, 1, 3, [[0]])
    def test_element_wise_multiply_2_by_3(self):
        self.element_wise_multiply([[2, 4, 3], [5, 4, 7]], 2, 3, 4,
                                   [[0, 1, 2], [3, 4, 5]], 2, 3, 4, [[0, 4, 6],
                                                                     [15, 16, 35]])
    def test_element_wise_multiply_3_by_2(self):
        self.element_wise_multiply([[2, 4], [5, 7], [2, 5]], 3, 2, 4,
                                   [[0, 2], [3, 4], [6, 7]], 3, 2, 4, [[0, 8],
                                                                       [15, 28],
                                                                       [12, 35]])
    def test_element_wise_multiply_3_by_3_same(self):
        self.element_wise_multiply([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                                   [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[0, 1, 4],
                                                                                [9, 16, 25],
                                                                                [36, 49, 64]])
    def test_element_wise_multiply_3_by_3_different(self):
        self.element_wise_multiply([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                                   [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[0, 4, 6],
                                                                                [15, 16, 35],
                                                                                [12, 35, 8]])
    # NOTE(review): the literal shapes below intentionally disagree with the
    # declared dims so that the operation raises a PyrtlError.
    def test_element_wise_multiply_fail_3_by_3_multiply_3_by_2(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.element_wise_multiply([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                                       [[0, 1], [0, 1], [0, 1]], 3, 2, 4, [[]])
    def test_element_wise_multiply_fail_3_by_3_multiply_2_by_3(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.element_wise_multiply([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                                       [[0, 1, 1], [0, 1, 1]], 2, 3, 4, [[]])
    '''
    def test_element_wise_multiply_random_case(self):
        rows, columns, bits1, bits2 = random.randint(
            1, 10), random.randint(1, 10), random.randint(
            1, 10), random.randint(1, 10)
        first = [[0 for _ in range(columns)]
                 for _ in range(rows)]
        second = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                first[i][j] = random.randint(1, 2**bits1 - 1)
                second[i][j] = random.randint(1, 2**bits2 - 1)
        self.element_wise_multiply(
            first, rows, columns, bits1, second, rows, columns, bits2)
    '''
    def element_wise_multiply(self, first_int_matrix, rows1, columns1, bits1,
                              second_int_matrix, rows2, columns2, bits2, expected_output):
        """Multiply two matrices element-wise and verify shape and contents."""
        first_matrix = Matrix.Matrix(rows1, columns1, bits1, value=first_int_matrix)
        second_matrix = Matrix.Matrix(rows2, columns2, bits2, value=second_int_matrix)
        result_matrix = first_matrix * second_matrix
        self.assertEqual(result_matrix.rows, rows1)
        self.assertEqual(result_matrix.columns, columns1)
        self.check_against_expected(result_matrix, expected_output)
    def test_multiply_scalar_basic(self):
        self.multiply_number([[2]], 1, 1, 3, 1, [[2]])
    def test_multiply_scalar_basic_zero(self):
        self.multiply_number([[1]], 1, 1, 2, 0, [[0]])
    def test_multiply_scalar_basic_one(self):
        self.multiply_number([[1]], 1, 1, 2, 1, [[1]])
    def test_multiply_scalar_3_by_3(self):
        self.multiply_number([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 3, [[0, 3, 6],
                                                                             [9, 12, 15],
                                                                             [18, 21, 24]])
    # NOTE(review): the next two test names look swapped relative to the
    # matrices they pass (4_by_1 uses a 1x4, 1_by_4 uses a 4x1) — the checks
    # themselves are still self-consistent.
    def test_multiply_scalar_4_by_1(self):
        self.multiply_number([[0, 1, 0, 2]], 1, 4, 4, 5, [[0, 5, 0, 10]])
    def test_multiply_scalar_1_by_4(self):
        self.multiply_number([[0], [1], [0], [2]], 4, 1,
                             4, 5, [[0], [5], [0], [10]])
    '''
    def test_multiply_scalar_random_case(self):
        rows, columns, bits, number = random.randint(
            1, 10), random.randint(1, 10), random.randint(
            1, 10), random.randint(1, 10)
        matrix = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                matrix[i][j] = random.randint(1, 2**bits - 1)
        self.multiply_number(matrix, rows, columns, bits, number)
    '''
    def multiply_number(self, int_matrix, rows, columns, bits, number, expected_output):
        """Multiply a matrix by a scalar Const and verify shape and contents."""
        first_matrix = Matrix.Matrix(rows, columns, bits, value=int_matrix)
        # Rebinds `bits` to the minimum bitwidth needed to hold `number`
        # (1 bit for zero); the matrix was already built with the original
        # `bits` above.
        bits = int(math.log(number, 2)) + 1 if number != 0 else 1
        result_matrix = first_matrix * pyrtl.Const(number, bits)
        self.assertEqual(result_matrix.rows, rows)
        self.assertEqual(result_matrix.columns, columns)
        self.check_against_expected(result_matrix, expected_output)
    def test_multiply_fail_int(self):
        # Multiplying by a bare int (not a WireVector or Matrix) is rejected.
        first_matrix = Matrix.Matrix(3, 2, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _result = first_matrix * 1
class TestMatrixInplaceMultiply(MatrixTestBase):
    """Tests for in-place element-wise multiplication via ``*=``."""
    def setUp(self):
        pyrtl.reset_working_block()
    def test_element_wise_imultiply_basic(self):
        self.element_wise_imultiply([[0]], 1, 1, 2, [[0]], 1, 1, 3, [[0]])
    def test_element_wise_imultiply_3_by_3_same(self):
        self.element_wise_imultiply([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                                    [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[0, 1, 4],
                                                                                 [9, 16, 25],
                                                                                 [36, 49, 64]])
    def test_element_wise_imultiply_3_by_3_different(self):
        self.element_wise_imultiply([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                                    [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[0, 4, 6],
                                                                                 [15, 16, 35],
                                                                                 [12, 35, 8]])
    # NOTE(review): the literal shapes below intentionally disagree with the
    # declared dims so that the operation raises a PyrtlError.
    def test_element_wise_imultiply_fail_3_by_3_multiply_2_by_3(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.element_wise_imultiply([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                                        [[0, 1], [0, 1], [0, 1]], 2, 3, 4, [[]])
    def test_element_wise_imultiply_fail_3_by_3_multiply_3_by_2(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.element_wise_imultiply([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                                        [[0, 1, 1], [0, 1, 1]], 3, 2, 4, [[]])
    '''
    def test_element_wise_imultiply_random_case(self):
        rows, columns, bits1, bits2 = random.randint(
            1, 10), random.randint(1, 10), random.randint(
            1, 10), random.randint(1, 10)
        first = [[0 for _ in range(columns)]
                 for _ in range(rows)]
        second = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                first[i][j] = random.randint(1, 2**bits1 - 1)
                second[i][j] = random.randint(1, 2**bits2 - 1)
        self.element_wise_imultiply(
            first, rows, columns, bits1, second, rows, columns, bits2)
    '''
    def element_wise_imultiply(self, first_int_matrix, rows1, columns1, bits1,
                               second_int_matrix, rows2, columns2, bits2, expected_output):
        """Apply ``*=`` to the first matrix and verify shape and contents."""
        first_matrix = Matrix.Matrix(rows1, columns1, bits1, value=first_int_matrix)
        second_matrix = Matrix.Matrix(rows2, columns2, bits2, value=second_int_matrix)
        first_matrix *= second_matrix
        self.assertEqual(first_matrix.rows, rows1)
        self.assertEqual(first_matrix.columns, columns1)
        self.check_against_expected(first_matrix, expected_output)
class TestMatrixMatrixMultiply(MatrixTestBase):
    def setUp(self):
        # Start each test from a fresh PyRTL working block.
        pyrtl.reset_working_block()
def test_mat_mul_basic(self):
self.matmul([[0]], 1, 1, 2, [[0]], 1, 1, 3, [[0]])
def test_mat_mul_1_by_2_multiply_2_by_1(self):
self.matmul([[1, 2]], 1, 2, 2, [[1], [2]], 2, 1, 3, [[5]])
def test_mat_mul_3_by_3_multiply_3_by_3_same(self):
self.matmul([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
[[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[15, 18, 21],
[42, 54, 66],
[69, 90, 111]])
def test_mat_mul_3_by_3_multiply_3_by_3_different(self):
self.matmul([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
[[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[30, 39, 48],
[54, 70, 86],
[21, 29, 37]])
def test_mat_mul_fail_int(self):
first_matrix = Matrix.Matrix(3, 2, 3)
with self.assertRaises(pyrtl.PyrtlError):
_result = first_matrix.__matmul__(1)
def test_mat_mul_fail_3_by_3_multiply_2_by_2(self):
with self.assertRaises(pyrtl.PyrtlError):
self.matmul([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
[[0, 1], [0, 1], [0, 1]], 2, 3, 4, [[]])
def test_mat_mul_fail_3_by_3_multiply_2_by_3(self):
with self.assertRaises(pyrtl.PyrtlError):
self.matmul([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
[[0, 1, 1], [0, 1, 1]], 2, 3, 4, [[]])
def test_mat_mul_fail_3_by_2_multiply_3_by_2(self):
first_matrix = Matrix.Matrix(3, 2, 3)
second_matrix = Matrix.Matrix(3, 2, 3)
with self.assertRaises(pyrtl.PyrtlError):
_result = first_matrix.__matmul__(second_matrix)
'''
def test_mat_mul_random_case(self):
rows, columns1, columns2, bits1, bits2 = random.randint(
1, 5), random.randint(1, 5), random.randint(
1, 5), random.randint(1, 5), random.randint(1, 5)
first = [[0 for _ in range(columns1)]
for _ in range(rows)]
second = [[0 for _ in range(columns2)]
for _ in range(columns1)]
for i in range(rows):
for j in range(columns1):
first[i][j] = random.randint(1, 2**bits1 - 1)
for i in range(columns1):
for j in range(columns2):
second[i][j] = random.randint(1, 2**bits2 - 1)
self.matmul(first, rows, columns1, bits1,
second, columns1, columns2, bits2)
'''
def matmul(self, first_int_matrix, rows1, columns1, bits1,
second_int_matrix, rows2, columns2, bits2, expected_output):
first_matrix = Matrix.Matrix(rows1, columns1, bits1, value=first_int_matrix)
second_matrix = Matrix.Matrix(rows2, columns2, bits2, value=second_int_matrix)
result_matrix = first_matrix.__matmul__(second_matrix)
self.assertEqual(result_matrix.rows, rows1)
self.assertEqual(result_matrix.columns, columns2)
self.check_against_expected(result_matrix, expected_output)
class TestMatrixInplaceMatrixMultiply(MatrixTestBase):
    """Tests for in-place matrix multiply (`__imatmul__`) on Matrix."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_imat_mul_basic(self):
        self.imatmul([[0]], 1, 1, 2,
                     [[0]], 1, 1, 3,
                     [[0]])

    def test_imat_mul_1_by_2_multiply_2_by_1(self):
        self.imatmul([[1, 2]], 1, 2, 2,
                     [[1], [2]], 2, 1, 3,
                     [[5]])

    def test_imat_mul_3_by_3_multiply_3_by_3_same(self):
        self.imatmul([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                     [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                     [[15, 18, 21], [42, 54, 66], [69, 90, 111]])

    def test_imat_mul_3_by_3_multiply_3_by_3_different(self):
        self.imatmul([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                     [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
                     [[30, 39, 48], [54, 70, 86], [21, 29, 37]])

    def test_imat_mul_fail_3_by_3_multiply_2_by_3(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.imatmul([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                         [[0, 1], [0, 1], [0, 1]], 2, 3, 4,
                         [[]])

    def test_imat_mul_fail_3_by_3_multiply_3_by_2(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.imatmul([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
                         [[0, 1, 1], [0, 1, 1]], 3, 2, 4,
                         [[]])

    '''
    def test_imat_mul_random_case(self):
        rows, columns1, columns2, bits1, bits2 = random.randint(
            1, 5), random.randint(1, 5), random.randint(
            1, 5), random.randint(1, 5), random.randint(1, 5)
        first = [[0 for _ in range(columns1)]
                 for _ in range(rows)]
        second = [[0 for _ in range(columns2)]
                  for _ in range(columns1)]
        for i in range(rows):
            for j in range(columns1):
                first[i][j] = random.randint(1, 2**bits1 - 1)
        for i in range(columns1):
            for j in range(columns2):
                second[i][j] = random.randint(1, 2**bits2 - 1)
        self.imatmul(first, rows, columns1, bits1,
                     second, columns1, columns2, bits2)
    '''

    def imatmul(self, first_int_matrix, rows1, columns1, bits1,
                second_int_matrix, rows2, columns2, bits2, expected_output):
        """Build both operands, run __imatmul__ on the first, and verify.

        The dunder is invoked directly (not via `@=`) so the assertions
        observe the mutated left operand itself, matching what the original
        suite exercises.
        """
        lhs = Matrix.Matrix(rows1, columns1, bits1, value=first_int_matrix)
        rhs = Matrix.Matrix(rows2, columns2, bits2, value=second_int_matrix)
        lhs.__imatmul__(rhs)
        # After in-place matrix multiply the column count becomes rhs's.
        self.assertEqual(lhs.rows, rows1)
        self.assertEqual(lhs.columns, columns2)
        self.check_against_expected(lhs, expected_output)
class TestMatrixMatrixPower(MatrixTestBase):
    """Tests for raising a square Matrix to an integer power (`**`)."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_matrix_power_3_by_3_power_0(self):
        # Power zero yields the identity matrix.
        self.matrix_power([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 0,
                          [[1, 0, 0], [0, 1, 0], [0, 0, 1]])

    def test_matrix_power_3_by_3_power_1(self):
        # Power one yields the matrix itself.
        self.matrix_power([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 1,
                          [[0, 1, 2], [3, 4, 5], [6, 7, 8]])

    def test_matrix_power_3_by_3_power_2(self):
        self.matrix_power([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 2,
                          [[15, 18, 21], [42, 54, 66], [69, 90, 111]])

    def test_matrix_power_fail_nonsquare(self):
        # Only square matrices can be raised to a power.
        with self.assertRaises(pyrtl.PyrtlError):
            self.matrix_power([[0, 0, 0, 0]], 1, 4, 4, 3, [[]])

    def test_matrix_power_fail_string(self):
        base = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _result = base ** "1"

    def test_matrix_power_fail_negative_power(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.matrix_power([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, -1, [[]])

    '''
    def test_matrix_power_random_case(self):
        self.matrix_power([[0, 1, 2], [3, 4, 5], [6, 7, 8]],
                          3, 3, 4, random.randint(0, 2))
    '''

    def matrix_power(self, int_matrix, rows, columns, bits, exp, expected_output):
        """Build a matrix, raise it to `exp`, and verify the result."""
        base = Matrix.Matrix(rows, columns, bits, value=int_matrix)
        powered = base ** exp
        self.check_against_expected(powered, expected_output)
class TestMatrixInplaceMatrixPower(MatrixTestBase):
    """Tests for in-place matrix power (`**=`) on a square Matrix."""

    def setUp(self):
        pyrtl.reset_working_block()

    def tearDown(self):
        pyrtl.reset_working_block()

    def test_imatrix_power_3_by_3_power_0(self):
        # Power zero leaves the identity matrix in place.
        self.imatrix_power([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 0,
                           [[1, 0, 0], [0, 1, 0], [0, 0, 1]])

    def test_imatrix_power_3_by_3_power_1(self):
        self.imatrix_power([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 1,
                           [[0, 1, 2], [3, 4, 5], [6, 7, 8]])

    def test_imatrix_power_3_by_3_power_2(self):
        self.imatrix_power([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 2,
                           [[15, 18, 21], [42, 54, 66], [69, 90, 111]])

    def imatrix_power(self, int_matrix, rows, columns, bits, exp, expected_output):
        """Build a matrix, apply `**= exp`, and verify the mutated matrix."""
        base = Matrix.Matrix(rows, columns, bits, value=int_matrix)
        base **= exp
        self.check_against_expected(base, expected_output)
class TestMultiply(MatrixTestBase):
    """Tests for the free function Matrix.multiply (scalar and element-wise)."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_multiply_scalar(self):
        # Multiplying by a WireVector scales every element.
        values = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        operand = Matrix.Matrix(3, 3, 4, value=values)
        scaled = Matrix.multiply(operand, pyrtl.Const(3))
        self.assertEqual(scaled.rows, 3)
        self.assertEqual(scaled.columns, 3)
        self.check_against_expected(scaled, [[0, 3, 6], [9, 12, 15], [18, 21, 24]])

    def test_multiply_matrix(self):
        # Matrix * Matrix is element-wise (Hadamard), not matrix multiply.
        values = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        lhs = Matrix.Matrix(3, 3, 4, value=values)
        rhs = Matrix.Matrix(3, 3, 4, value=values)
        product = Matrix.multiply(lhs, rhs)
        self.assertEqual(product.rows, 3)
        self.assertEqual(product.columns, 3)
        self.check_against_expected(product, [[0, 1, 4], [9, 16, 25], [36, 49, 64]])

    def test_multiply_fail_string(self):
        # NOTE(review): despite the name, this passes an int (1) as the first
        # operand; any non-Matrix first argument is expected to raise.
        values = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        rhs = Matrix.Matrix(3, 3, 4, value=values)
        with self.assertRaises(pyrtl.PyrtlError):
            _product = Matrix.multiply(1, rhs)
class TestReshape(MatrixTestBase):
    # Tests for Matrix.reshape: int shapes, tuple shapes, the -1 wildcard
    # dimension, varargs form, and both 'C' (row-major) and 'F'
    # (column-major) element ordering, plus rejection of invalid shapes.

    def setUp(self):
        pyrtl.reset_working_block()

    def check_reshape(self, original, rows, columns, shape, expected, order='C'):
        """Build a rows x columns matrix, reshape it, and compare to expected."""
        matrix = Matrix.Matrix(rows, columns, 4, value=original)
        reshaped = matrix.reshape(shape, order=order)
        self.check_against_expected(reshaped, expected)

    # --- row-major ('C', the default) ordering ---

    def test_reshape_negative_one_shape(self):
        self.check_reshape([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4, -1,
                           [[0, 1, 2, 3, 4, 5, 6, 7]])

    def test_reshape_single_int_shape(self):
        self.check_reshape([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4, 8,
                           [[0, 1, 2, 3, 4, 5, 6, 7]])

    def test_reshape_normal_tuple_shape(self):
        self.check_reshape([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4, (1, 8),
                           [[0, 1, 2, 3, 4, 5, 6, 7]])

    def test_reshape_tuple_with_negative_one_shape(self):
        self.check_reshape([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4, (1, -1),
                           [[0, 1, 2, 3, 4, 5, 6, 7]])

    def test_reshape_varargs_shape(self):
        # Shape may also be given as separate positional arguments.
        matrix = Matrix.Matrix(2, 4, 4, value=[[0, 1, 2, 3], [4, 5, 6, 7]])
        reshaped = matrix.reshape(1, 8)
        self.check_against_expected(reshaped, [[0, 1, 2, 3, 4, 5, 6, 7]])

    def test_reshape_nonsquare_tuple_shape_1(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (2, 6),
                           [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])

    def test_reshape_nonsquare_tuple_shape_2(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (6, 2),
                           [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])

    def test_reshape_nonsquare_tuple_shape_3(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (3, 4),
                           [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])

    def test_reshape_nonsquare_tuple_shape_4(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5]], 2, 3, (3, 2),
                           [[0, 1], [2, 3], [4, 5]])

    def test_reshape_nonsquare_tuple_shape_5(self):
        self.check_reshape([[0, 1, 2]], 1, 3, (3, 1),
                           [[0], [1], [2]])

    def test_reshape_nonsquare_int_shape(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, 12,
                           [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])

    def test_reshape_nonsquare_negative_one_shape(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, -1,
                           [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])

    def test_reshape_nonsquare_tuple_with_negative_one_shape_1(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (-1, 12),
                           [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])

    def test_reshape_nonsquare_tuple_with_negative_one_shape_2(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (12, -1),
                           [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])

    def test_reshape_nonsquare_incomplete_tuple_shape(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (12,),
                           [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])

    def test_reshape_nonsquare_incomplete_tuple_with_negative_one_shape(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (-1,),
                           [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])

    # --- column-major ('F') ordering ---

    def test_reshape_negative_one_shape_column_order(self):
        self.check_reshape([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4, -1,
                           [[0, 4, 1, 5, 2, 6, 3, 7]], order='F')

    def test_reshape_single_int_shape_column_order(self):
        self.check_reshape([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4, 8,
                           [[0, 4, 1, 5, 2, 6, 3, 7]], order='F')

    def test_reshape_normal_tuple_shape_column_order(self):
        self.check_reshape([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4, (1, 8),
                           [[0, 4, 1, 5, 2, 6, 3, 7]], order='F')

    def test_reshape_tuple_with_negative_one_shape_column_order(self):
        self.check_reshape([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4, (1, -1),
                           [[0, 4, 1, 5, 2, 6, 3, 7]], order='F')

    def test_reshape_varargs_shape_column_order(self):
        matrix = Matrix.Matrix(2, 4, 4, value=[[0, 1, 2, 3], [4, 5, 6, 7]])
        reshaped = matrix.reshape(1, 8, order='F')
        self.check_against_expected(reshaped, [[0, 4, 1, 5, 2, 6, 3, 7]])

    def test_reshape_nonsquare_tuple_shape_column_order_1(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (2, 6),
                           [[0, 6, 1, 7, 2, 8], [3, 9, 4, 10, 5, 11]], order='F')

    def test_reshape_nonsquare_tuple_shape_column_order_2(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (6, 2),
                           [[0, 7], [3, 10], [6, 2], [9, 5], [1, 8], [4, 11]], order='F')

    def test_reshape_nonsquare_tuple_shape_column_order_3(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (3, 4),
                           [[0, 9, 7, 5], [3, 1, 10, 8], [6, 4, 2, 11]], order='F')

    def test_reshape_nonsquare_tuple_shape_column_order_4(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5]], 2, 3, (3, 2),
                           [[0, 4], [3, 2], [1, 5]], order='F')

    def test_reshape_nonsquare_tuple_shape_column_order_5(self):
        self.check_reshape([[0, 1, 2]], 1, 3, (3, 1),
                           [[0], [1], [2]], order='F')

    def test_reshape_nonsquare_int_shape_column_order(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, 12,
                           [[0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]], order='F')

    def test_reshape_nonsquare_negative_one_shape_column_order(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, -1,
                           [[0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]], order='F')

    def test_reshape_nonsquare_tuple_with_negative_one_shape_1_column_order(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (-1, 12),
                           [[0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]], order='F')

    def test_reshape_nonsquare_tuple_with_negative_one_shape_2_column_order(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (12, -1),
                           [[0], [3], [6], [9], [1], [4], [7], [10], [2], [5], [8], [11]],
                           order='F')

    def test_reshape_nonsquare_incomplete_tuple_shape_column_order(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (12,),
                           [[0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]], order='F')

    def test_reshape_nonsquare_incomplete_tuple_with_negative_one_shape_column_order(self):
        self.check_reshape([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 4, 3, (-1,),
                           [[0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]], order='F')

    # --- invalid shapes and orders ---

    def check_raises_bad_shape(self, shape, order='C'):
        """Assert that reshaping a fixed 2x3 matrix with `shape` raises."""
        matrix = Matrix.Matrix(2, 3, 4, value=[[0, 1, 2], [3, 4, 5]])
        with self.assertRaises(pyrtl.PyrtlError):
            matrix.reshape(shape, order=order)

    def test_reshape_bad_tuple_shape_1(self):
        # Element count (4) does not match the 6 elements present.
        self.check_raises_bad_shape((4,))

    def test_reshape_bad_tuple_shape_2(self):
        # More than two dimensions is not supported.
        self.check_raises_bad_shape((1, 6, 12))

    def test_reshape_bad_tuple_shape_3(self):
        self.check_raises_bad_shape((1, 'bad'))

    def test_reshape_bad_tuple_shape_4(self):
        self.check_raises_bad_shape('bad')

    def test_reshape_bad_tuple_shape_5(self):
        # At most one dimension may be the -1 wildcard.
        self.check_raises_bad_shape((-1, -1))

    def test_reshape_bad_tuple_shape_count(self):
        self.check_raises_bad_shape((1, 7))

    def test_reshape_bad_tuple_shape_order(self):
        # Only 'C' and 'F' orders are accepted.
        self.check_raises_bad_shape((1, 6), order='Z')
class TestFlatten(MatrixTestBase):
    """Tests for Matrix.flatten in row-major ('C') and column-major ('F') order."""

    def setUp(self):
        pyrtl.reset_working_block()

    def check_flattened(self, original, rows, columns, expected, order='C'):
        """Flatten a freshly built matrix and compare against expected."""
        matrix = Matrix.Matrix(rows, columns, 4, value=original)
        flattened = matrix.flatten(order)
        self.check_against_expected(flattened, expected)

    def test_flatten_row_wise(self):
        self.check_flattened([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3,
                             [[0, 1, 2, 3, 4, 5, 6, 7, 8]])

    def test_flatten_row_wise_nonsquare_1(self):
        self.check_flattened([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4,
                             [[0, 1, 2, 3, 4, 5, 6, 7]])

    def test_flatten_row_wise_nonsquare_2(self):
        self.check_flattened([[0], [1], [2], [3]], 4, 1,
                             [[0, 1, 2, 3]])

    def test_flatten_row_wise_nonsquare_3(self):
        # Already a row vector; flattening is a no-op.
        self.check_flattened([[0, 1, 2, 3, 4, 5, 6, 7]], 1, 8,
                             [[0, 1, 2, 3, 4, 5, 6, 7]])

    def test_flatten_column_wise(self):
        self.check_flattened([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3,
                             [[0, 3, 6, 1, 4, 7, 2, 5, 8]], order='F')

    def test_flatten_column_wise_nonsquare_1(self):
        self.check_flattened([[0, 1, 2, 3], [4, 5, 6, 7]], 2, 4,
                             [[0, 4, 1, 5, 2, 6, 3, 7]], order='F')

    def test_flatten_column_wise_nonsquare_2(self):
        self.check_flattened([[0], [1], [2], [3]], 4, 1,
                             [[0, 1, 2, 3]], order='F')

    def test_flatten_column_wise_nonsquare_3(self):
        self.check_flattened([[0, 1, 2, 3, 4, 5, 6, 7]], 1, 8,
                             [[0, 1, 2, 3, 4, 5, 6, 7]], order='F')

    def test_flatten_invalid_order(self):
        # Only 'C' and 'F' are valid orders.
        matrix = Matrix.Matrix(3, 3, 4, value=[[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        with self.assertRaises(pyrtl.PyrtlError):
            _flattened = matrix.flatten(order='Z')
class TestPut(MatrixTestBase):
    """Tests for Matrix.put with list/tuple/scalar indices and values,
    and the 'raise'/'wrap'/'clip' out-of-bounds modes."""

    def setUp(self):
        pyrtl.reset_working_block()

    def check_put(self, original, rows, columns, ind, v, expected, mode='raise'):
        """Build a matrix, call put(ind, v, mode), and verify the mutation."""
        matrix = Matrix.Matrix(rows, columns, 4, value=original)
        matrix.put(ind, v, mode=mode)
        self.check_against_expected(matrix, expected)

    def test_put_indices_list_values_1(self):
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       [0, 2], [12, 13],
                       [[12, 1, 13], [3, 4, 5]])

    def test_put_indices_list_values_2(self):
        # Negative flat indices count from the end.
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       [-1, 2, 3], [12, 13, 15],
                       [[0, 1, 13], [15, 4, 12]])

    def test_put_indices_tuple_values(self):
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       [-1, 2, 3], (12, 13, 15),
                       [[0, 1, 13], [15, 4, 12]])

    def test_put_tuple_indices(self):
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       (-1, 2, 3), [12, 13, 15],
                       [[0, 1, 13], [15, 4, 12]])

    def test_put_matrix(self):
        # Values may come from a 1-row Matrix instead of a Python sequence.
        values = Matrix.Matrix(1, 3, 4, value=[[12, 13, 15]])
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       [2, 3, 4], values,
                       [[0, 1, 12], [13, 15, 5]])

    def test_put_indices_list_repeat_v_1(self):
        # A single-element value list is broadcast across all indices.
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       [-1, 2, 3], [12],
                       [[0, 1, 12], [12, 4, 12]])

    def test_put_indices_list_repeat_v_2(self):
        # A bare scalar is likewise broadcast.
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       [-1, 2, 3], 12,
                       [[0, 1, 12], [12, 4, 12]])

    def test_put_indices_negative(self):
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       -4, 12,
                       [[0, 1, 12], [3, 4, 5]])

    def test_put_empty(self):
        # An empty value sequence leaves the matrix untouched.
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       [0, 1], [],
                       [[0, 1, 2], [3, 4, 5]])

    def test_put_indices_raise_1(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                           22, 12,
                           [[0, 1, 2], [3, 4, 12]])

    def test_put_indices_raise_2(self):
        with self.assertRaises(pyrtl.PyrtlError):
            self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                           -22, 12,
                           [[12, 1, 2], [3, 4, 5]])

    def test_put_indices_wrap_1(self):
        # 'wrap' reduces the index modulo the element count.
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       22, 12,
                       [[0, 1, 2], [3, 12, 5]], mode='wrap')

    def test_put_indices_wrap_2(self):
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       -22, 12,
                       [[0, 1, 12], [3, 4, 5]], mode='wrap')

    def test_put_indices_clip_1(self):
        # 'clip' pins out-of-range indices to the nearest end.
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       22, 12,
                       [[0, 1, 2], [3, 4, 12]], mode='clip')

    def test_put_indices_clip_2(self):
        self.check_put([[0, 1, 2], [3, 4, 5]], 2, 3,
                       -22, 12,
                       [[12, 1, 2], [3, 4, 5]], mode='clip')
class TestSum(MatrixTestBase):
    """Tests for Matrix.sum over the whole matrix (axis=None) or one axis."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_sum_basic(self):
        self.sum([[0]], 1, 1, 2, None, 0)

    def test_sum_basic_column(self):
        self.sum([[0]], 1, 1, 2, 0, [[0]])

    def test_sum_basic_row(self):
        self.sum([[0]], 1, 1, 2, 1, [[0]])

    def test_sum_3_by_3(self):
        self.sum([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, None, 36)

    def test_sum_3_by_3_column(self):
        # axis=0 sums down each column.
        self.sum([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 0, [[9, 12, 15]])

    def test_sum_3_by_3_row(self):
        # axis=1 sums across each row.
        self.sum([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 1, [[3, 12, 21]])

    def test_sum_4_by_1(self):
        self.sum([[0], [1], [0], [1]], 4, 1, 4, None, 2)

    def test_sum_4_by_1_column(self):
        self.sum([[0], [1], [0], [1]], 4, 1, 4, 0, [[2]])

    def test_sum_4_by_1_row(self):
        self.sum([[0], [1], [0], [1]], 4, 1, 4, 1, [[0, 1, 0, 1]])

    def test_sum_1_by_4(self):
        self.sum([[0, 1, 0, 1]], 1, 4, 4, None, 2)

    def test_sum_1_by_4_column(self):
        self.sum([[0, 1, 0, 1]], 1, 4, 4, 0, [[0, 1, 0, 1]])

    def test_sum_1_by_4_row(self):
        self.sum([[0, 1, 0, 1]], 1, 4, 4, 1, [[2]])

    '''
    def test_sum_random_case(self):
        rows, columns, bits, axis = random.randint(
            1, 5), random.randint(1, 5), random.randint(1, 5), random.randint(0, 2)
        if axis == 2:
            axis = None
        matrix = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                matrix[i][j] = random.randint(1, 2**bits - 1)
        self.sum(matrix, rows, columns, bits, axis)
    '''

    def test_sum_wire(self):
        # A bare WireVector sums to its own value.
        sum_wire = Matrix.sum(pyrtl.Const(3))
        self.check_against_expected(sum_wire, 3)

    def test_sum_fail_string(self):
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.sum("1", 0)

    def test_sum_fail_negative_axis(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.sum(matrix, -1)

    def test_sum_fail_axis_out_of_bounds(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.sum(matrix, 2)

    def test_sum_fail_string_axis(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.sum(matrix, "0")

    def test_sum_fail_string_bits(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.sum(matrix, axis=0, bits="0")

    def test_sum_fail_negative_bits(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.sum(matrix, axis=0, bits=-1)

    def test_sum_fail_zero_bits(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.sum(matrix, axis=0, bits=0)

    def sum(self, int_matrix, rows, columns, bits, axis, expected_output):
        """Build a matrix with headroom (max_bits) and check Matrix.sum."""
        matrix = Matrix.Matrix(rows, columns, bits, value=int_matrix, max_bits=bits * rows)
        result = Matrix.sum(matrix, axis=axis, bits=bits * max(rows, columns))
        self.check_against_expected(result, expected_output)
class TestMin(MatrixTestBase):
    """Tests for Matrix.min over the whole matrix (axis=None) or one axis."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_min_basic(self):
        self.min([[0]], 1, 1, 2, None, 0)

    def test_min_basic_column(self):
        self.min([[0]], 1, 1, 2, 0, [[0]])

    def test_min_basic_row(self):
        self.min([[0]], 1, 1, 2, 1, [[0]])

    def test_min_3_by_3(self):
        self.min([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, None, 0)

    def test_min_3_by_3_column(self):
        # axis=0 takes the minimum of each column.
        self.min([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 0, [[0, 1, 2]])

    def test_min_3_by_3_row(self):
        # axis=1 takes the minimum of each row.
        self.min([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 1, [[0, 3, 6]])

    def test_min_4_by_1(self):
        self.min([[0], [1], [0], [1]], 4, 1, 4, None, 0)

    def test_min_4_by_1_column(self):
        self.min([[0], [1], [0], [1]], 4, 1, 4, 0, [[0]])

    def test_min_4_by_1_row(self):
        self.min([[0], [1], [0], [1]], 4, 1, 4, 1, [[0, 1, 0, 1]])

    def test_min_1_by_4(self):
        self.min([[0, 1, 0, 1]], 1, 4, 4, None, 0)

    def test_min_1_by_4_column(self):
        self.min([[0, 1, 0, 1]], 1, 4, 4, 0, [[0, 1, 0, 1]])

    def test_min_1_by_4_row(self):
        self.min([[0, 1, 0, 1]], 1, 4, 4, 1, [[0]])

    '''
    def test_min_random_case(self):
        rows, columns, bits, axis = random.randint(
            1, 5), random.randint(1, 5), random.randint(1, 5), random.randint(0, 2)
        if axis == 2:
            axis = None
        matrix = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                matrix[i][j] = random.randint(1, 2**bits - 1)
        self.min(matrix, rows, columns, bits, axis)
    '''

    def test_min_wire(self):
        # A bare WireVector's minimum is its own value.
        min_wire = Matrix.min(pyrtl.Const(3))
        self.check_against_expected(min_wire, 3)

    def test_min_fail_string(self):
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.min("1", 0)

    def test_min_fail_axis_out_of_bounds(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.min(matrix, 4)

    def test_min_fail_axis_negative(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.min(matrix, -1)

    def test_min_fail_axis_string(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.min(matrix, "0")

    def test_min_fail_bits_string(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.min(matrix, axis=0, bits="1")

    def test_min_fail_bits_zero(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.min(matrix, axis=0, bits=0)

    def test_min_fail_bits_negative(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.min(matrix, axis=0, bits=-2)

    def min(self, int_matrix, rows, columns, bits, axis, expected_output):
        """Build a matrix with headroom (max_bits) and check Matrix.min."""
        matrix = Matrix.Matrix(rows, columns, bits, value=int_matrix, max_bits=bits * rows)
        result = Matrix.min(matrix, axis=axis, bits=bits * max(rows, columns))
        self.check_against_expected(result, expected_output)
class TestMax(MatrixTestBase):
    """Tests for Matrix.max over the whole matrix (axis=None) or one axis."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_max_basic(self):
        self.max([[0]], 1, 1, 2, None, 0)

    def test_max_basic_columns(self):
        self.max([[0]], 1, 1, 2, 0, [[0]])

    def test_max_basic_rows(self):
        self.max([[0]], 1, 1, 2, 1, [[0]])

    def test_max_3_by_3(self):
        self.max([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, None, 8)

    def test_max_3_by_3_columns(self):
        # axis=0 takes the maximum of each column.
        self.max([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 0, [[6, 7, 8]])

    def test_max_3_by_3_rows(self):
        # axis=1 takes the maximum of each row.
        self.max([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 1, [[2, 5, 8]])

    def test_max_4_by_1(self):
        self.max([[0], [1], [0], [1]], 4, 1, 4, None, 1)

    def test_max_4_by_1_columns(self):
        self.max([[0], [1], [0], [1]], 4, 1, 4, 0, [[1]])

    def test_max_4_by_1_rows(self):
        self.max([[0], [1], [0], [1]], 4, 1, 4, 1, [[0, 1, 0, 1]])

    def test_max_1_by_4(self):
        self.max([[0, 1, 0, 1]], 1, 4, 4, None, 1)

    def test_max_1_by_4_columns(self):
        self.max([[0, 1, 0, 1]], 1, 4, 4, 0, [[0, 1, 0, 1]])

    def test_max_1_by_4_rows(self):
        self.max([[0, 1, 0, 1]], 1, 4, 4, 1, [[1]])

    '''
    def test_max_random_case(self):
        rows, columns, bits, axis = random.randint(
            1, 5), random.randint(1, 5), random.randint(1, 5), random.randint(0, 2)
        if axis == 2:
            axis = None
        matrix = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                matrix[i][j] = random.randint(1, 2**bits - 1)
        self.max(matrix, rows, columns, bits, axis)
    '''

    def test_max_wire(self):
        # A bare WireVector's maximum is its own value.
        max_wire = Matrix.max(pyrtl.Const(3))
        self.check_against_expected(max_wire, 3)

    def test_max_fail_string(self):
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.max("1", 0)

    def test_max_fail_axis_out_of_bounds(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.max(matrix, 4)

    def test_max_fail_axis_negative(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.max(matrix, -1)

    def test_max_fail_axis_string(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.max(matrix, "0")

    def test_max_fail_bits_string(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.max(matrix, axis=0, bits="1")

    def test_max_fail_bits_zero(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.max(matrix, axis=0, bits=0)

    def test_max_fail_bits_negative(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.max(matrix, axis=0, bits=-1)

    def max(self, int_matrix, rows, columns, bits, axis, expected_output):
        """Build a matrix with headroom (max_bits) and check Matrix.max."""
        matrix = Matrix.Matrix(rows, columns, bits, value=int_matrix, max_bits=bits * rows)
        result = Matrix.max(matrix, axis=axis, bits=bits * max(rows, columns))
        self.check_against_expected(result, expected_output)
class TestArgMax(MatrixTestBase):
    """Tests for Matrix.argmax over the whole matrix (axis=None) or one axis."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_argument_max_basic(self):
        self.argument_max([[0]], 1, 1, 2, None, 0)

    def test_argument_max_basic_columns(self):
        self.argument_max([[0]], 1, 1, 2, 0, [[0]])

    def test_argument_max_basic_rows(self):
        self.argument_max([[0]], 1, 1, 2, 1, [[0]])

    def test_argument_max_3_by_3(self):
        # Flat index of the largest element.
        self.argument_max([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, None, 8)

    def test_argument_max_3_by_3_columns(self):
        self.argument_max([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 0, [[2, 2, 2]])

    def test_argument_max_3_by_3_rows(self):
        self.argument_max([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, 1, [[2, 2, 2]])

    def test_argument_max_4_by_1(self):
        self.argument_max([[0], [1], [0], [1]], 4, 1, 4, None, 1)

    def test_argument_max_4_by_1_columns(self):
        self.argument_max([[0], [1], [0], [1]], 4, 1, 4, 0, [[1]])

    def test_argument_max_4_by_1_rows(self):
        self.argument_max([[0], [1], [0], [1]], 4, 1, 4, 1, [[0, 0, 0, 0]])

    def test_argument_max_1_by_4(self):
        self.argument_max([[0, 1, 0, 1]], 1, 4, 4, None, 1)

    def test_argument_max_1_by_4_columns(self):
        self.argument_max([[0, 1, 0, 1]], 1, 4, 4, 0, [[0, 0, 0, 0]])

    def test_argument_max_1_by_4_rows(self):
        self.argument_max([[0, 1, 0, 1]], 1, 4, 4, 1, [[1]])

    '''
    def test_argument_max_random_case(self):
        rows, columns, bits, axis = random.randint(
            1, 5), random.randint(1, 5), random.randint(1, 5), random.randint(0, 2)
        if axis == 2:
            axis = None
        matrix = [[0 for _ in range(columns)]
                  for _ in range(rows)]
        for i in range(rows):
            for j in range(columns):
                matrix[i][j] = random.randint(1, 2**bits - 1)
        self.argument_max(matrix, rows, columns, bits, axis)
    '''

    def test_argument_max_wire(self):
        # For a bare WireVector the only possible argmax is index 0.
        arg_max_wire = Matrix.argmax(pyrtl.Const(3))
        self.check_against_expected(arg_max_wire, 0)

    def test_argument_max_string(self):
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.argmax("1", axis=0)

    def test_argument_max_axis_out_of_bounds(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.argmax(matrix, axis=4)

    def test_argument_max_axis_negative(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.argmax(matrix, axis=-1)

    def test_argument_max_axis_string(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.argmax(matrix, "1")

    def test_argument_max_bits_string(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.argmax(matrix, axis=1, bits="1")

    def test_argument_max_bits_negative(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.argmax(matrix, axis=1, bits=-1)

    def test_argument_max_bits_zero(self):
        matrix = Matrix.Matrix(3, 3, 3)
        with self.assertRaises(pyrtl.PyrtlError):
            _output = Matrix.argmax(matrix, axis=1, bits=0)

    def argument_max(self, int_matrix, rows, columns, bits, axis, expected_output):
        """Build a matrix with headroom (max_bits) and check Matrix.argmax."""
        matrix = Matrix.Matrix(rows, columns, bits, value=int_matrix, max_bits=bits * rows)
        result = Matrix.argmax(matrix, axis=axis, bits=bits * max(rows, columns))
        self.check_against_expected(result, expected_output)
class TestDot(MatrixTestBase):
def setUp(self):
pyrtl.reset_working_block()
def tearDown(self):
pyrtl.reset_working_block()
def test_dot_basic(self):
self.dot([[0]], 1, 1, 2, [[0]], 1, 1, 3, 0)
def test_dot_1_by_2_multiply_2_by_1(self):
self.dot([[1, 2]], 1, 2, 2, [[1], [2]], 2, 1, 3, 5)
def test_dot_1_by_2_multiply_1_by_2(self):
self.dot([[1, 2]], 1, 2, 2, [[1, 2]], 1, 2, 3, 5)
def test_dot_2_by_1_multiply_2_by_1(self):
self.dot([[1], [2]], 2, 1, 3, [[1], [2]], 2, 1, 3, 5)
def test_dot_2_by_1_multiply_1_by_2(self):
self.dot([[1], [2]], 2, 1, 3, [[1, 2]], 1, 2, 3, 5)
def test_dot_3_by_3_multiply_3_by_3_same(self):
self.dot([[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4,
[[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[15, 18, 21],
[42, 54, 66],
[69, 90, 111]])
def test_dot_3_by_3_multiply_3_by_3_different(self):
self.dot([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
[[0, 1, 2], [3, 4, 5], [6, 7, 8]], 3, 3, 4, [[30, 39, 48],
[54, 70, 86],
[21, 29, 37]])
def test_dot_both_wires(self):
first = pyrtl.Const(5)
second = pyrtl.Const(3)
dot_product = Matrix.dot(first, second)
self.check_against_expected(dot_product, 15)
def test_dot_first_wire(self):
first = pyrtl.Const(5)
second = Matrix.Matrix(1, 1, 3, value=[[3]])
dot_product = Matrix.dot(first, second)
self.check_against_expected(dot_product, 15)
def test_dot_second_wire(self):
first = Matrix.Matrix(1, 1, 3, value=[[5]])
second = pyrtl.Const(3)
dot_product = Matrix.dot(first, second)
self.check_against_expected(dot_product, 15)
def test_dot_fail_int_second(self):
first_matrix = Matrix.Matrix(3, 2, 3)
with self.assertRaises(pyrtl.PyrtlError):
_result = Matrix.dot(first_matrix, 1)
def test_dot_fail_int_first(self):
first_matrix = Matrix.Matrix(3, 2, 3)
with self.assertRaises(pyrtl.PyrtlError):
_result = Matrix.dot(1, first_matrix)
def test_dot_fail_1_by_2_multiply_1_by_3(self):
with self.assertRaises(pyrtl.PyrtlError):
self.dot([[1, 2]], 1, 2, 2, [[1, 2, 3]], 1, 3, 3, [[]])
def test_dot_fail_3_by_3_multiply_2_by_2(self):
with self.assertRaises(pyrtl.PyrtlError):
self.dot([[2, 4, 3], [5, 4, 7], [2, 5, 1]], 3, 3, 4,
[[0, 1], [0, 1]], 2, 2, 4, [[]])
# NOTE(review): randomized test disabled by being wrapped in a string
# literal.  As written it passes only 8 arguments to self.dot(), which
# requires 9 (expected_output is missing) -- fix before re-enabling.
'''
def test_dot_random_inner_product(self):
    columns, bits1, bits2 = random.randint(
        1, 5), random.randint(1, 5), random.randint(1, 5)
    first = [[0 for _ in range(columns)]
             for _ in range(1)]
    second = [[0 for _ in range(columns)]
              for _ in range(1)]

    for i in range(1):
        for j in range(columns):
            first[i][j] = random.randint(1, 2**bits1 - 1)

    for i in range(1):
        for j in range(columns):
            second[i][j] = random.randint(1, 2**bits2 - 1)

    self.dot(first, 1, columns, bits1,
             second, 1, columns, bits2)
'''
# NOTE(review): second disabled randomized test; same problem as the one
# above -- it omits the expected_output argument that self.dot() requires.
'''
def test_dot_random_matrix_multiply(self):
    rows, columns1, columns2, bits1, bits2 = random.randint(
        2, 3), random.randint(2, 3), random.randint(
        2, 3), random.randint(1, 5), random.randint(1, 5)
    first = [[0 for _ in range(columns1)]
             for _ in range(rows)]
    second = [[0 for _ in range(columns2)]
              for _ in range(columns1)]

    for i in range(rows):
        for j in range(columns1):
            first[i][j] = random.randint(1, 2**bits1 - 1)

    for i in range(columns1):
        for j in range(columns2):
            second[i][j] = random.randint(1, 2**bits2 - 1)

    self.dot(first, rows, columns1, bits1,
             second, columns1, columns2, bits2)
'''
def dot(self, first_int_matrix, rows1, columns1, bits1,
        second_int_matrix, rows2, columns2, bits2, expected_output):
    """Build two Matrix objects from plain int lists, dot them, and
    simulate the result against expected_output."""
    lhs = Matrix.Matrix(rows1, columns1, bits1, value=first_int_matrix)
    rhs = Matrix.Matrix(rows2, columns2, bits2, value=second_int_matrix)
    self.check_against_expected(Matrix.dot(lhs, rhs), expected_output)
class TestHStack(MatrixTestBase):
    """Exercise Matrix.hstack and default-axis Matrix.concatenate."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_hstack_two_row_vectors(self):
        left = Matrix.Matrix(1, 3, bits=4, value=[[1, 2, 3]])
        right = Matrix.Matrix(1, 5, bits=8, value=[[4, 5, 6, 7, 8]])
        stacked = Matrix.hstack(left, right)
        # The result takes the widest bit-width among its inputs.
        self.assertEqual(stacked.bits, 8)
        self.assertEqual(stacked.max_bits, max(left.max_bits, right.max_bits))
        self.check_against_expected(stacked, [[1, 2, 3, 4, 5, 6, 7, 8]])

    def test_hstack_one_row_vector(self):
        # Stacking a single matrix is effectively a copy.
        original = Matrix.Matrix(1, 3, bits=4, value=[[1, 2, 3]])
        copied = Matrix.hstack(original)
        self.assertEqual(copied.bits, 4)
        self.assertEqual(copied.max_bits, original.max_bits)
        self.check_against_expected(copied, [[1, 2, 3]])

    def test_concatenate(self):
        # concatenate() with the default axis stacks horizontally.
        lhs = Matrix.Matrix(2, 3, bits=4, value=[[1, 2, 3], [4, 5, 6]])
        rhs = Matrix.Matrix(2, 5, bits=8, value=[[7, 8, 9, 10, 11], [12, 13, 14, 15, 16]])
        joined = Matrix.concatenate([lhs, rhs])
        self.check_against_expected(
            joined,
            [[1, 2, 3, 7, 8, 9, 10, 11],
             [4, 5, 6, 12, 13, 14, 15, 16]]
        )

    def test_hstack_several_matrices(self):
        a = Matrix.Matrix(2, 3, bits=4, value=[[1, 2, 3], [4, 5, 6]])
        b = Matrix.Matrix(2, 5, bits=8, value=[[7, 8, 9, 10, 11], [12, 13, 14, 15, 16]])
        c = Matrix.Matrix(2, 1, bits=3, value=[[0], [1]])
        stacked = Matrix.hstack(a, b, c)
        self.assertEqual(stacked.bits, 8)
        self.assertEqual(stacked.max_bits, max(a.max_bits, b.max_bits, c.max_bits))
        self.check_against_expected(
            stacked,
            [[1, 2, 3, 7, 8, 9, 10, 11, 0],
             [4, 5, 6, 12, 13, 14, 15, 16, 1]]
        )

    def test_hstack_fail_on_inconsistent_rows(self):
        # Every operand must have the same number of rows.
        a = Matrix.Matrix(1, 2, bits=2, value=[[0, 1]])
        b = Matrix.Matrix(2, 2, bits=4, value=[[1, 2], [3, 4]])
        c = Matrix.Matrix(1, 4, bits=3, value=[[0, 0, 0, 0]])
        with self.assertRaises(pyrtl.PyrtlError):
            _v = Matrix.hstack(a, b, c)

    def test_hstack_empty_args_fails(self):
        # At least one matrix is required.
        with self.assertRaises(pyrtl.PyrtlError):
            _v = Matrix.hstack()

    def test_hstack_on_non_matrices_fails(self):
        # Raw WireVectors are not accepted by hstack.
        wire = pyrtl.WireVector(1)
        matrix = Matrix.Matrix(1, 2, bits=2, value=[[0, 1]])
        with self.assertRaises(pyrtl.PyrtlError):
            _v = Matrix.hstack(wire, matrix)
class TestVStack(MatrixTestBase):
    """Exercise Matrix.vstack and axis=1 Matrix.concatenate."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_vstack_two_column_vectors(self):
        top = Matrix.Matrix(3, 1, bits=4, value=[[1], [2], [3]])
        bottom = Matrix.Matrix(5, 1, bits=8, value=[[4], [5], [6], [7], [8]])
        stacked = Matrix.vstack(top, bottom)
        # The result takes the widest bit-width among its inputs.
        self.assertEqual(stacked.bits, 8)
        self.assertEqual(stacked.max_bits, max(top.max_bits, bottom.max_bits))
        self.check_against_expected(stacked, [[1], [2], [3], [4], [5], [6], [7], [8]])

    def test_vstack_one_column_vector(self):
        # Stacking a single matrix is effectively a copy.
        original = Matrix.Matrix(3, 1, bits=4, value=[[1], [2], [3]])
        copied = Matrix.vstack(original)
        self.assertEqual(copied.bits, 4)
        self.assertEqual(copied.max_bits, original.max_bits)
        self.check_against_expected(copied, [[1], [2], [3]])

    def test_concatenate(self):
        # concatenate() with axis=1 stacks vertically.
        upper = Matrix.Matrix(2, 3, bits=5, value=[[1, 2, 3], [4, 5, 6]])
        lower = Matrix.Matrix(1, 3, bits=10, value=[[7, 8, 9]])
        joined = Matrix.concatenate([upper, lower], axis=1)
        self.check_against_expected(
            joined,
            [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9]]
        )

    def test_vstack_several_matrix(self):
        a = Matrix.Matrix(2, 3, bits=5, value=[[1, 2, 3], [4, 5, 6]])
        b = Matrix.Matrix(1, 3, bits=10, value=[[7, 8, 9]])
        c = Matrix.Matrix(3, 3, bits=8, value=[[10, 11, 12], [13, 14, 15], [16, 17, 18]])
        stacked = Matrix.vstack(a, b, c)
        self.assertEqual(stacked.bits, 10)
        self.assertEqual(stacked.max_bits, max(a.max_bits, b.max_bits, c.max_bits))
        self.check_against_expected(
            stacked,
            [[1, 2, 3],
             [4, 5, 6],
             [7, 8, 9],
             [10, 11, 12],
             [13, 14, 15],
             [16, 17, 18]]
        )

    def test_vstack_fail_on_inconsistent_cols(self):
        # Every operand must have the same number of columns.
        a = Matrix.Matrix(1, 1, bits=2, value=[[0]])
        b = Matrix.Matrix(2, 2, bits=4, value=[[1, 2], [3, 4]])
        c = Matrix.Matrix(3, 1, bits=3, value=[[0], [0], [0]])
        with self.assertRaises(pyrtl.PyrtlError):
            _v = Matrix.vstack(a, b, c)

    def test_vstack_empty_args_fails(self):
        # At least one matrix is required.
        with self.assertRaises(pyrtl.PyrtlError):
            _v = Matrix.vstack()

    def test_vstack_on_non_matrices_fails(self):
        # Raw WireVectors are not accepted by vstack.
        wire = pyrtl.WireVector(1)
        matrix = Matrix.Matrix(2, 1, bits=2, value=[[0], [1]])
        with self.assertRaises(pyrtl.PyrtlError):
            _v = Matrix.vstack(wire, matrix)
class TestHelpers(unittest.TestCase):
    """Unit tests for the module-level Matrix.list_to_int helper."""

    def setUp(self):
        pyrtl.reset_working_block()

    def test_list_to_int(self):
        # Each element is packed into n_bits and the elements concatenated
        # row-major, most significant first.
        cases = [
            (([[0]], 1), 0b0),
            (([[1, 2]], 2), 0b0110),
            (([[1, 2, 3]], 2), 0b011011),
            (([[4, 9, 11], [3, 5, 6]], 4), 0b010010011011001101010110),
        ]
        for (int_matrix, n_bits), expected in cases:
            self.assertEqual(Matrix.list_to_int(int_matrix, n_bits), expected)

    def test_list_to_int_truncates(self):
        # Values wider than n_bits keep only their low n_bits (27 -> 0b011).
        self.assertEqual(Matrix.list_to_int([[4, 9, 27]], 3), 0b100001011)

    def test_list_to_int_negative(self):
        # Negative values are encoded as n_bits-wide two's complement.
        self.assertEqual(Matrix.list_to_int([[-4, -9, 11]], 5), 0b111001011101011)

    def test_list_to_int_negative_truncates(self):
        # Truncation applies to the two's-complement encoding as well.
        self.assertEqual(Matrix.list_to_int([[-4, -9, 11]], 3), 0b100111011)

    def test_list_to_int_non_positive_n_bits(self):
        # A width of zero (or less) is rejected.
        with self.assertRaises(pyrtl.PyrtlError):
            Matrix.list_to_int([[3]], 0)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
Jannes123/django-oscar | tests/integration/offer/absolute_benefit_tests.py | 51 | 15825 | from decimal import Decimal as D
from django.core import exceptions
from django.test import TestCase
import mock
from oscar.apps.offer import models
from oscar.apps.offer.utils import Applicator
from oscar.test.basket import add_product, add_products
from oscar.test import factories
class TestAnAbsoluteDiscountAppliedWithCountConditionOnDifferentRange(TestCase):
    # The condition range and the benefit range are deliberately disjoint:
    # buying enough "condition" products discounts the "benefit" products.

    def setUp(self):
        self.condition_product = factories.ProductFactory()
        condition_range = factories.RangeFactory()
        condition_range.add_product(self.condition_product)
        self.condition = models.CountCondition.objects.create(
            range=condition_range,
            type=models.Condition.COUNT,
            value=2)

        self.benefit_product = factories.ProductFactory()
        benefit_range = factories.RangeFactory()
        benefit_range.add_product(self.benefit_product)
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=benefit_range,
            type=models.Benefit.FIXED,
            value=D('3.00'))

        self.offer = models.ConditionalOffer(
            id=1, condition=self.condition, benefit=self.benefit)
        self.basket = factories.create_basket(empty=True)
        self.applicator = Applicator()

    # NOTE(review): "succcessful" is a typo in the method name; left as-is
    # because renaming would change the published test id.
    def test_succcessful_application_consumes_correctly(self):
        # Exactly satisfying the condition applies the offer exactly once.
        add_product(self.basket, product=self.condition_product, quantity=2)
        add_product(self.basket, product=self.benefit_product, quantity=1)

        self.applicator.apply_offers(self.basket, [self.offer])

        discounts = self.basket.offer_applications.offer_discounts
        self.assertEqual(len(discounts), 1)
        self.assertEqual(discounts[0]['freq'], 1)

    def test_condition_is_consumed_correctly(self):
        # Testing an error case reported on the mailing list: surplus
        # condition/benefit quantities must not re-trigger the offer.
        add_product(self.basket, product=self.condition_product, quantity=3)
        add_product(self.basket, product=self.benefit_product, quantity=2)

        self.applicator.apply_offers(self.basket, [self.offer])

        discounts = self.basket.offer_applications.offer_discounts
        self.assertEqual(len(discounts), 1)
        self.assertEqual(discounts[0]['freq'], 1)
class TestAnAbsoluteDiscountAppliedWithCountCondition(TestCase):
    # Benefit and condition share one "all products" range: a fixed 3.00
    # discount triggered by buying at least 2 items.

    def setUp(self):
        range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.CountCondition.objects.create(
            range=range,
            type=models.Condition.COUNT,
            value=2)
        self.offer = mock.Mock()
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=range,
            type=models.Benefit.FIXED,
            value=D('3.00'))
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        # No items -> condition unmet -> no discount, nothing consumed.
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_matches_condition_with_one_line(self):
        add_product(self.basket, price=D('12.00'), quantity=2)
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

        # Check the discount is applied equally to each item in the line
        # (24.00 - 3.00 over 2 items -> 10.50 each).
        line = self.basket.all_lines()[0]
        prices = line.get_price_breakdown()
        self.assertEqual(1, len(prices))
        self.assertEqual(D('10.50'), prices[0][0])

    def test_applies_correctly_to_basket_which_matches_condition_with_multiple_lines(self):
        # Use a basket with 2 lines
        add_products(self.basket, [
            (D('12.00'), 1), (D('12.00'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertTrue(result.is_successful)
        self.assertFalse(result.is_final)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

        # Check the discount is applied equally to each line
        for line in self.basket.all_lines():
            self.assertEqual(D('1.50'), line.discount_value)

    def test_applies_correctly_to_basket_which_matches_condition_with_multiple_lines_and_lower_total_value(self):
        # Use a basket with 2 lines.  The discount is capped at the basket
        # total (2.50 < 3.00).
        add_products(self.basket, [
            (D('1.00'), 1), (D('1.50'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertTrue(result.is_successful)
        self.assertFalse(result.is_final)
        self.assertEqual(D('2.50'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition(self):
        add_products(self.basket, [
            (D('12.00'), 2), (D('10.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(4, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition_with_smaller_prices_than_discount(self):
        add_products(self.basket, [
            (D('2.00'), 2), (D('4.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(4, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition_with_smaller_prices_than_discount_and_higher_prices_first(self):
        # NOTE(review): despite the name, the products are added in exactly
        # the same (lower-price-first) order as the previous test; the
        # higher-priced line presumably ought to come first -- TODO confirm.
        add_products(self.basket, [
            (D('2.00'), 2), (D('4.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(4, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscount(TestCase):
    """Check how a fixed discount is rounded across several basket lines."""

    def setUp(self):
        product_range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.CountCondition.objects.create(
            range=product_range,
            type=models.Condition.COUNT,
            value=2)
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=product_range,
            type=models.Benefit.FIXED,
            value=D('4.00'))
        self.offer = mock.Mock()
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_when_discounts_need_rounding(self):
        # Three equal lines share a 4.00 discount, which does not divide
        # evenly by 3; the rounding remainder lands on the last line.
        for line_price in [D('2.00'), D('2.00'), D('2.00')]:
            add_product(self.basket, line_price)
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('4.00'), result.discount)

        per_line = [line.discount_value for line in self.basket.all_lines()]
        self.assertEqual(len(per_line), 3)
        for actual, expected in zip(per_line, [D('1.33'), D('1.33'), D('1.34')]):
            self.assertEqual(actual, expected)
class TestAnAbsoluteDiscountWithMaxItemsSetAppliedWithCountCondition(TestCase):
    # Same as the un-capped count-condition tests, but the benefit may only
    # touch one item (max_affected_items=1).

    def setUp(self):
        range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.CountCondition.objects.create(
            range=range,
            type=models.Condition.COUNT,
            value=2)
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=range,
            type=models.Benefit.FIXED,
            value=D('3.00'),
            max_affected_items=1)
        self.offer = mock.Mock()
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        # No items -> condition unmet -> nothing happens.
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_matches_condition(self):
        add_product(self.basket, D('12.00'), 2)
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        # Both items are consumed even though only one is discounted.
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition(self):
        add_products(self.basket, [(D('12.00'), 2), (D('10.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(2, self.basket.num_items_without_discount)

    def test_applies_correctly_to_basket_which_exceeds_condition_but_with_smaller_prices_than_discount(self):
        # Only one 1.00 item may be discounted, so the 3.00 benefit is
        # capped at 1.00.
        add_products(self.basket, [(D('2.00'), 2), (D('1.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('1.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(2, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscountAppliedWithValueCondition(TestCase):
    # 3.00 off once the basket value reaches 10.00.

    def setUp(self):
        range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.ValueCondition.objects.create(
            range=range,
            type=models.Condition.VALUE,
            value=D('10.00'))
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=range,
            type=models.Benefit.FIXED,
            value=D('3.00'))
        self.offer = mock.Mock()
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        # No items -> condition unmet -> nothing happens.
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_single_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('10.00'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(1, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('5.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition(self):
        # 3 x 4.00 = 12.00 exceeds the 10.00 threshold.
        add_products(self.basket, [(D('4.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(3, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition_but_matches_boundary(self):
        # 2 items (10.00) already satisfy the condition; the third is
        # still marked as discounted here since no max_affected_items is set.
        add_products(self.basket, [(D('5.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(3, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscountWithMaxItemsSetAppliedWithValueCondition(TestCase):
    # Value condition (10.00) combined with max_affected_items=1.

    def setUp(self):
        range = models.Range.objects.create(
            name="All products", includes_all_products=True)
        self.condition = models.ValueCondition.objects.create(
            range=range,
            type=models.Condition.VALUE,
            value=D('10.00'))
        self.benefit = models.AbsoluteDiscountBenefit.objects.create(
            range=range,
            type=models.Benefit.FIXED,
            value=D('3.00'),
            max_affected_items=1)
        self.offer = mock.Mock()
        self.basket = factories.create_basket(empty=True)

    def test_applies_correctly_to_empty_basket(self):
        # No items -> condition unmet -> nothing happens.
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('0.00'), result.discount)
        self.assertEqual(0, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_single_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('10.00'), 1)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(1, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_matches_condition(self):
        add_products(self.basket, [(D('5.00'), 2)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition(self):
        add_products(self.basket, [(D('4.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(3, self.basket.num_items_with_discount)
        self.assertEqual(0, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_exceeds_condition_but_matches_boundary(self):
        # Only the two items needed to satisfy the condition are consumed;
        # the third remains available for other offers.
        add_products(self.basket, [(D('5.00'), 3)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('3.00'), result.discount)
        self.assertEqual(2, self.basket.num_items_with_discount)
        self.assertEqual(1, self.basket.num_items_without_discount)

    def test_applies_correctly_to_multi_item_basket_which_matches_condition_but_with_lower_prices_than_discount(self):
        # The single affected item costs 2.00, so the discount is capped
        # at 2.00 rather than the full 3.00.
        add_products(self.basket, [(D('2.00'), 6)])
        result = self.benefit.apply(self.basket, self.condition, self.offer)
        self.assertEqual(D('2.00'), result.discount)
        self.assertEqual(5, self.basket.num_items_with_discount)
        self.assertEqual(1, self.basket.num_items_without_discount)
class TestAnAbsoluteDiscountBenefit(TestCase):
    """Validation rules for the absolute-discount benefit model."""

    def test_requires_a_benefit_value(self):
        # A FIXED benefit created without a value must fail clean().
        full_range = models.Range.objects.create(
            name="", includes_all_products=True)
        benefit = models.Benefit.objects.create(
            type=models.Benefit.FIXED, range=full_range
        )
        with self.assertRaises(exceptions.ValidationError):
            benefit.clean()
| bsd-3-clause |
samdoran/ansible | lib/ansible/modules/cloud/centurylink/clc_firewall_policy.py | 43 | 21975 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_firewall_policy
short_description: Create/delete/update firewall policies
description:
- Create or delete or update firewall polices on Centurylink Cloud
version_added: "2.0"
options:
location:
description:
- Target datacenter for the firewall policy
required: True
state:
description:
- Whether to create or delete the firewall policy
default: present
required: False
choices: ['present', 'absent']
source:
description:
- The list of source addresses for traffic on the originating firewall.
This is required when state is 'present"
default: None
required: False
destination:
description:
- The list of destination addresses for traffic on the terminating firewall.
This is required when state is 'present'
default: None
required: False
ports:
description:
- The list of ports associated with the policy.
TCP and UDP can take in single ports or port ranges.
default: None
required: False
choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456']
firewall_policy_id:
description:
- Id of the firewall policy. This is required to update or delete an existing firewall policy
default: None
required: False
source_account_alias:
description:
- CLC alias for the source account
required: True
destination_account_alias:
description:
- CLC alias for the destination account
default: None
required: False
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
enabled:
description:
- Whether the firewall policy is enabled or disabled
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
---
- name: Create Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify an Firewall Policy at CenturyLink Cloud
clc_firewall:
source_account_alias: WFAD
location: VA1
state: present
source: 10.128.216.0/24
destination: 10.128.216.0/24
ports: Any
destination_account_alias: WFAD
---
- name: Delete Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete an Firewall Policy at CenturyLink Cloud
clc_firewall:
source_account_alias: WFAD
location: VA1
state: absent
firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
'''
RETURN = '''
firewall_policy_id:
description: The fire wall policy id
returned: success
type: string
sample: fc36f1bfd47242e488a9c44346438c05
firewall_policy:
description: The fire wall policy information
returned: success
type: dict
sample:
{
"destination":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"destinationAccount":"wfad",
"enabled":true,
"id":"fc36f1bfd47242e488a9c44346438c05",
"links":[
{
"href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
"rel":"self",
"verbs":[
"GET",
"PUT",
"DELETE"
]
}
],
"ports":[
"any"
],
"source":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"status":"active"
}
'''
# Placeholder substituted at package-build time.
__version__ = '${version}'

# Python 2 stdlib URL parsing (this module predates py3 support).
import urlparse
from time import sleep
from distutils.version import LooseVersion

# Optional third-party dependencies: availability is recorded here and
# checked in ClcFirewallPolicy.__init__ so a helpful fail_json() message
# is produced instead of a raw ImportError traceback.
try:
    import requests
except ImportError:
    REQUESTS_FOUND = False
else:
    REQUESTS_FOUND = True

try:
    import clc as clc_sdk
    from clc import CLCException
    from clc import APIFailedResponse
except ImportError:
    CLC_FOUND = False
    clc_sdk = None
else:
    CLC_FOUND = True
class ClcFirewallPolicy:

    # Shared clc-sdk handle; re-bound per instance in __init__.
    clc = None

    def __init__(self, module):
        """
        Construct the handler and verify runtime dependencies.
        :param module: the AnsibleModule driving this run
        """
        self.clc = clc_sdk
        self.module = module
        self.firewall_dict = {}

        # Fail early with actionable messages if optional deps are missing.
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(
                requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        # presumably defined further down this class (below the visible
        # chunk) -- tags API calls with the module version.
        self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
location=dict(required=True),
source_account_alias=dict(required=True, default=None),
destination_account_alias=dict(default=None),
firewall_policy_id=dict(default=None),
ports=dict(default=None, type='list'),
source=dict(default=None, type='list'),
destination=dict(default=None, type='list'),
wait=dict(default=True),
state=dict(default='present', choices=['present', 'absent']),
enabled=dict(default=True, choices=[True, False])
)
return argument_spec
def process_request(self):
    """
    Execute the main code path, and handle the request
    :return: none (exits via module.exit_json)
    """
    changed = False
    firewall_policy = None
    location = self.module.params.get('location')
    source_account_alias = self.module.params.get('source_account_alias')
    destination_account_alias = self.module.params.get(
        'destination_account_alias')
    firewall_policy_id = self.module.params.get('firewall_policy_id')
    ports = self.module.params.get('ports')
    source = self.module.params.get('source')
    destination = self.module.params.get('destination')
    wait = self.module.params.get('wait')
    state = self.module.params.get('state')
    enabled = self.module.params.get('enabled')

    # Snapshot of the request parameters, handed down to the helpers.
    self.firewall_dict = {
        'location': location,
        'source_account_alias': source_account_alias,
        'destination_account_alias': destination_account_alias,
        'firewall_policy_id': firewall_policy_id,
        'ports': ports,
        'source': source,
        'destination': destination,
        'wait': wait,
        'state': state,
        'enabled': enabled}

    self._set_clc_credentials_from_env()

    # Dispatch on the desired state; each helper is idempotent.
    if state == 'absent':
        changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
            source_account_alias, location, self.firewall_dict)

    elif state == 'present':
        changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
            source_account_alias, location, self.firewall_dict)

    return self.module.exit_json(
        changed=changed,
        firewall_policy_id=firewall_policy_id,
        firewall_policy=firewall_policy)
@staticmethod
def _get_policy_id_from_response(response):
"""
Method to parse out the policy id from creation response
:param response: response from firewall creation API call
:return: policy_id: firewall policy id from creation call
"""
url = response.get('links')[0]['href']
path = urlparse.urlparse(url).path
path_list = os.path.split(path)
policy_id = path_list[-1]
return policy_id
def _set_clc_credentials_from_env(self):
    """
    Set the CLC Credentials on the sdk by reading environment variables
    :return: none (fails the module when no credentials are available)
    """
    env = os.environ
    # NOTE(review): reads CLC_V2_API_PASSWD while the module DOCUMENTATION
    # notes mention CLC_V2_API_PASSWORD -- confirm which name is intended.
    v2_api_token = env.get('CLC_V2_API_TOKEN', False)
    v2_api_username = env.get('CLC_V2_API_USERNAME', False)
    v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
    clc_alias = env.get('CLC_ACCT_ALIAS', False)
    api_url = env.get('CLC_V2_API_URL', False)

    if api_url:
        self.clc.defaults.ENDPOINT_URL_V2 = api_url

    if v2_api_token and clc_alias:
        # Token auth: pokes the token straight into sdk internals.
        self.clc._LOGIN_TOKEN_V2 = v2_api_token
        self.clc._V2_ENABLED = True
        self.clc.ALIAS = clc_alias
    elif v2_api_username and v2_api_passwd:
        # Username/password auth via the sdk's public entry point.
        self.clc.v2.SetCredentials(
            api_username=v2_api_username,
            api_passwd=v2_api_passwd)
    else:
        return self.module.fail_json(
            msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                "environment variables")
def _ensure_firewall_policy_is_present(
        self,
        source_account_alias,
        location,
        firewall_dict):
    """
    Ensures that a given firewall policy is present
    :param source_account_alias: the source account alias for the firewall policy
    :param location: datacenter of the firewall policy
    :param firewall_dict: dictionary of request parameters for firewall policy
    :return: (changed, firewall_policy_id, firewall_policy)
        changed: flag for if a change occurred
        firewall_policy_id: the firewall policy id that was created/updated
        firewall_policy: The firewall_policy object
    """
    firewall_policy = None
    firewall_policy_id = firewall_dict.get('firewall_policy_id')

    if firewall_policy_id is None:
        # No id supplied: create a brand-new policy (skipped in check mode,
        # but still reported as a change).
        if not self.module.check_mode:
            response = self._create_firewall_policy(
                source_account_alias,
                location,
                firewall_dict)
            firewall_policy_id = self._get_policy_id_from_response(
                response)
        changed = True
    else:
        # An id was supplied: the policy must already exist.
        firewall_policy = self._get_firewall_policy(
            source_account_alias, location, firewall_policy_id)
        if not firewall_policy:
            return self.module.fail_json(
                msg='Unable to find the firewall policy id : {0}'.format(
                    firewall_policy_id))
        # Only PUT an update when the live policy differs from the request.
        changed = self._compare_get_request_with_dict(
            firewall_policy,
            firewall_dict)
        if not self.module.check_mode and changed:
            self._update_firewall_policy(
                source_account_alias,
                location,
                firewall_policy_id,
                firewall_dict)
    # Re-read the policy after a change so the returned object is current.
    if changed and firewall_policy_id:
        firewall_policy = self._wait_for_requests_to_complete(
            source_account_alias,
            location,
            firewall_policy_id)
    return changed, firewall_policy_id, firewall_policy
def _ensure_firewall_policy_is_absent(
        self,
        source_account_alias,
        location,
        firewall_dict):
    """
    Ensures that a given firewall policy is removed if present
    :param source_account_alias: the source account alias for the firewall policy
    :param location: datacenter of the firewall policy
    :param firewall_dict: firewall policy to delete
    :return: (changed, firewall_policy_id, response)
        changed: flag for if a change occurred
        firewall_policy_id: the firewall policy id that was deleted
        response: response from CLC API call
    """
    changed = False
    response = []
    firewall_policy_id = firewall_dict.get('firewall_policy_id')
    # Only issue the DELETE when the policy actually exists (idempotency).
    result = self._get_firewall_policy(
        source_account_alias, location, firewall_policy_id)
    if result:
        if not self.module.check_mode:
            response = self._delete_firewall_policy(
                source_account_alias,
                location,
                firewall_policy_id)
        changed = True
    return changed, firewall_policy_id, response
def _create_firewall_policy(
        self,
        source_account_alias,
        location,
        firewall_dict):
    """
    Creates the firewall policy for the given account alias
    :param source_account_alias: the source account alias for the firewall policy
    :param location: datacenter of the firewall policy
    :param firewall_dict: dictionary of request parameters for firewall policy
    :return: response from CLC API call
    """
    # Translate module parameter names to the API's JSON field names.
    payload = {
        'destinationAccount': firewall_dict.get('destination_account_alias'),
        'source': firewall_dict.get('source'),
        'destination': firewall_dict.get('destination'),
        'ports': firewall_dict.get('ports')}
    try:
        response = self.clc.v2.API.Call(
            'POST', '/v2-experimental/firewallPolicies/%s/%s' %
            (source_account_alias, location), payload)
    except APIFailedResponse as e:
        # Surface the API error text as a module failure.
        return self.module.fail_json(
            msg="Unable to create firewall policy. %s" %
                str(e.response_text))
    return response
def _delete_firewall_policy(
        self,
        source_account_alias,
        location,
        firewall_policy_id):
    """
    Delete the firewall policy with the given id in a datacenter.

    :param source_account_alias: the source account alias for the firewall policy
    :param location: datacenter of the firewall policy
    :param firewall_policy_id: firewall policy id to delete
    :return: response: response from CLC API call
    """
    endpoint = '/v2-experimental/firewallPolicies/%s/%s/%s' % (
        source_account_alias, location, firewall_policy_id)
    try:
        return self.clc.v2.API.Call('DELETE', endpoint)
    except APIFailedResponse as exc:
        # fail_json exits the module with an error for the user.
        return self.module.fail_json(
            msg="Unable to delete the firewall policy id : {0}. {1}".format(
                firewall_policy_id, str(exc.response_text)))
def _update_firewall_policy(
        self,
        source_account_alias,
        location,
        firewall_policy_id,
        firewall_dict):
    """
    Apply ``firewall_dict`` to an existing firewall policy via a PUT call.

    :param source_account_alias: the source account alias for the firewall policy
    :param location: datacenter of the firewall policy
    :param firewall_policy_id: firewall policy id to update
    :param firewall_dict: dictionary of request parameters for firewall policy
    :return: response: response from CLC API call
    """
    endpoint = '/v2-experimental/firewallPolicies/%s/%s/%s' % (
        source_account_alias, location, firewall_policy_id)
    try:
        return self.clc.v2.API.Call('PUT', endpoint, firewall_dict)
    except APIFailedResponse as exc:
        # fail_json exits the module with an error for the user.
        return self.module.fail_json(
            msg="Unable to update the firewall policy id : {0}. {1}".format(
                firewall_policy_id, str(exc.response_text)))
@staticmethod
def _compare_get_request_with_dict(response, firewall_dict):
"""
Helper method to compare the json response for getting the firewall policy with the request parameters
:param response: response from the get method
:param firewall_dict: dictionary of request parameters for firewall policy
:return: changed: Boolean that returns true if there are differences between
the response parameters and the playbook parameters
"""
changed = False
response_dest_account_alias = response.get('destinationAccount')
response_enabled = response.get('enabled')
response_source = response.get('source')
response_dest = response.get('destination')
response_ports = response.get('ports')
request_dest_account_alias = firewall_dict.get(
'destination_account_alias')
request_enabled = firewall_dict.get('enabled')
if request_enabled is None:
request_enabled = True
request_source = firewall_dict.get('source')
request_dest = firewall_dict.get('destination')
request_ports = firewall_dict.get('ports')
if (
response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
response_enabled != request_enabled) or (
response_source and response_source != request_source) or (
response_dest and response_dest != request_dest) or (
response_ports and response_ports != request_ports):
changed = True
return changed
def _get_firewall_policy(
        self,
        source_account_alias,
        location,
        firewall_policy_id):
    """
    Fetch the details of a single firewall policy.

    :param source_account_alias: the source account alias for the firewall policy
    :param location: datacenter of the firewall policy
    :param firewall_policy_id: id of the firewall policy to get
    :return: response - The response from CLC API call, or None when the
        policy does not exist (HTTP 404)
    """
    endpoint = '/v2-experimental/firewallPolicies/%s/%s/%s' % (
        source_account_alias, location, firewall_policy_id)
    try:
        return self.clc.v2.API.Call('GET', endpoint)
    except APIFailedResponse as exc:
        # A 404 simply means "not found" and is reported as None; any other
        # failure aborts the module run.
        if exc.response_status_code != 404:
            self.module.fail_json(
                msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
                    firewall_policy_id, str(exc.response_text)))
        return None
def _wait_for_requests_to_complete(
        self,
        source_account_alias,
        location,
        firewall_policy_id,
        wait_limit=50):
    """
    Waits until the CLC requests are complete if the wait argument is True
    :param source_account_alias: The source account alias for the firewall policy
    :param location: datacenter of the firewall policy
    :param firewall_policy_id: The firewall policy id
    :param wait_limit: The number of times to check the status for completion
    :return: the firewall_policy object (may be None if 'wait' is falsy or
        the policy could not be fetched)
    """
    wait = self.module.params.get('wait')
    count = 0
    firewall_policy = None
    while wait:
        count += 1
        firewall_policy = self._get_firewall_policy(
            source_account_alias, location, firewall_policy_id)
        # Bug fix: _get_firewall_policy returns None on a 404, which
        # previously raised AttributeError on .get() here. Treat a missing
        # policy as "no status yet" and keep polling until the limit.
        status = firewall_policy.get('status') if firewall_policy else None
        if status == 'active' or count > wait_limit:
            wait = False
        else:
            # wait for 2 seconds
            sleep(2)
    return firewall_policy
@staticmethod
def _set_user_agent(clc):
    """Attach a ClcAnsibleModule identifier to the CLC SDK's HTTP session.

    Has no effect on SDK versions that do not expose SetRequestsSession.
    """
    if not hasattr(clc, 'SetRequestsSession'):
        return
    agent_string = "ClcAnsibleModule/" + __version__
    session = requests.Session()
    session.headers.update({"Api-Client": agent_string})
    session.headers['User-Agent'] += " " + agent_string
    clc.SetRequestsSession(session)
def main():
    """Module entry point: build the AnsibleModule and run the request.

    :return: none
    """
    module = AnsibleModule(
        argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
        supports_check_mode=True)
    ClcFirewallPolicy(module).process_request()
# Ansible modules conventionally pull in the module_utils helpers via a
# wildcard import at the bottom of the file.
from ansible.module_utils.basic import *  # pylint: disable=W0614

if __name__ == '__main__':
    main()
| gpl-3.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/swarming_client/utils/on_error.py | 9 | 7469 | # coding=utf-8
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Declares a single function to report errors to a server.
By running the script, you accept that errors will be reported to the server you
connect to.
"""
import atexit
import getpass
import os
import platform
import re
import socket
import sys
import time
import traceback
from . import net
from . import tools
from . import zip_package
# It is very important to not get reports from non Chromium infrastructure. We
# *really* do not want to know anything about you, dear non Google employee.
_ENABLED_DOMAINS = (
    '.chromium.org',
    '.google.com',
    '.google.com.internal',
)

# If this envar is '1' then disable reports. Useful when developing the client.
_DISABLE_ENVVAR = 'SWARMING_DISABLE_ON_ERROR'

# Set this variable to the net.HttpService server to be used to report errors.
# It must be done early at process startup. Once this value is set, it is
# considered that the atexit handler is enabled.
_SERVER = None

# This is tricky because it is looked at during import time. At atexit time,
# __file__ is not defined anymore so it has to be saved first. Also make it work
# when executed directly from a .zip file. Also handle interactive mode where
# this is always set to None.
_SOURCE = zip_package.get_main_script_path()
if _SOURCE:
    # Only the basename is reported; full paths would leak local layout.
    _SOURCE = os.path.basename(_SOURCE)

# Recorded at import time so reports can include the process duration.
_TIME_STARTED = time.time()

# FQDN of this machine; populated by report_on_exception_exit().
_HOSTNAME = None

# Paths that can be stripped from the stack traces by _relative_path().
_PATHS_TO_STRIP = (
    os.getcwd() + os.path.sep,
    os.path.dirname(os.__file__) + os.path.sep,
    '.' + os.path.sep,
)

# Used to simplify the stack trace, by removing path information when possible.
_RE_STACK_TRACE_FILE = (
    r'^(?P<prefix> File \")(?P<file>[^\"]+)(?P<suffix>\"\, line )'
    r'(?P<line_no>\d+)(?P<rest>|\, in .+)$')
### Private stuff.
def _relative_path(path):
    """Returns path with the cwd / stdlib / './' prefix removed, if present.

    Used by Formatter.
    """
    for prefix in _PATHS_TO_STRIP:
        if path.startswith(prefix):
            # Strip only the first matching prefix, like the original lookup.
            return path[len(prefix):]
    return path
def _reformat_stack(stack):
    """Post processes the stack trace through _relative_path().

    Strips common path prefixes from ' File "..."' lines and removes the
    shared leading indentation from the whole trace.
    """
    def replace(line):
        m = re.match(_RE_STACK_TRACE_FILE, line, re.DOTALL)
        if not m:
            return line
        groups = list(m.groups())
        groups[1] = _relative_path(groups[1])
        return ''.join(groups)

    # Trim paths. Materialize as a list: on python3 map() returns a one-shot
    # iterator, and the trimming loop below iterates the result repeatedly.
    out = [replace(l) for l in stack.splitlines(True)]
    # Trim indentation. Bug fix: guard against an empty trace — all([]) is
    # True, so the original looped forever on an empty string.
    while out and all(l.startswith(' ') for l in out):
        out = [l[1:] for l in out]
    return ''.join(out)
def _format_exception(e):
"""Returns a human readable form of an exception.
Adds the maximum number of interesting information in the safest way."""
try:
out = repr(e)
except Exception:
out = ''
try:
out = str(e)
except Exception:
pass
return out
def _post(params):
    """POSTs the packet to the configured ereporter2 endpoint.

    Returns the parsed JSON response, or None when no server is configured.
    """
    if _SERVER:
        return _SERVER.json_request(
            '/ereporter2/api/v1/on_error', data=params, max_attempts=1,
            timeout=20)
    return None
def _serialize_env():
    """Returns os.environ as a json-serializable dict.

    Environment values may contain non-ASCII bytes (e.g. ANSI escape codes),
    so each value is re-encoded to ascii with replacement.
    """
    return {
        key: value.encode('ascii', 'replace')
        for key, value in os.environ.iteritems()
    }
def _report_exception(message, e, stack):
    """Sends the stack trace to the breakpad server.

    Writes progress to stderr, assembles the report packet from the process
    environment and module-level globals (_TIME_STARTED, _HOSTNAME, _SOURCE),
    and posts it via _post(). On failure the message is echoed to stderr so
    it is not lost.
    """
    name = 'crash report' if e else 'report'
    sys.stderr.write('Sending the %s ...' % name)
    # Fold the exception text into the free-form message.
    message = (message or '').rstrip()
    if e:
        if message:
            message += '\n'
        message += (_format_exception(e)).rstrip()

    # Params are the 'r' dict of the packet sent below.
    params = {
        'args': sys.argv,
        'cwd': os.getcwd(),
        'duration': time.time() - _TIME_STARTED,
        'env': _serialize_env(),
        'hostname': _HOSTNAME,
        'message': message,
        'os': sys.platform,
        'python_version': platform.python_version(),
        'source': _SOURCE,
        'user': getpass.getuser(),
    }
    if e:
        params['category'] = 'exception'
        params['exception_type'] = e.__class__.__name__
    else:
        params['category'] = 'report'

    if stack:
        # Cap the stack at 4096 characters, marking the truncation with '…'.
        params['stack'] = _reformat_stack(stack).rstrip()
        if len(params['stack']) > 4096:
            params['stack'] = params['stack'][:4095] + '…'

    version = getattr(sys.modules['__main__'], '__version__', None)
    if version:
        params['version'] = version

    data = {
        'r': params,
        # Bump the version when changing the packet format.
        'v': '1',
    }
    response = _post(data)
    if response and response.get('url'):
        sys.stderr.write(' done.\nReport URL: %s\n' % response['url'])
    else:
        sys.stderr.write(' failed!\n')
        sys.stderr.write(message + '\n')
def _check_for_exception_on_exit():
    """atexit hook: reports the exception the process is dying from, if any.

    atexit callbacks run outside any exception handler, so sys.exc_info() is
    empty here; the interpreter's sys.last_value / sys.last_traceback globals
    are consulted instead. KeyboardInterrupt is deliberately not reported.
    """
    exception = getattr(sys, 'last_value', None)
    last_tb = getattr(sys, 'last_traceback', None)
    if not exception or not last_tb or isinstance(exception, KeyboardInterrupt):
        return
    stack_text = ''.join(traceback.format_tb(last_tb))
    _report_exception('Process exited due to exception', exception, stack_text)
def _is_in_test():
"""Returns True if filename of __main__ module ends with _test.py(c)."""
main_file = os.path.basename(getattr(sys.modules['__main__'], '__file__', ''))
return os.path.splitext(main_file)[0].endswith('_test')
### Public API.
def report_on_exception_exit(server):
    """Arms an atexit callback that reports the active exception, if any,
    when the process exits.

    Returns True when reporting was armed; False when reporting is disabled
    (opt-out envvar, unit test process, non-HTTPS server, or a hostname
    outside the allowed domains). Raises ValueError if called twice.
    """
    global _HOSTNAME
    global _SERVER
    if _SERVER:
        raise ValueError('on_error.report_on_exception_exit() was called twice')

    # Honor the explicit opt-out, and never report from unit test processes.
    if tools.get_bool_env_var(_DISABLE_ENVVAR) or _is_in_test():
        return False

    if not server.startswith('https://'):
        # Only allow report over HTTPS. Silently drop it.
        return False

    _HOSTNAME = socket.getfqdn()
    if not _HOSTNAME.endswith(_ENABLED_DOMAINS):
        # Silently skip non-google infrastructure. Technically, it reports to
        # the server the client code is talking to so in practice, it would be
        # safe for non googler to manually enable this assuming their client
        # code talks to a server they also own. Please send a CL if you desire
        # this functionality.
        return False

    _SERVER = net.get_http_service(server, allow_cached=False)
    atexit.register(_check_for_exception_on_exit)
    return True
def report(error):
    """Reports an error to the server, or prints it to stderr when no server
    is configured.

    Intended only for non recoverable unexpected errors that must be
    monitored at server-level, like API request failure. It should NOT be
    used for input validation, command line argument errors, etc.

    Arguments:
      error: error message string (possibly multiple lines) or None. If an
          exception frame is active, it will be logged.
    """
    _, exc_value, exc_tb = sys.exc_info()
    if _SERVER:
        _report_exception(
            error, exc_value, ''.join(traceback.format_tb(exc_tb)))
        return

    if error:
        sys.stderr.write(error + '\n')
    if exc_value:
        sys.stderr.write(_format_exception(exc_value) + '\n')
| mit |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/operations/_virtual_network_gateways_operations.py | 1 | 131134 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    """Wire up the pipeline client, configuration and (de)serializers
    supplied by the service client that instantiates this operation group."""
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VirtualNetworkGateway"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VirtualNetworkGateway"
    """Initial PUT call backing begin_create_or_update.

    Sends the request once and returns the deserialized gateway; long-running
    polling is handled by the caller.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both 200 (updated) and 201 (created) carry a gateway body.
    if response.status_code == 200:
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VirtualNetworkGateway"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
    """Creates or updates a virtual network gateway in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param parameters: Parameters supplied to create or update virtual network gateway operation.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial PUT; its raw pipeline response
        # seeds the poller constructed below.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Final deserialization once the LRO reaches a terminal state.
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VirtualNetworkGateway"
    """Gets the specified virtual network gateway by resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualNetworkGateway, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Synchronous GET: no long-running operation involved.
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Initial DELETE call backing begin_delete; returns nothing.

    Long-running polling is handled by the caller.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202 = accepted for deletion, 204 = already gone.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified virtual network gateway.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial DELETE; its raw pipeline response
        # seeds the poller constructed below.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deletion yields no body; only forward to a custom cls if given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def _update_tags_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.VirtualNetworkGateway"]
    """Initial PATCH call backing begin_update_tags.

    Returns the deserialized gateway on 200, or None on 202 (accepted but
    still in progress); long-running polling is handled by the caller.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VirtualNetworkGateway"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_tags_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def begin_update_tags(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
    """Updates a virtual network gateway tags.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param parameters: Parameters supplied to update virtual network gateway tags.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial PATCH; its raw pipeline response
        # seeds the poller constructed below.
        raw_result = self._update_tags_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Final deserialization once the LRO reaches a terminal state.
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
    def list(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.VirtualNetworkGatewayListResult"]
        """Gets all virtual network gateways by resource group.

        Returns a lazy pager; no HTTP request is made until iteration begins.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualNetworkGatewayListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGatewayListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGatewayListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET for either the first page (templated URL) or a
            # follow-up page (the service-provided next_link is already complete).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Split one page into (link-to-next-page-or-None, iterator of items).
            deserialized = self._deserialize('VirtualNetworkGatewayListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; anything other than 200 is raised as an ARM error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'}  # type: ignore
def list_connections(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkGatewayListConnectionsResult"]
"""Gets all the connections in a virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkGatewayListConnectionsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGatewayListConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayListConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_connections.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGatewayListConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'} # type: ignore
def _reset_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
gateway_vip=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.VirtualNetworkGateway"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualNetworkGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._reset_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'} # type: ignore
    def begin_reset(
        self,
        resource_group_name,  # type: str
        virtual_network_gateway_name,  # type: str
        gateway_vip=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
        """Resets the primary of the virtual network gateway in the specified resource group.

        Long-running operation: sends the initial request (unless resuming from a
        continuation token) and returns an LROPoller that polls until completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :param gateway_vip: Virtual network gateway vip address supplied to the begin reset of the
         active-active feature enabled gateway.
        :type gateway_vip: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VirtualNetworkGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request now. cls is overridden so
            # the raw pipeline response is kept for the poller to inspect.
            raw_result = self._reset_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                gateway_vip=gateway_vip,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Already consumed by the initial call; drop them so they are not passed
        # again to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response, honoring the caller's cls hook.
            deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Choose the polling strategy: default ARM polling (final state read via
        # the Location header), no polling, or a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}  # type: ignore
def _reset_vpn_client_shared_key_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._reset_vpn_client_shared_key_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_vpn_client_shared_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'} # type: ignore
    def begin_reset_vpn_client_shared_key(
        self,
        resource_group_name,  # type: str
        virtual_network_gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Resets the VPN client shared key of the virtual network gateway in the specified resource
        group.

        Long-running operation: sends the initial request (unless resuming from a
        continuation token) and returns an LROPoller that polls until completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request now. cls is overridden so
            # the raw pipeline response is kept for the poller to inspect.
            raw_result = self._reset_vpn_client_shared_key_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Already consumed by the initial call; drop them so they are not passed
        # again to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # The operation has no body to deserialize; only the optional cls
            # hook produces a value, otherwise None is returned implicitly.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Choose the polling strategy: default ARM polling (final state read via
        # the Location header), no polling, or a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_vpn_client_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'}  # type: ignore
def _generatevpnclientpackage_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VpnClientParameters"
**kwargs # type: Any
):
# type: (...) -> Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._generatevpnclientpackage_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnClientParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_generatevpnclientpackage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'} # type: ignore
    def begin_generatevpnclientpackage(
        self,
        resource_group_name,  # type: str
        virtual_network_gateway_name,  # type: str
        parameters,  # type: "_models.VpnClientParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[str]
        """Generates VPN client package for P2S client of the virtual network gateway in the specified
        resource group.

        Long-running operation: sends the initial request (unless resuming from a
        continuation token) and returns an LROPoller that polls until completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :param parameters: Parameters supplied to the generate virtual network gateway VPN client
         package operation.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnClientParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either str or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[str]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[str]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request now. cls is overridden so
            # the raw pipeline response is kept for the poller to inspect.
            raw_result = self._generatevpnclientpackage_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Already consumed by the initial call; drop them so they are not passed
        # again to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response, honoring the caller's cls hook.
            deserialized = self._deserialize('str', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Choose the polling strategy: default ARM polling (final state read via
        # the Location header), no polling, or a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}  # type: ignore
def _generate_vpn_profile_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VpnClientParameters"
**kwargs # type: Any
):
# type: (...) -> Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._generate_vpn_profile_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnClientParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_generate_vpn_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'} # type: ignore
    def begin_generate_vpn_profile(
        self,
        resource_group_name,  # type: str
        virtual_network_gateway_name,  # type: str
        parameters,  # type: "_models.VpnClientParameters"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[str]
        """Generates VPN profile for P2S client of the virtual network gateway in the specified resource
        group. Used for IKEV2 and radius based authentication.

        Long-running operation: sends the initial request (unless resuming from a
        continuation token) and returns an LROPoller that polls until completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network gateway.
        :type virtual_network_gateway_name: str
        :param parameters: Parameters supplied to the generate virtual network gateway VPN client
         package operation.
        :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnClientParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either str or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[str]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[str]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request now. cls is overridden so
            # the raw pipeline response is kept for the poller to inspect.
            raw_result = self._generate_vpn_profile_initial(
                resource_group_name=resource_group_name,
                virtual_network_gateway_name=virtual_network_gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Already consumed by the initial call; drop them so they are not passed
        # again to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response, honoring the caller's cls hook.
            deserialized = self._deserialize('str', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Choose the polling strategy: default ARM polling (final state read via
        # the Location header), no polling, or a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'}  # type: ignore
def _get_vpn_profile_package_url_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._get_vpn_profile_package_url_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vpn_profile_package_url_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'} # type: ignore
def begin_get_vpn_profile_package_url(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[str]
    """Gets pre-generated VPN profile for P2S client of the virtual network gateway in the specified
    resource group. The profile needs to be generated first using generateVpnProfile.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either str or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[str]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fire the initial request; keep the raw pipeline response for the poller.
        raw_result = self._get_vpn_profile_package_url_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x, y, z: x,
            **kwargs
        )
        # Consumed by the initial call; must not be forwarded into polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response, honoring a custom cls when supplied.
        result = self._deserialize('str', pipeline_response)
        return cls(pipeline_response, result, {}) if cls else result

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True:
        polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'}  # type: ignore
def _get_bgp_peer_status_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.BgpPeerStatusListResult"]
    """Initial (non-polling) POST for the getBgpPeerStatus long-running operation.

    Returns the deserialized body on 200, or None when the service answers 202.
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.BgpPeerStatusListResult"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Fill the URL template attached to this operation's metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self._get_bgp_peer_status_initial.metadata['url'],  # type: ignore
        **path_format_arguments
    )

    # Query string: 'peer' is optional and only sent when provided.
    query_parameters = {}  # type: Dict[str, Any]
    if peer is not None:
        query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('BgpPeerStatusListResult', pipeline_response)

    if custom_cls:
        return custom_cls(pipeline_response, deserialized, {})
    return deserialized
_get_bgp_peer_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}  # type: ignore
def begin_get_bgp_peer_status(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.BgpPeerStatusListResult"]
    """The GetBgpPeerStatus operation retrieves the status of all BGP peers.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param peer: The IP address of the peer to retrieve the status of.
    :type peer: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either BgpPeerStatusListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.BgpPeerStatusListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.BgpPeerStatusListResult"]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fire the initial request; keep the raw pipeline response for the poller.
        raw_result = self._get_bgp_peer_status_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            peer=peer,
            cls=lambda x, y, z: x,
            **kwargs
        )
        # Consumed by the initial call; must not be forwarded into polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response, honoring a custom cls when supplied.
        result = self._deserialize('BgpPeerStatusListResult', pipeline_response)
        return cls(pipeline_response, result, {}) if cls else result

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True:
        polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}  # type: ignore
def supported_vpn_devices(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> str
    """Gets a xml format representation for supported vpn devices.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: str, or the result of cls(response)
    :rtype: str
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[str]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Fill the URL template attached to this operation's metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self.supported_vpn_devices.metadata['url'],  # type: ignore
        **path_format_arguments
    )

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Synchronous operation: only 200 is a success here.
    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('str', pipeline_response)
    if custom_cls:
        return custom_cls(pipeline_response, deserialized, {})
    return deserialized
supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'}  # type: ignore
def _get_learned_routes_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.GatewayRouteListResult"]
    """Initial (non-polling) POST for the getLearnedRoutes long-running operation.

    Returns the deserialized body on 200, or None when the service answers 202.
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.GatewayRouteListResult"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Fill the URL template attached to this operation's metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self._get_learned_routes_initial.metadata['url'],  # type: ignore
        **path_format_arguments
    )

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)

    if custom_cls:
        return custom_cls(pipeline_response, deserialized, {})
    return deserialized
_get_learned_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}  # type: ignore
def begin_get_learned_routes(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.GatewayRouteListResult"]
    """This operation retrieves a list of routes the virtual network gateway has learned, including
    routes learned from BGP peers.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either GatewayRouteListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.GatewayRouteListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.GatewayRouteListResult"]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fire the initial request; keep the raw pipeline response for the poller.
        raw_result = self._get_learned_routes_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x, y, z: x,
            **kwargs
        )
        # Consumed by the initial call; must not be forwarded into polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response, honoring a custom cls when supplied.
        result = self._deserialize('GatewayRouteListResult', pipeline_response)
        return cls(pipeline_response, result, {}) if cls else result

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True:
        polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}  # type: ignore
def _get_advertised_routes_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.GatewayRouteListResult"]
    """Initial (non-polling) POST for the getAdvertisedRoutes long-running operation.

    Returns the deserialized body on 200, or None when the service answers 202.
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.GatewayRouteListResult"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Fill the URL template attached to this operation's metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self._get_advertised_routes_initial.metadata['url'],  # type: ignore
        **path_format_arguments
    )

    # Query string: 'peer' is required for this operation.
    query_parameters = {
        'peer': self._serialize.query("peer", peer, 'str'),
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]

    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)

    if custom_cls:
        return custom_cls(pipeline_response, deserialized, {})
    return deserialized
_get_advertised_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}  # type: ignore
def begin_get_advertised_routes(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.GatewayRouteListResult"]
    """This operation retrieves a list of routes the virtual network gateway is advertising to the
    specified peer.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param peer: The IP address of the peer.
    :type peer: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either GatewayRouteListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.GatewayRouteListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.GatewayRouteListResult"]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fire the initial request; keep the raw pipeline response for the poller.
        raw_result = self._get_advertised_routes_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            peer=peer,
            cls=lambda x, y, z: x,
            **kwargs
        )
        # Consumed by the initial call; must not be forwarded into polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response, honoring a custom cls when supplied.
        result = self._deserialize('GatewayRouteListResult', pipeline_response)
        return cls(pipeline_response, result, {}) if cls else result

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True:
        polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}  # type: ignore
def _set_vpnclient_ipsec_parameters_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    vpnclient_ipsec_params,  # type: "_models.VpnClientIPsecParameters"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.VpnClientIPsecParameters"]
    """Initial (non-polling) POST for the setvpnclientipsecparameters long-running operation.

    Serializes ``vpnclient_ipsec_params`` as the request body. Returns the
    deserialized body on 200, or None when the service answers 202.
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VpnClientIPsecParameters"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Fill the URL template attached to this operation's metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self._set_vpnclient_ipsec_parameters_initial.metadata['url'],  # type: ignore
        **path_format_arguments
    )

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(vpnclient_ipsec_params, 'VpnClientIPsecParameters')

    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)

    if custom_cls:
        return custom_cls(pipeline_response, deserialized, {})
    return deserialized
_set_vpnclient_ipsec_parameters_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'}  # type: ignore
def begin_set_vpnclient_ipsec_parameters(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    vpnclient_ipsec_params,  # type: "_models.VpnClientIPsecParameters"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VpnClientIPsecParameters"]
    """The Set VpnclientIpsecParameters operation sets the vpnclient ipsec policy for P2S client of
    virtual network gateway in the specified resource group through Network resource provider.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param vpnclient_ipsec_params: Parameters supplied to the Begin Set vpnclient ipsec parameters
     of Virtual Network Gateway P2S client operation through Network resource provider.
    :type vpnclient_ipsec_params: ~azure.mgmt.network.v2020_07_01.models.VpnClientIPsecParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VpnClientIPsecParameters or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VpnClientIPsecParameters]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnClientIPsecParameters"]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fire the initial request; keep the raw pipeline response for the poller.
        raw_result = self._set_vpnclient_ipsec_parameters_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            vpnclient_ipsec_params=vpnclient_ipsec_params,
            cls=lambda x, y, z: x,
            **kwargs
        )
        # Consumed by the initial call; must not be forwarded into polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response, honoring a custom cls when supplied.
        result = self._deserialize('VpnClientIPsecParameters', pipeline_response)
        return cls(pipeline_response, result, {}) if cls else result

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True:
        polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'}  # type: ignore
def _get_vpnclient_ipsec_parameters_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VpnClientIPsecParameters"
    """Initial (non-polling) POST for the getvpnclientipsecparameters long-running operation.

    Only 200 is accepted here; the body is always deserialized.
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnClientIPsecParameters"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Fill the URL template attached to this operation's metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(
        self._get_vpnclient_ipsec_parameters_initial.metadata['url'],  # type: ignore
        **path_format_arguments
    )

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
    if custom_cls:
        return custom_cls(pipeline_response, deserialized, {})
    return deserialized
_get_vpnclient_ipsec_parameters_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'}  # type: ignore
def begin_get_vpnclient_ipsec_parameters(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VpnClientIPsecParameters"]
    """The Get VpnclientIpsecParameters operation retrieves information about the vpnclient ipsec
    policy for P2S client of virtual network gateway in the specified resource group through
    Network resource provider.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The virtual network gateway name.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VpnClientIPsecParameters or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VpnClientIPsecParameters]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnClientIPsecParameters"]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fire the initial request; keep the raw pipeline response for the poller.
        raw_result = self._get_vpnclient_ipsec_parameters_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x, y, z: x,
            **kwargs
        )
        # Consumed by the initial call; must not be forwarded into polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response, honoring a custom cls when supplied.
        result = self._deserialize('VpnClientIPsecParameters', pipeline_response)
        return cls(pipeline_response, result, {}) if cls else result

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True:
        polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'}  # type: ignore
def vpn_device_configuration_script(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_connection_name,  # type: str
    parameters,  # type: "_models.VpnDeviceScriptParameters"
    **kwargs  # type: Any
):
    # type: (...) -> str
    """Gets a xml format representation for vpn device configuration script.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_connection_name: The name of the virtual network gateway
     connection for which the configuration script is generated.
    :type virtual_network_gateway_connection_name: str
    :param parameters: Parameters supplied to the generate vpn device script operation.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnDeviceScriptParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: str, or the result of cls(response)
    :rtype: str
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    # Map well-known ARM failure codes to azure-core exception types;
    # callers may extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.vpn_device_configuration_script.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is a success for this synchronous operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # The service returns the configuration script as a plain string payload.
    deserialized = self._deserialize('str', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'}  # type: ignore
def _start_packet_capture_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters=None,  # type: Optional["_models.VpnPacketCaptureStartParameters"]
    **kwargs  # type: Any
):
    # type: (...) -> Optional[str]
    """Issue the initial startPacketCapture request of the LRO.

    Returns the deserialized string body on 200, or None on 202 (request
    accepted, result not yet available); raises HttpResponseError on any
    other status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[str]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._start_packet_capture_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    # The request body is optional for this operation.
    if parameters is not None:
        body_content = self._serialize.body(parameters, 'VpnPacketCaptureStartParameters')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        # Surface the service-provided error payload when it can be parsed.
        error = self._deserialize.failsafe_deserialize(_models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = None
    # 202 carries no body; only deserialize on 200.
    if response.status_code == 200:
        deserialized = self._deserialize('str', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_start_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/startPacketCapture'}  # type: ignore
def begin_start_packet_capture(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters=None,  # type: Optional["_models.VpnPacketCaptureStartParameters"]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[str]
    """Starts packet capture on virtual network gateway in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param parameters: Virtual network gateway packet capture parameters supplied to start packet
     capture on gateway.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnPacketCaptureStartParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either str or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[str]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fire the initial request; cls=lambda hands back the raw
        # PipelineResponse so the poller can deserialize the final result.
        raw_result = self._start_packet_capture_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; must not be forwarded to the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # final-state-via 'location': fetch the final result from the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume from a saved poller state; no new initial request was made.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/startPacketCapture'}  # type: ignore
def _stop_packet_capture_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VpnPacketCaptureStopParameters"
    **kwargs  # type: Any
):
    # type: (...) -> Optional[str]
    """Issue the initial stopPacketCapture request of the LRO.

    Returns the deserialized string body on 200, or None on 202 (request
    accepted, result not yet available); raises HttpResponseError on any
    other status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[str]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._stop_packet_capture_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    # Unlike the start operation, the stop body is required.
    body_content = self._serialize.body(parameters, 'VpnPacketCaptureStopParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        # Surface the service-provided error payload when it can be parsed.
        error = self._deserialize.failsafe_deserialize(_models.Error, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = None
    # 202 carries no body; only deserialize on 200.
    if response.status_code == 200:
        deserialized = self._deserialize('str', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_stop_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/stopPacketCapture'}  # type: ignore
def begin_stop_packet_capture(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VpnPacketCaptureStopParameters"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[str]
    """Stops packet capture on virtual network gateway in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param parameters: Virtual network gateway packet capture parameters supplied to stop packet
     capture on gateway.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnPacketCaptureStopParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either str or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[str]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fire the initial request; cls=lambda hands back the raw
        # PipelineResponse so the poller can deserialize the final result.
        raw_result = self._stop_packet_capture_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; must not be forwarded to the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # final-state-via 'location': fetch the final result from the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume from a saved poller state; no new initial request was made.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/stopPacketCapture'}  # type: ignore
def _get_vpnclient_connection_health_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.VpnClientConnectionHealthDetailListResult"]
    """Issue the initial getVpnClientConnectionHealth request of the LRO.

    POST with no request body.  Returns the deserialized result on 200,
    or None on 202 (accepted, result pending); raises HttpResponseError
    on any other status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VpnClientConnectionHealthDetailListResult"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Construct URL
    url = self._get_vpnclient_connection_health_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers (no Content-Type: this POST sends no body)
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    # 202 carries no body; only deserialize on 200.
    if response.status_code == 200:
        deserialized = self._deserialize('VpnClientConnectionHealthDetailListResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_get_vpnclient_connection_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getVpnClientConnectionHealth'}  # type: ignore
def begin_get_vpnclient_connection_health(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VpnClientConnectionHealthDetailListResult"]
    """Get VPN client connection health detail per P2S client connection of the virtual network
    gateway in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VpnClientConnectionHealthDetailListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.VpnClientConnectionHealthDetailListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnClientConnectionHealthDetailListResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fire the initial request; cls=lambda hands back the raw
        # PipelineResponse so the poller can deserialize the final result.
        raw_result = self._get_vpnclient_connection_health_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; must not be forwarded to the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        deserialized = self._deserialize('VpnClientConnectionHealthDetailListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # final-state-via 'location': fetch the final result from the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume from a saved poller state; no new initial request was made.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpnclient_connection_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getVpnClientConnectionHealth'}  # type: ignore
def _disconnect_virtual_network_gateway_vpn_connections_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    request,  # type: "_models.P2SVpnConnectionRequest"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the initial disconnectVirtualNetworkGatewayVpnConnections
    request of the LRO.

    Succeeds on 200 or 202 and returns None (or cls(...) if provided);
    raises HttpResponseError on any other status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._disconnect_virtual_network_gateway_vpn_connections_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(request, 'P2SVpnConnectionRequest')
    body_content_kwargs['content'] = body_content
    # NOTE: 'request' is deliberately rebound here from the body model to
    # the outgoing HTTP request object; the parameter is no longer needed.
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_disconnect_virtual_network_gateway_vpn_connections_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/disconnectVirtualNetworkGatewayVpnConnections'}  # type: ignore
def begin_disconnect_virtual_network_gateway_vpn_connections(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    request,  # type: "_models.P2SVpnConnectionRequest"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Disconnect vpn connections of virtual network gateway in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param request: The parameters are supplied to disconnect vpn connections.
    :type request: ~azure.mgmt.network.v2020_07_01.models.P2SVpnConnectionRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fire the initial request; cls=lambda hands back the raw
        # PipelineResponse for the poller.
        raw_result = self._disconnect_virtual_network_gateway_vpn_connections_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            request=request,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; must not be forwarded to the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Operation yields no body; only invoke the custom cls hook if given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
    }

    # final-state-via 'location': poll completion via the Location header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume from a saved poller state; no new initial request was made.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_disconnect_virtual_network_gateway_vpn_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/disconnectVirtualNetworkGatewayVpnConnections'}  # type: ignore
| mit |
wnt-zhp/hufce | django/contrib/gis/db/backends/spatialite/models.py | 403 | 1847 | """
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
    """
    The 'geometry_columns' table from SpatiaLite.
    """
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    type = models.CharField(max_length=30)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    spatial_index_enabled = models.IntegerField()

    class Meta:
        db_table = 'geometry_columns'
        # The table is owned by SpatiaLite itself, so Django must not
        # create or migrate it.
        managed = False

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the
        feature table name.
        """
        return 'f_table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the
        feature geometry column.
        """
        return 'f_geometry_column'

    def __unicode__(self):
        # e.g. "mytable.geom - 2D POINT field (SRID: 4326)"
        return "%s.%s - %dD %s field (SRID: %d)" % \
               (self.f_table_name, self.f_geometry_column,
                self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from SpatiaLite.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    ref_sys_name = models.CharField(max_length=256)
    proj4text = models.CharField(max_length=2048)

    @property
    def wkt(self):
        """Return the WKT for this reference system, derived via GDAL
        from the stored PROJ.4 string."""
        # Imported lazily so the model can be loaded without GDAL present.
        from django.contrib.gis.gdal import SpatialReference
        return SpatialReference(self.proj4text).wkt

    class Meta:
        db_table = 'spatial_ref_sys'
        # Table is owned by SpatiaLite; Django must not manage it.
        managed = False
| gpl-3.0 |
AlexHill/django | tests/field_deconstruction/tests.py | 7 | 11022 | import warnings
from django.test import TestCase
from django.db import models
class FieldDeconstructionTests(TestCase):
"""
Tests the deconstruct() method on all core fields.
"""
def test_name(self):
    """
    Tests the outputting of the correct name if assigned one.
    """
    # First try using a "normal" field.  Before set_attributes_from_name()
    # the field is unbound, so deconstruct() reports no name.
    field = models.CharField(max_length=65)
    name, path, args, kwargs = field.deconstruct()
    self.assertIsNone(name)
    field.set_attributes_from_name("is_awesome_test")
    name, path, args, kwargs = field.deconstruct()
    self.assertEqual(name, "is_awesome_test")
    # Now try with a ForeignKey
    field = models.ForeignKey("some_fake.ModelName")
    name, path, args, kwargs = field.deconstruct()
    self.assertIsNone(name)
    field.set_attributes_from_name("author")
    name, path, args, kwargs = field.deconstruct()
    self.assertEqual(name, "author")
def test_auto_field(self):
    """A bound AutoField deconstructs with its primary_key flag intact."""
    field = models.AutoField(primary_key=True)
    field.set_attributes_from_name("id")
    attr_name, import_path, pos_args, kw_args = field.deconstruct()
    self.assertEqual(import_path, "django.db.models.AutoField")
    self.assertEqual(pos_args, [])
    self.assertEqual(kw_args, {"primary_key": True})
def test_big_integer_field(self):
    """A default BigIntegerField deconstructs with no args or kwargs."""
    attr_name, import_path, pos_args, kw_args = models.BigIntegerField().deconstruct()
    self.assertEqual(import_path, "django.db.models.BigIntegerField")
    self.assertEqual(pos_args, [])
    self.assertEqual(kw_args, {})
def test_boolean_field(self):
    """BooleanField deconstructs empty by default and keeps a non-default 'default'."""
    # (constructor kwargs, expected deconstructed kwargs)
    for ctor_kwargs, expected_kwargs in [({}, {}), ({"default": True}, {"default": True})]:
        field = models.BooleanField(**ctor_kwargs)
        attr_name, import_path, pos_args, kw_args = field.deconstruct()
        self.assertEqual(import_path, "django.db.models.BooleanField")
        self.assertEqual(pos_args, [])
        self.assertEqual(kw_args, expected_kwargs)
def test_char_field(self):
    """CharField keeps max_length, plus null/blank when set."""
    for ctor_kwargs in ({"max_length": 65},
                        {"max_length": 65, "null": True, "blank": True}):
        field = models.CharField(**ctor_kwargs)
        attr_name, import_path, pos_args, kw_args = field.deconstruct()
        self.assertEqual(import_path, "django.db.models.CharField")
        self.assertEqual(pos_args, [])
        # Every constructor kwarg is non-default, so all survive round-trip.
        self.assertEqual(kw_args, ctor_kwargs)
def test_csi_field(self):
    """CommaSeparatedIntegerField keeps its max_length on deconstruction."""
    field = models.CommaSeparatedIntegerField(max_length=100)
    attr_name, import_path, pos_args, kw_args = field.deconstruct()
    self.assertEqual(import_path, "django.db.models.CommaSeparatedIntegerField")
    self.assertEqual(pos_args, [])
    self.assertEqual(kw_args, {"max_length": 100})
def test_date_field(self):
    """DateField deconstructs empty by default and keeps auto_now when set."""
    for ctor_kwargs in ({}, {"auto_now": True}):
        field = models.DateField(**ctor_kwargs)
        attr_name, import_path, pos_args, kw_args = field.deconstruct()
        self.assertEqual(import_path, "django.db.models.DateField")
        self.assertEqual(pos_args, [])
        self.assertEqual(kw_args, ctor_kwargs)
def test_datetime_field(self):
field = models.DateTimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateTimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True})
def test_decimal_field(self):
field = models.DecimalField(max_digits=5, decimal_places=2)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})
def test_email_field(self):
field = models.EmailField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 75})
field = models.EmailField(max_length=255)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 255})
def test_file_field(self):
field = models.FileField(upload_to="foo/bar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar"})
def test_file_path_field(self):
field = models.FilePathField(match=".*\.txt$")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"match": ".*\.txt$"})
field = models.FilePathField(recursive=True, allow_folders=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"recursive": True, "allow_folders": True})
def test_float_field(self):
field = models.FloatField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FloatField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_foreign_key(self):
field = models.ForeignKey("auth.User")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User"})
field = models.ForeignKey("something.Else")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "something.Else"})
field = models.ForeignKey("auth.User", on_delete=models.SET_NULL)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
def test_image_field(self):
field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ImageField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})
def test_integer_field(self):
field = models.IntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_ip_address_field(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
field = models.IPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_generic_ip_address_field(self):
field = models.GenericIPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.GenericIPAddressField(protocol="IPv6")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"protocol": "IPv6"})
def test_many_to_many_field(self):
field = models.ManyToManyField("auth.User")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User"})
def test_null_boolean_field(self):
field = models.NullBooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.NullBooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_integer_field(self):
field = models.PositiveIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_small_integer_field(self):
field = models.PositiveSmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_slug_field(self):
field = models.SlugField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.SlugField(db_index=False)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"db_index": False})
def test_small_integer_field(self):
field = models.SmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_text_field(self):
field = models.TextField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TextField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_url_field(self):
field = models.URLField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
| bsd-3-clause |
krinart/AutobahnPython | examples/asyncio/wamp/beginner/server.py | 6 | 2835 | ###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
import six
import datetime
import asyncio
from autobahn.wamp import router
from autobahn.asyncio import wamp, websocket
class MyBackendComponent(wamp.ApplicationSession):
    """
    Application code goes here. This is an example component that provides
    a simple procedure which can be called remotely from any WAMP peer.
    It also publishes an event every second to some topic.
    """
    def onConnect(self):
        # Transport-level callback: once the WebSocket is up, join the realm.
        self.join(u"realm1")
    @asyncio.coroutine
    def onJoin(self, details):
        # Session-attached callback: register the RPC endpoint, then publish
        # forever. Note this coroutine intentionally never returns.
        ## register a procedure for remote calling
        ##
        def utcnow():
            # RPC endpoint: current UTC time as an ISO-8601-style string.
            print("Someone is calling me;)")
            now = datetime.datetime.utcnow()
            return six.u(now.strftime("%Y-%m-%dT%H:%M:%SZ"))
        reg = yield from self.register(utcnow, u'com.timeservice.now')
        print("Registered procedure with ID {}".format(reg.id))
        ## publish events to a topic
        ##
        counter = 0
        while True:
            # Emit a monotonically increasing counter roughly once per second.
            self.publish(u'com.myapp.topic1', counter)
            print("Published event.")
            counter += 1
            yield from asyncio.sleep(1)
if __name__ == '__main__':
    ## 1) create a WAMP router factory
    router_factory = router.RouterFactory()
    ## 2) create a WAMP router session factory
    session_factory = wamp.RouterSessionFactory(router_factory)
    ## 3) Optionally, add embedded WAMP application sessions to the router
    session_factory.add(MyBackendComponent())
    ## 4) create a WAMP-over-WebSocket transport server factory
    transport_factory = websocket.WampWebSocketServerFactory(session_factory,
                                                             debug = False,
                                                             debug_wamp = False)
    ## 5) start the server
    # create_server returns a coroutine; run_until_complete yields the
    # listening Server object bound to localhost:8080.
    loop = asyncio.get_event_loop()
    coro = loop.create_server(transport_factory, '127.0.0.1', 8080)
    server = loop.run_until_complete(coro)
    try:
        ## 6) now enter the asyncio event loop
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C: fall through to the cleanup below.
        pass
    finally:
        server.close()
        loop.close()
| apache-2.0 |
kkk669/mxnet | python/mxnet/contrib/tensorboard.py | 34 | 3192 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""TensorBoard functions that can be used to log various status during epoch."""
from __future__ import absolute_import
import logging
class LogMetricsCallback(object):
    """Log metrics periodically in TensorBoard.
    This callback works almost same as `callback.Speedometer`, but write TensorBoard event file
    for visualization. For more usage, please refer https://github.com/dmlc/tensorboard

    Parameters
    ----------
    logging_dir : str
        TensorBoard event file directory.
        After that, use `tensorboard --logdir=path/to/logs` to launch TensorBoard visualization.
    prefix : str
        Prefix for a metric name of `scalar` value.
        You might want to use this param to leverage TensorBoard plot feature,
        where TensorBoard plots different curves in one graph when they have same `name`.
        The follow example shows the usage(how to compare a train and eval metric in a same graph).

    Examples
    --------
    >>> # log train and eval metrics under different directories.
    >>> training_log = 'logs/train'
    >>> evaluation_log = 'logs/eval'
    >>> # in this case, each training and evaluation metric pairs has same name,
    >>> # you can add a prefix to make it separate.
    >>> batch_end_callbacks = [mx.contrib.tensorboard.LogMetricsCallback(training_log)]
    >>> eval_end_callbacks = [mx.contrib.tensorboard.LogMetricsCallback(evaluation_log)]
    >>> # run
    >>> model.fit(train,
    >>>     ...
    >>>     batch_end_callback = batch_end_callbacks,
    >>>     eval_end_callback = eval_end_callbacks)
    >>> # Then use `tensorboard --logdir=logs/` to launch TensorBoard visualization.
    """
    def __init__(self, logging_dir, prefix=None):
        self.prefix = prefix
        # If the optional `tensorboard` package is missing, log the problem and
        # disable this callback instead of leaving the attribute unset, which
        # previously made every subsequent __call__ fail with AttributeError.
        self.summary_writer = None
        try:
            from tensorboard import SummaryWriter
            self.summary_writer = SummaryWriter(logging_dir)
        except ImportError:
            logging.error('You can install tensorboard via `pip install tensorboard`.')

    def __call__(self, param):
        """Callback to log training speed and metrics in TensorBoard."""
        if param.eval_metric is None:
            return
        if self.summary_writer is None:
            # tensorboard was not importable at construction time; no-op.
            return
        name_value = param.eval_metric.get_name_value()
        for name, value in name_value:
            if self.prefix is not None:
                name = '%s-%s' % (self.prefix, name)
            self.summary_writer.add_scalar(name, value)
| apache-2.0 |
eHealthAfrica/ureport | ureport/contacts/models.py | 2 | 9707 | from dash.orgs.models import Org
from django.core.cache import cache
from django.db import models, DataError
from django.db.models import Sum
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
# Create your models here.
import pytz
from ureport.locations.models import Boundary
from ureport.utils import json_date_to_datetime, datetime_to_json_date
class ContactField(models.Model):
    """
    Corresponds to a RapidPro contact field
    """
    # Cached list of field keys lives for 15 days before a refresh is forced.
    CONTACT_FIELDS_CACHE_TIMEOUT = 60 * 60 * 24 * 15
    CONTACT_FIELDS_CACHE_KEY = 'org:%d:contact_fields'
    org = models.ForeignKey(Org, verbose_name=_("Org"), related_name="contactfields")
    label = models.CharField(verbose_name=_("Label"), max_length=36)
    key = models.CharField(verbose_name=_("Key"), max_length=36)
    value_type = models.CharField(max_length=1, verbose_name="Field Type")
    @classmethod
    def update_or_create_from_temba(cls, org, temba_contact_field):
        # Upsert keyed on (org, key): update any existing rows in place, or
        # create a fresh row if none exists yet.
        kwargs = cls.kwargs_from_temba(org, temba_contact_field)
        existing = cls.objects.filter(org=org, key=kwargs['key'])
        if existing:
            existing.update(**kwargs)
            # The queryset is re-evaluated here, so this returns the row with
            # the freshly written values.
            return existing.first()
        else:
            return cls.objects.create(**kwargs)
    @classmethod
    def kwargs_from_temba(cls, org, temba_contact_field):
        # Translate a temba API field object into model-constructor kwargs.
        return dict(org=org, label=temba_contact_field.label, key=temba_contact_field.key,
                    value_type=temba_contact_field.value_type)
    @classmethod
    def fetch_contact_fields(cls, org):
        # Pull all contact fields from the RapidPro API, sync the local table
        # to match (insert/update/delete), and prime the per-org key cache.
        temba_client = org.get_temba_client()
        api_contact_fields = temba_client.get_fields()
        seen_keys = []
        for contact_field in api_contact_fields:
            cls.update_or_create_from_temba(org, contact_field)
            seen_keys.append(contact_field.key)
        # remove any contact field that's no longer return on the API
        cls.objects.filter(org=org).exclude(key__in=seen_keys).delete()
        key = cls.CONTACT_FIELDS_CACHE_KEY % org.id
        cache.set(key, seen_keys, cls.CONTACT_FIELDS_CACHE_TIMEOUT)
        return seen_keys
    @classmethod
    def get_contact_fields(cls, org):
        # Cache-first accessor; falls back to a live API fetch on a miss.
        key = cls.CONTACT_FIELDS_CACHE_KEY % org.id
        fields_keys = cache.get(key, None)
        if fields_keys:
            return fields_keys
        fields_keys = cls.fetch_contact_fields(org)
        return fields_keys
class Contact(models.Model):
    """
    Corresponds to a RapidPro contact
    """
    CONTACT_LAST_FETCHED_CACHE_KEY = 'last:fetch_contacts:%d'
    CONTACT_LAST_FETCHED_CACHE_TIMEOUT = 60 * 60 * 24 * 30
    MALE = 'M'
    FEMALE = 'F'
    GENDER_CHOICES = ((MALE, _("Male")), (FEMALE, _("Female")))
    uuid = models.CharField(max_length=36, unique=True)
    org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name="contacts")
    gender = models.CharField(max_length=1, verbose_name=_("Gender"), choices=GENDER_CHOICES, null=True, blank=True,
                              help_text=_("Gender of the contact"))
    born = models.IntegerField(verbose_name=_("Born Field"), null=True, blank=True)
    occupation = models.CharField(max_length=255, verbose_name=_("Occupation Field"), null=True, blank=True)
    registered_on = models.DateTimeField(verbose_name=_("Registration Date"), null=True, blank=True)
    state = models.CharField(max_length=255, verbose_name=_("State Field"), null=True)
    district = models.CharField(max_length=255, verbose_name=_("District Field"), null=True)
    @classmethod
    def find_contact_field_key(cls, org, label):
        # Resolve a configured, human-readable field label (case-insensitive)
        # to its RapidPro key; returns None when no match exists for this org.
        contact_field = ContactField.objects.filter(org=org, label__iexact=label).first()
        if contact_field:
            return contact_field.key
    @classmethod
    def kwargs_from_temba(cls, org, temba_contact):
        # Build model-constructor kwargs from a temba API contact, mapping the
        # org-configured labels (state, district, registration, occupation,
        # born, gender) onto our normalized columns. Missing or unparseable
        # values fall back to '' / 0 / None.
        from ureport.utils import json_date_to_datetime
        state = ''
        district = ''
        state_field = org.get_config('state_label')
        if state_field:
            if org.get_config('is_global'):
                # Global orgs store the raw state name instead of an OSM id.
                state_name = temba_contact.fields.get(cls.find_contact_field_key(org, state_field), None)
                if state_name:
                    state = state_name
            else:
                state_name = temba_contact.fields.get(cls.find_contact_field_key(org, state_field), None)
                state_boundary = Boundary.objects.filter(org=org, level=1, name__iexact=state_name).first()
                if state_boundary:
                    state = state_boundary.osm_id
                district_field = org.get_config('district_label')
                if district_field:
                    # Districts are matched within the state boundary above.
                    district_name = temba_contact.fields.get(cls.find_contact_field_key(org, district_field), None)
                    district_boundary = Boundary.objects.filter(org=org, level=2, name__iexact=district_name,
                                                                parent=state_boundary).first()
                    if district_boundary:
                        district = district_boundary.osm_id
        registered_on = None
        registration_field = org.get_config('registration_label')
        if registration_field:
            registered_on = temba_contact.fields.get(cls.find_contact_field_key(org, registration_field), None)
            if registered_on:
                registered_on = json_date_to_datetime(registered_on)
        occupation = ''
        occupation_field = org.get_config('occupation_label')
        if occupation_field:
            occupation = temba_contact.fields.get(cls.find_contact_field_key(org, occupation_field), '')
            if not occupation:
                # Normalize None (missing field) back to the empty string.
                occupation = ''
        born = 0
        born_field = org.get_config('born_label')
        if born_field:
            try:
                born = int(temba_contact.fields.get(cls.find_contact_field_key(org, born_field), 0))
            except ValueError:
                pass
            except TypeError:
                pass
        gender = ''
        gender_field = org.get_config('gender_label')
        female_label = org.get_config('female_label')
        male_label = org.get_config('male_label')
        if gender_field:
            # Map the org-specific gender labels onto our M/F constants;
            # anything unrecognized is stored as ''.
            gender = temba_contact.fields.get(cls.find_contact_field_key(org, gender_field), '')
            if gender and gender.lower() == female_label.lower():
                gender = cls.FEMALE
            elif gender and gender.lower() == male_label.lower():
                gender = cls.MALE
            else:
                gender = ''
        return dict(org=org, uuid=temba_contact.uuid, gender=gender, born=born, occupation=occupation,
                    registered_on=registered_on, district=district, state=state)
    @classmethod
    def update_or_create_from_temba(cls, org, temba_contact):
        # Upsert keyed on (org, uuid). DataError (e.g. a value too long for a
        # column) is reported to Sentry and swallowed so one bad contact does
        # not abort a whole sync run.
        from raven.contrib.django.raven_compat.models import client
        kwargs = cls.kwargs_from_temba(org, temba_contact)
        try:
            existing = cls.objects.filter(org=org, uuid=kwargs['uuid'])
            if existing:
                existing.update(**kwargs)
                return existing.first()
            else:
                return cls.objects.create(**kwargs)
        except DataError as e: # pragma: no cover
            client.captureException()
            import traceback
            traceback.print_exc()
    @classmethod
    def fetch_contacts(cls, org, after=None):
        # Page through contacts modified between `after` and now, upserting
        # members of the configured reporter group. Returns the uuids seen,
        # or None when the reporter group cannot be found on the API.
        print "START== Fetching contacts for %s" % org.name
        reporter_group = org.get_config('reporter_group')
        temba_client = org.get_temba_client()
        api_groups = temba_client.get_groups(name=reporter_group)
        if not api_groups:
            return
        seen_uuids = []
        group_uuid = None
        for grp in api_groups:
            if grp.name.lower() == reporter_group.lower():
                group_uuid = grp.uuid
                break
        now = timezone.now().replace(tzinfo=pytz.utc)
        before = now
        if not after:
            # consider the after year 2013
            after = json_date_to_datetime("2013-01-01T00:00:00.000")
        while before > after:
            pager = temba_client.pager()
            api_contacts = temba_client.get_contacts(before=before, after=after, pager=pager)
            last_contact_index = len(api_contacts) - 1
            for i, contact in enumerate(api_contacts):
                if i == last_contact_index:
                    # Narrow the window to the oldest contact on this page.
                    before = contact.modified_on
                if group_uuid in contact.groups:
                    cls.update_or_create_from_temba(org, contact)
                    seen_uuids.append(contact.uuid)
            if not pager.has_more():
                # Fully caught up; record when, so the next run can resume.
                cache.set(cls.CONTACT_LAST_FETCHED_CACHE_KEY % org.pk,
                          datetime_to_json_date(now.replace(tzinfo=pytz.utc)),
                          cls.CONTACT_LAST_FETCHED_CACHE_TIMEOUT)
                break
        return seen_uuids
class ReportersCounter(models.Model):
    # Denormalized counter rows: totals are obtained by summing `count`
    # across all rows sharing the same (org, type).
    org = models.ForeignKey(Org, related_name='reporters_counters')
    type = models.CharField(max_length=255)
    count = models.IntegerField(default=0, help_text=_("Number of items with this counter"))
    @classmethod
    def get_counts(cls, org, types=None):
        """
        Gets all reporters counts by counter type for the given org
        """
        counters = cls.objects.filter(org=org)
        if types:
            # The model field is named `type`; the previous lookup used the
            # nonexistent `counter_type` and raised FieldError whenever a
            # `types` filter was passed.
            counters = counters.filter(type__in=types)
        counter_counts = counters.values('type').order_by('type').annotate(count_sum=Sum('count'))
        return {c['type']: c['count_sum'] for c in counter_counts}
class Meta:
index_together = ('org', 'type') | agpl-3.0 |
YSOVAR/YSOVAR | YSOVAR/autofuncs.py | 1 | 22958 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 H.M.Guenther & K.Poppenhaeger. See Licence.rst for details.
'''Define and register some common functions for the autogeneration of columns.
Module level variables
----------------------
This module contains a dictionary of default reddening vectors.
The form of the vectors is a following:
redvec[0] : slope of reddening law in CMD
redvec[1] : reddening value in first band (the $\delta y$ in CMD)
The following command will list all implemented reddening vectors::
print YSOVAR.plot.redvecs
The reddening vector with the key '36_45' is used as default in most plots if
no specific reddening vector is specified.
Functions
----------
This module defines some commonly used functions (e.g. stetson) for the analysis
of our lightcurves. It registers all the functions defined here and a bunch
of simple numpy functions with :mod:`registry`, so that they are available
for the autogeneration of lightcurves in :class:`atlas.YSOVAR_atlas` objects.
Specifically, this module collects functions that do *not* depend on the
times of the observations (see :mod:`lightcurves` for that).
'''
import math
import numpy as np
import scipy.stats
import scipy.stats.mstats
import scipy.odr
from astropy.utils.compat.odict import OrderedDict
from .registry import register
#def redvec_36_45():
# ''' Rieke & Lebofsky 1985 (bibcode 1985ApJ...288..618R)
# I take the extinctions from the L and M band (3.5, 5.0).'''
# A36 = 0.058
# A45 = 0.023
# R36 = - A36/(A45 - A36)
# return np.array([R36, A36])
redvecs = {'36_45_rieke_Lebofsky85_vsAV': np.array([1.66, 0.058]),
'36_45_Flaherty07_vsAK':np.array([-0.632/(0.53-0.632),0.632]),
'36_45_Indebetouw05_vsAK': np.array([-0.56/(0.43-0.56), .56]),
'36_45': np.array([-0.56/(0.43-0.56), .56])
}
'''Dictionary of default reddening vectors
The form of the vectors is a following:
redvec[0] : slope of reddening law in CMD
redvec[1] : reddening value in first band (the delta_y in CMD)
'''
### simple function for one band ###
def mad(data):
    '''Median absolute deviation (no scale factor applied).'''
    center = np.median(data)
    return np.median(np.abs(data - center))
def redchi2tomean(data, error):
    '''reduced chi^2 of the data with respect to their mean'''
    normalized_residuals = (data - np.mean(data)) / error
    return np.sum(normalized_residuals ** 2) / (len(data) - 1)
def delta(data):
    '''width of distribution from 10% to 90%'''
    q90 = scipy.stats.mstats.mquantiles(data, prob=0.9)
    q10 = scipy.stats.mstats.mquantiles(data, prob=0.1)
    return q90 - q10
def AMCM(data):
'''So-called M value from Ann-Marie Cody's 2014 paper. Light curve asymmetry across the median; specifically, average of top and bottom 10% minus median, divided by rms scatter.'''
return (np.mean([scipy.stats.mstats.mquantiles(data, prob=0.9), scipy.stats.mstats.mquantiles(data, prob=0.1)]) - np.median(data))/np.sqrt( ( (data - data.mean())**2).sum() / len(data) )
def wmean(data, error):
    '''error weighted mean'''
    weights = 1. / error ** 2.
    return np.average(data, weights=weights)
def isnormal(data):
    'p-value for a 2-sided chi^2 probability that the distribution is normal'
    # The normality test is unreliable for small samples; bail out early.
    if len(data) < 20:
        return np.nan
    return scipy.stats.normaltest(data)[1]
# Register the one-band statistics above (plus several numpy builtins) with
# the registry so YSOVAR_atlas can auto-generate one column per band from
# each of them; force=True replaces any earlier registration of the name.
register(np.mean, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'],
         default_coldescriptions=['mean magnitude'])
register(np.median, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'],
         default_coldescriptions=['median magnitude'])
register(mad, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'])
register(delta, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'])
register(AMCM, n_bands = 1, error = False, time = False, force = True, default_colunits=['mag'])
register(len, n_bands = 1, error = False, time = False, name = 'n',
         other_cols = OrderedDict([('n', int)]), force = True,
         default_coldescriptions=['Number of datapoints'], default_colunits=['ct'])
register(np.min, n_bands = 1, error = False, time = False, name = 'min', force = True,
         default_colunits=['mag'], default_coldescriptions=['minimum magnitude in lightcurve'])
register(np.max, n_bands = 1, error = False, time = False, name = 'max', force = True,
         default_colunits=['mag'], default_coldescriptions=['maximum magnitude in lightcurve'])
register(np.std, n_bands = 1, time = False, error = False, name = 'stddev',
         description = 'standard deviation calculated from non-biased variance',
         kwargs = {'ddof': 1}, force = True, default_colunits=['mag'])
register(scipy.stats.skew, n_bands = 1, error = False, time = False,
         description = 'biased (no correction for dof) skew', force = True, default_colunits=['mag'])
register(scipy.stats.kurtosis, n_bands = 1, error = False, time = False,
         description = 'biased (no correction for dof) kurtosis', force = True, default_colunits=['mag'])
register(isnormal, n_bands = 1, error = False, time = False, force = True)
# These two statistics also need the per-point uncertainties (error=True).
for func in [redchi2tomean, wmean]:
    register(func, n_bands = 1, time = False, error = True, force = True)
### functions for two bands ###
def stetson(data1, data2, data1_error, data2_error):
    '''Stetson index for a two-band lightcurve.

    Implements eqn (1) of Stetson 1996, PSAP, 108, 851, on the matched
    two-band lightcurve (not frames with one band only), giving every
    datapoint a weight g_i = 1.

    Parameters
    ----------
    data1 : np.array
        single lightcurve of band 1 in magnitudes
    data2 : np.array
        single lightcurve of band 2 in magnitudes
    data1_error : np.array
        error on data points of band 1 in magnitudes
    data2_error : np.array
        error on data points of band 2 in magnitudes

    Returns
    -------
    stetson : float
        Stetson value for the provided two-band lightcurve
    '''
    n_points = len(data1)
    if not (len(data2) == n_points == len(data1_error) == len(data2_error)):
        raise ValueError('All input arrays must have the same length')
    if n_points <= 1:
        return np.nan
    # Error-weighted mean magnitude in each band.
    mean1 = np.average(data1, weights=1. / data1_error ** 2.)
    mean2 = np.average(data2, weights=1. / data2_error ** 2.)
    # Product of the normalized residuals of the two bands.
    p_ik = ((data1 - mean1) / data1_error) * ((data2 - mean2) / data2_error)
    norm = np.sqrt(1. / (n_points * (n_points - 1.)))
    return norm * np.sum(np.sign(p_ik) * np.sqrt(np.abs(p_ik)))
def cmd_slope_simple(data1, data2, data1_error, data2_error, redvec = redvecs['36_45']):
    '''Slope of the data points in the color-magnitude diagram

    Performs a plain analytic ordinary-least-squares fit of band1 versus
    (band1 - band2), plus a second fit in which the slope is fixed to the
    theoretical reddening vector. The result is used as the initial guess
    for the orthogonal least-squares fit with errors in both coordinates
    (see fit_twocolor_odr).

    Parameters
    ----------
    data1 : np.array
        single lightcurve of band 1 in magnitudes
    data2 : np.array
        single lightcurve of band 2 in magnitudes
    data1_error : np.array
        error on data points of band 1 in magnitudes
    data2_error : np.array
        error on data points of band 2 in magnitudes
    redvec : np.array with two elements
        theoretical reddening vector for the two bands chosen

    Returns
    -------
    m, b : float
        slope and axis intercept of the free fit in the CMD
    m2, b2 : float
        slope of `redvec` and the intercept fitted with that slope held fixed
    redchi2, redchi2_2 : float
        reduced chi^2 of the free fit and of the fixed-slope fit
    '''
    n_points = float(len(data1))
    # Too few points for a meaningful two-parameter fit.
    if n_points < 3:
        return np.nan
    if (len(data2) != n_points) or (len(data1_error) != n_points) or (len(data2_error) != n_points):
        raise ValueError('All input arrays must have the same length')
    color = data1 - data2
    mag = data1
    mag_error = data1_error
    # Sums appearing in the closed-form OLS solution.
    s_x = np.sum(color)
    s_y = np.sum(mag)
    s_xx = np.sum(color ** 2)
    s_xy = np.sum(color * mag)
    denom = n_points * s_xx - s_x * s_x
    m = (-s_x * s_y + n_points * s_xy) / denom
    b = (-s_x * s_xy + s_xx * s_y) / denom
    redchi2 = np.sum((mag - (m * color + b)) ** 2 / mag_error ** 2) / (n_points - 2)
    # Fit with the slope pinned to the reddening vector: only b2 is free.
    m2 = redvec[0]  # the sign is okay, because the y axis is inverted in the plots
    b2 = 1 / n_points * (s_y - m2 * s_x)
    redchi2_2 = np.sum((mag - (m2 * color + b2)) ** 2 / mag_error ** 2) / (n_points - 2)
    return m, b, m2, b2, redchi2, redchi2_2
register(cmd_slope_simple, n_bands = 2, error = True, time = False, default_colnames = ['cmd_m_plain', 'cmd_b_plain', 'cmd_m_redvec', 'cmd_b_redvec'], name = 'cmdslopesimple', force = True)
def fit_twocolor_odr(band1, band2, band1_err, band2_err, outroot = None, n_bootstrap = None, xyswitch = False, p_guess = None, redvec = redvecs['36_45']):
'''Fits a straight line to a single CMD, using a weighted orthogonal least squares algorithm (ODR).
Parameters
----------
data1 : np.array
single lightcurve of band 1 in magnitudes
data2 : np.array
single lightcurve of band 2 in magnitudes
data1_error : np.array
error on data points of band 1 in magnitudes
data2_error : np.array
error on data points of band 2 in magnitudes
dataset : np.ndarray
data collection for one detected source
index : integer
the index of the dataset within the data structure
p_guess : tuple
initial fit parameters derived from fit_twocolor
outroot : string or None
dictionary where to save the plot, set to `None` for no plotting
n_bootstrap : integer or None
how many bootstrap trials, set to `None` for no bootstrapping
xyswitch : boolean
if the X and Y axis will be switched for the fit or not. This has nothing to do with bisector fitting! The fitting algorithm used here takes care of errors in x and y simultaneously; the xyswitch is only for taking care of pathological cases where a vertical fitted line would occur without coordinate switching.
redvec : np.array with two elements
theoretical reddening vector for the two bands chosen
Returns
-------
result : tuple
contains output = fit parameters, bootstrap_output = results from the bootstrap, bootstrap_raw = the actual bootstrapped data, alpha = the fitted slope angle, sd_alpha = the error on the fitted slope angle, x_spread = the spread of the data along the fitted line (0.5*(90th percentile - 10th percentile)))
'''
#define the fitting function (in this case a straight line)
def fitfunc(p, x):
return p[0]*x + p[1]
if p_guess is None:
p_guess = list(cmd_slope_simple(band1, band2, band1_err, band2_err))[0:2]
if ~np.isfinite(p_guess[0]): # pathological case
p_guess[0] = 0
if ~np.isfinite(p_guess[1]): # pathological case
p_guess[1] = np.mean(band1-band2)
# define what the x and y data is:
x_data = band1 - band2
y_data = band1
x_error = np.sqrt( band1_err**2 + band2_err**2 )
y_error = band1_err
if xyswitch:
y_data, x_data = (x_data, y_data)
y_error, x_error = (x_error, y_error)
# load data into ODR
data = scipy.odr.RealData(x=x_data, y=y_data, sx=x_error, sy=y_error)
# tell ODR what the fitting function is:
model = scipy.odr.Model(fitfunc)
# now do the fit:
fit = scipy.odr.ODR(data, model, p_guess, maxit=1000)
output = fit.run()
p = output.beta # the fitted function parameters
delta = output.delta # array of estimated errors in input variables
eps = output.eps # array of estimated errors in response variables
#print output.stopreason[0]
bootstrap_output = np.array([np.NaN, np.NaN, np.NaN, np.NaN])
bootstrap_raw = (np.NaN, np.NaN, np.NaN)
# calculate slope angle. This is vs. horizontal axis.
alpha = math.atan(output.beta[0])
# calculate error on slope angle by taking the mean difference of the angles derived from m+m_error and m-m_error.
alpha_plus = math.asin((output.beta[0]+output.sd_beta[0])/np.sqrt((output.beta[0]+output.sd_beta[0])**2 + 1**2))
alpha_minus = math.asin((output.beta[0]-output.sd_beta[0])/np.sqrt((output.beta[0]-output.sd_beta[0])**2 + 1**2))
sd_alpha = 0.5*( np.abs(alpha - alpha_plus) + np.abs(alpha - alpha_minus) )
# define the spread along the fitted line. Use 90th and 10th quantile.
# output.xplus and output.y are the x and y values of the projection of the original data onto the fit.
# okay, first transform coordinate system so that x axis is along fit. To do this, first shift everything by -p[1] (this is -b), then rotate by -alpha. New x and y coordinates are then:
#
# |x'| |cos(-alpha) -sin(-alpha)| | x |
# | | = | | | |
# |y'| |sin(-alpha) cos(-alpha)| |y-b|
#
x_new = math.cos(-alpha) * output.xplus - math.sin(-alpha)*(output.y - p[1])
y_new = math.sin(-alpha) * output.xplus + math.cos(-alpha)*(output.y - p[1])
# The y_new values are now essentially zero. (As they should.)
# Now sort x_new and get 90th and 10th quantile:
x_new.sort()
x_spread = scipy.stats.mstats.mquantiles(x_new, prob=0.9)[0] - scipy.stats.mstats.mquantiles(x_new, prob=0.1)[0]
#print x_spread
if outroot is not None:
# I got the following from a python script from http://www.physics.utoronto.ca/~phy326/python/odr_fit_to_data.py, I have to check this properly.
# This does a residual plot, and some bootstrapping if desired.
# error ellipses:
xstar = x_error*np.sqrt( ((y_error*delta)**2) / ( (y_error*delta)**2 + (x_error*eps)**2 ) )
ystar = y_error*np.sqrt( ((x_error*eps)**2) / ( (y_error*delta)**2 + (x_error*eps)**2 ) )
adjusted_err = np.sqrt(xstar**2 + ystar**2)
residual = np.sign(y_data - fitfunc(p,x_data))*np.sqrt(delta**2 + eps**2)
fig = plt.figure()
fit = fig.add_subplot(211)
fit.set_xticklabels( () )
plt.ylabel("[3.6]")
plt.title("Orthogonal Distance Regression Fit to Data")
# plot data as circles and model as line
x_model = np.arange(min(x_data),max(x_data),(max(x_data)-min(x_data))/1000.)
fit.plot(x_data,y_data,'ro', x_model, fitfunc(p,x_model))
fit.errorbar(x_data, y_data, xerr=x_error, yerr=y_error, fmt='r+')
fit.set_yscale('linear')
a = np.array([output.xplus,x_data]) # output.xplus: x-values of datapoints projected onto fit
b = np.array([output.y,y_data]) # output.y: y-values of datapoints projected onto fit
fit.plot(np.array([a[0][0],a[1][0]]), np.array([b[0][0],b[1][0]]), 'k-', label = 'Residuals')
print np.array([a[0][0],a[1][0]])
print np.array([b[0][0],b[1][0]])
for i in range(1,len(y_data)):
fit.plot(np.array([a[0][i],a[1][i]]), np.array([b[0][i],b[1][i]]),'k-')
fit.set_ylim([min(y_data)-0.05, max(y_data)+0.05])
fit.set_ylim(fit.get_ylim()[::-1])
fit.legend(loc='lower left')
# separate plot to show residuals
residuals = fig.add_subplot(212) # 3 rows, 1 column, subplot 2
residuals.errorbar(x=a[0][:],y=residual,yerr=adjusted_err, fmt="r+", label = "Residuals")
# make sure residual plot has same x axis as fit plot
residuals.set_xlim(fit.get_xlim())
residuals.set_ylim(residuals.get_ylim()[::-1])
# Draw a horizontal line at zero on residuals plot
plt.axhline(y=0, color='b')
# Label axes
plt.xlabel("[3.6] - [4.5]")
plt.ylabel("Residuals")
plt.savefig(outroot + str(index) + '_odrfit.eps')
if n_bootstrap is not None:
print 'bootstrapping...'
# take a random half of the data and do the fit (choosing without replacement, standard bootstrap). Do this a lot of times and construct a cumulative distribution function for the slope and the intercept of the fitted line.
# now what I actually want is the slope angle a, not m.
m = np.array([])
b = np.array([])
for i in np.arange(0, n_bootstrap):
indices = np.arange(0,len(x_data))
np.random.shuffle(indices)
ind = indices[0:len(x_data)/2] # dividing by integer on purpose.
dat = scipy.odr.RealData(x=x_data[ind], y=y_data[ind], sx=x_error[ind], sy=y_error[ind])
fit = scipy.odr.ODR(dat, model, p_guess, maxit=5000,job=10)
out = fit.run()
m = np.append(m, out.beta[0])
b = np.append(b, out.beta[1])
a = np.arctan(m) # in radian
# plot histograms for m and b:
plt.clf()
n_m, bins_m, patches_m = plt.hist(m, 100, normed=True )
plt.savefig('m_hist.eps')
plt.clf()
n_b, bins_b, patches_b = plt.hist(b, 100, normed=True)
plt.savefig('b_hist.eps')
plt.clf()
n_a, bins_a, patches_a = plt.hist(a, 100, normed=True)
plt.savefig('a_hist.eps')
plt.clf()
# get median and symmetric 68% interval for m, b and alpha:
m_median = np.median(m)
m_down = np.sort(m)[ int(round(0.16*len(m))) ]
m_up = np.sort(m)[ int(round(0.84*len(m))) ]
m_error = np.mean([abs(m_down-m_median), abs(m_up-m_median)])
#print (m_median, m_up, m_down, m_error)
b_median = np.median(b)
b_down = np.sort(b)[ int(round(0.16*len(b))) ]
b_up = np.sort(b)[ int(round(0.84*len(b))) ]
b_error = np.mean([abs(b_down-b_median), abs(b_up-b_median)])
#print (b_median, b_up, b_down, b_error)
a_median = np.median(a)
a_down = np.sort(a)[ int(round(0.16*len(a))) ]
a_up = np.sort(a)[ int(round(0.84*len(a))) ]
a_error = np.mean([abs(a_down-a_median), abs(a_up-a_median)])
#print (b_median, b_up, b_down, b_error)
bootstrap_output = np.array([m_median, m_error, b_median, b_error, a_median, a_error])
bootstrap_raw = (m, b, a)
result = (output, bootstrap_output, bootstrap_raw, alpha, sd_alpha, x_spread)
return result
def cmdslope_odr(band1, band2, band1_err, band2_err, p_guess = None, redvec = redvecs['36_45']):
    '''Fits a straight line to a single CMD, using a weighted orthogonal least squares algorithm (ODR).

    Parameters
    ----------
    band1 : np.array
        single lightcurve of band 1 in magnitudes
    band2 : np.array
        single lightcurve of band 2 in magnitudes
    band1_err : np.array
        error on data points of band 1 in magnitudes
    band2_err : np.array
        error on data points of band 2 in magnitudes
    p_guess : tuple
        initial fit parameters derived from fit_twocolor
    redvec : np.array with two elements
        theoretical reddening vector for the two bands chosen

    Returns
    -------
    result : tuple
        ``(alpha, alpha_error, cmd_m, cmd_b, cmd_m_error, cmd_b_error, AV,
        cmd_dominated, spread)``: the fitted slope angle and its error, the
        slope and offset of the best-fit line with their errors, the spread
        converted to A_V (only when the slope is classified as
        extinction-dominated, otherwise NaN), a crude string classification
        of the slope, and the 10th-90th percentile spread of the data along
        the fitted line.
    '''
    # Too few epochs for a meaningful fit.
    # BUGFIX: return a placeholder tuple with the same arity (9 elements) as
    # the regular return statement below; the previous version returned only
    # 8 elements here, which breaks callers that unpack the full result.
    if len(band1) < 10:
        return (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, '',
                np.nan)
    if p_guess is None:
        p_guess = cmd_slope_simple(band1, band2, band1_err, band2_err, redvec = redvec)
    # Run the ODR fit twice: once with x and y interchanged, once normally.
    (fit_output2, bootstrap_output2, bootstrap_raw2, alpha2, alpha_error2, spread2) = fit_twocolor_odr(band1, band2, band1_err, band2_err, xyswitch = True, p_guess = p_guess, redvec = redvec)
    (fit_output, bootstrap_output, bootstrap_raw, alpha, alpha_error, spread) = fit_twocolor_odr(band1, band2, band1_err, band2_err, xyswitch = False, p_guess = p_guess, redvec = redvec)
    # Checks if the ODR fit with switched X and Y axes yields a more
    # constrained fit than the original axes. This basically catches the
    # pathological cases with a (nearly) vertical fit with large nominal errors.
    # NOTE(review): this divides by alpha/alpha2; a perfectly horizontal fit
    # (alpha == 0) would raise ZeroDivisionError -- confirm that cannot occur
    # upstream in fit_twocolor_odr.
    if alpha_error/alpha > alpha_error2/alpha2:
        alpha, alpha_error = (alpha2, alpha_error2)
        # Transform the slope/offset of the x/y-switched fit back into the
        # original coordinate system: y = m*x + b  <->  x = y/m - b/m.
        cmd_m = 1./fit_output2.beta[0]
        cmd_b = -fit_output2.beta[1] / fit_output2.beta[0]
        cmd_m_error = fit_output2.sd_beta[0] / cmd_m**2
        cmd_b_error = np.sqrt((fit_output2.sd_beta[1]/cmd_m)**2 +
                              (cmd_b**2*cmd_m_error**2)**2)
        spread = spread2
    else:
        cmd_m = fit_output.beta[0]
        cmd_b = fit_output.beta[1]
        cmd_m_error = fit_output.sd_beta[0]
        cmd_b_error = fit_output.sd_beta[1]
    # Make new alpha to avoid confusion in case of x/y switch
    alpha = math.atan(cmd_m)
    # Crude classification of the CMD slope:
    # anything that goes up and has a relative slope error of <40% is
    # "accretion-dominated", anything that is within some cone around
    # the theoretical reddening and has error <40% is "extinction-dominated",
    # anything else is "other".
    # If slope is classified as extinction, the spread in the CMD is converted
    # to AV and stored.
    # angle of standard reddening
    alpha_red = math.atan(redvec[0])
    cmd_dominated = 'bad'
    AV = np.nan
    # NOTE(review): the ratio is signed, so a negative alpha always passes this
    # test; np.abs(alpha_error/alpha) may have been intended -- kept as-is.
    if alpha_error/alpha <=0.4:
        cmd_dominated = 'other'
        if np.abs(alpha - alpha_red) < 0.3:
            cmd_dominated = 'extinc.'
            AV = spread/redvec[1]
        if alpha < 0.:
            cmd_dominated = 'accr.'
    return alpha, alpha_error, cmd_m, cmd_b, cmd_m_error, cmd_b_error, AV, cmd_dominated, spread
# Register cmdslope_odr as a lightcurve analysis routine.
# BUGFIX: the column metadata lists now match the 9 output columns
# (7 default_colnames + 2 other_cols):
#   - default_colunits previously had 10 entries (one spurious None);
#   - default_coldescriptions[5] duplicated 'uncertainty on angle' where the
#     cmd_b_error column needs 'uncertainty on offset'.
register(cmdslope_odr, n_bands = 2, error = True, time = False,
         default_colnames = ['cmd_alpha', 'cmd_alpha_error', 'cmd_m', 'cmd_b',
                             'cmd_m_error', 'cmd_b_error', 'AV'],
         other_cols = OrderedDict([['cmd_dominated', 'S10'],
                                   ['CMD_length', 'float']]),
         name = 'cmdslopeodr', force = True,
         default_colunits = ['rad', 'rad', None, None, None, None, 'mag',
                             None, 'mag'],
         default_coldescriptions = ['angle of best-fit line in CMD',
                                    'uncertainty on angle',
                                    'slope in CMD',
                                    'offset of best-fits line',
                                    'uncertainty on slope',
                                    'uncertainty on offset',
                                    'length of reddening vector',
                                    'classification of slope in CMD',
                                    '90% spread in slope in CMD'])
| gpl-3.0 |
shsingh/ansible | lib/ansible/modules/storage/netapp/netapp_e_syslog.py | 11 | 10536 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_syslog
short_description: NetApp E-Series manage syslog settings
description:
- Allow the syslog settings to be configured for an individual E-Series storage-system
version_added: '2.7'
author: Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.eseries
options:
state:
description:
- Add or remove the syslog server configuration for E-Series storage array.
- Existing syslog server configuration will be removed or updated when its address matches I(address).
- Fully qualified hostname that resolve to an IPv4 address that matches I(address) will not be
treated as a match.
choices:
- present
- absent
default: present
address:
description:
- The syslog server's IPv4 address or a fully qualified hostname.
- All existing syslog configurations will be removed when I(state=absent) and I(address=None).
port:
description:
- This is the port the syslog server is using.
default: 514
protocol:
description:
- This is the transmission protocol the syslog server's using to receive syslog messages.
choices:
- udp
- tcp
- tls
default: udp
components:
description:
- The e-series logging components define the specific logs to transfer to the syslog server.
- At the time of writing, 'auditLog' is the only logging component but more may become available.
default: ["auditLog"]
test:
description:
- This forces a test syslog message to be sent to the stated syslog server.
- Only attempts transmission when I(state=present).
type: bool
default: no
log_path:
description:
- This argument specifies a local path for logging purposes.
required: no
notes:
- Check mode is supported.
- This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
SANtricity OS 11.40.2) and higher.
"""
EXAMPLES = """
- name: Add two syslog server configurations to NetApp E-Series storage array.
netapp_e_syslog:
state: present
address: "{{ item }}"
port: 514
protocol: tcp
component: "auditLog"
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
loop:
- "192.168.1.1"
- "192.168.1.100"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
syslog:
description:
- True if syslog server configuration has been added to e-series storage array.
returned: on success
sample: True
type: bool
"""
import json
import logging
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Syslog(object):
    """Manage the syslog server configuration of a NetApp E-Series array.

    Wraps the E-Series web services ``/storage-systems/{ssid}/syslog``
    endpoints to add, update, remove and test syslog server entries.
    """

    def __init__(self):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            state=dict(choices=["present", "absent"], required=False, default="present"),
            address=dict(type="str", required=False),
            port=dict(type="int", default=514, required=False),
            protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False),
            components=dict(type="list", required=False, default=["auditLog"]),
            test=dict(type="bool", default=False, required=False),
            log_path=dict(type="str", required=False),
        ))

        required_if = [
            ["state", "present", ["address", "port", "protocol", "components"]],
        ]

        mutually_exclusive = [
            ["test", "absent"],
        ]

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if,
                                    mutually_exclusive=mutually_exclusive)
        args = self.module.params

        # True when the requested state is "present" (add/update entry).
        self.syslog = args["state"] in ["present"]
        self.address = args["address"]
        self.port = args["port"]
        self.protocol = args["protocol"]
        self.components = args["components"]
        self.test = args["test"]
        self.ssid = args["ssid"]
        self.url = args["api_url"]
        self.creds = dict(url_password=args["api_password"],
                          validate_certs=args["validate_certs"],
                          url_username=args["api_username"], )

        # Sort so that later component comparisons are order-independent.
        self.components.sort()

        self.check_mode = self.module.check_mode

        # logging setup
        log_path = args["log_path"]
        self._logger = logging.getLogger(self.__class__.__name__)

        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        if not self.url.endswith('/'):
            self.url += '/'

    def get_configuration(self):
        """Retrieve the existing syslog configuration list from the array."""
        try:
            (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
                                   headers=HEADERS, **self.creds)
            return result
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def test_configuration(self, body):
        """Send a single test syslog message via the server entry in *body*.

        ``body["id"]`` must identify an existing syslog server record on the
        array; fails the module on any transmission error.
        """
        try:
            (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}/test".format(self.ssid, body["id"]),
                                   method='POST', headers=HEADERS, **self.creds)
        except Exception as err:
            self.module.fail_json(
                msg="We failed to send test message! Array Id [{0}]. Error [{1}].".format(self.ssid, to_native(err)))

    def update_configuration(self):
        """Reconcile the requested state against the array configuration.

        Returns:
            bool: True when a change was (or, in check mode, would be) made.
        """
        config_match = None
        perfect_match = None
        update = False
        body = dict()

        # search existing configuration for syslog server entry match
        configs = self.get_configuration()
        if self.address:
            for config in configs:
                if config["serverAddress"] == self.address:
                    config_match = config
                    if (config["port"] == self.port and config["protocol"] == self.protocol and
                            len(config["components"]) == len(self.components) and
                            all([component["type"] in self.components for component in config["components"]])):
                        perfect_match = config_match
                        break

        # generate body for the http request
        if self.syslog:
            if not perfect_match:
                update = True
                if config_match:
                    body.update(dict(id=config_match["id"]))
                components = [dict(type=component_type) for component_type in self.components]
                body.update(dict(serverAddress=self.address, port=self.port,
                                 protocol=self.protocol, components=components))
                self._logger.info(body)
                self.make_configuration_request(body)

        # remove specific syslog server configuration
        elif self.address:
            # BUGFIX: only attempt removal when a matching configuration
            # exists; previously a nonexistent address caused a TypeError
            # (config_match was None). Removing an absent entry is a no-op.
            if config_match:
                update = True
                body.update(dict(id=config_match["id"]))
                self._logger.info(body)
                self.make_configuration_request(body)

        # if no address is specified, remove all syslog server configurations
        elif configs:
            update = True
            for config in configs:
                body.update(dict(id=config["id"]))
                self._logger.info(body)
                self.make_configuration_request(body)

        return update

    def make_configuration_request(self, body):
        """Issue the add/update (POST) or remove (DELETE) request for *body*.

        Honors check mode by skipping the actual HTTP request(s).
        """
        if not self.check_mode:
            try:
                if self.syslog:
                    if "id" in body:
                        # Update an existing syslog server entry.
                        (rc, result) = request(
                            self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
                            method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
                    else:
                        # Create a new syslog server entry.
                        (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
                                               method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
                        body.update(result)

                    # send syslog test message
                    if self.test:
                        self.test_configuration(body)

                elif "id" in body:
                    (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
                                           method='DELETE', headers=HEADERS, **self.creds)

            # This is going to catch cases like a connection failure
            except Exception as err:
                self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))

    def update(self):
        """Update configuration and respond to ansible."""
        update = self.update_configuration()
        self.module.exit_json(msg="The syslog settings have been updated.", changed=update)

    def __call__(self, *args, **kwargs):
        """Allow the instance to be invoked directly as the module body."""
        self.update()
def main():
    """Module entry point: build the Syslog handler and execute the update."""
    syslog_module = Syslog()
    syslog_module()


if __name__ == "__main__":
    main()
| gpl-3.0 |
gooddata/openstack-nova | nova/virt/zvm/hypervisor.py | 2 | 6380 | # Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pwd
from oslo_log import log as logging
from nova.compute import power_state as compute_power_state
from nova import conf
from nova import exception
from nova.virt.zvm import utils as zvmutils
LOG = logging.getLogger(__name__)
CONF = conf.CONF
class Hypervisor(object):
    """z/VM implementation of Hypervisor.

    Thin wrapper around the z/VM Cloud Connector REST API: every operation
    is delegated by name to the connector client (``self._reqh``).
    """

    def __init__(self, zcc_url, ca_file=None):
        # zcc_url: endpoint of the z/VM Cloud Connector.
        # ca_file: optional CA certificate file used to verify the connection.
        super(Hypervisor, self).__init__()
        self._reqh = zvmutils.ConnectorClient(zcc_url,
                                              ca_file=ca_file)
        host_info = self._get_host_info()

        # Very very unlikely the hostname will be changed, so when create
        # hypervisor object, store the information in the cache and after
        # that we can use it directly without query again from connectorclient
        # NOTE(review): _get_host_info returns {} on connector errors, which
        # would raise KeyError here -- confirm that is intended fail-fast
        # behavior.
        self._hypervisor_hostname = host_info['hypervisor_hostname']

        # "user@ip" string identifying this compute host; passed as the
        # remote host for file transfers in deploy/import/export calls.
        self._rhost = ''.join([pwd.getpwuid(os.geteuid()).pw_name, '@',
                               CONF.my_ip])

    def _get_host_info(self):
        """Query host stats from the connector; returns {} on error."""
        host_stats = {}
        try:
            host_stats = self._reqh.call('host_get_info')
        except exception.ZVMConnectorError as e:
            LOG.warning("Failed to get host stats: %s", e)
        return host_stats

    def get_available_resource(self):
        """Return the host resource stats dict ({} when the query fails)."""
        return self._get_host_info()

    def get_available_nodes(self, refresh=False):
        # It's not expected that the hostname change, no need to take
        # 'refresh' into account.
        return [self._hypervisor_hostname]

    def list_names(self):
        """list names of the servers in the hypervisor"""
        return self._reqh.call('guest_list')

    def get_host_uptime(self):
        """Return the hypervisor's IPL (boot) time string."""
        host_info = self._get_host_info()

        return host_info['ipl_time']

    def guest_exists(self, instance):
        """Return True if the instance's guest is defined on the hypervisor.

        Guest names are compared upper-cased, matching z/VM naming.
        """
        return instance.name.upper() in self.list_names()

    def guest_get_power_state(self, name):
        """Return the guest's power state, raising InstanceNotFound on 404."""
        power_state = compute_power_state.NOSTATE
        try:
            power_state = self._reqh.call('guest_get_power_state', name)
        except exception.ZVMConnectorError as err:
            if err.overallRC == 404:
                # instance does not exist
                LOG.warning("Failed to get power state due to nonexistent "
                            "instance: %s", name)
                raise exception.InstanceNotFound(instance_id=name)
            else:
                raise

        return power_state

    def guest_create(self, name, vcpus, memory_mb, disk_list):
        """Define a new guest with the given CPU, memory and disks."""
        self._reqh.call('guest_create', name, vcpus, memory_mb,
                        disk_list=disk_list)

    def guest_deploy(self, name, image_name, transportfiles):
        """Deploy an image onto the guest, copying files from this host."""
        self._reqh.call('guest_deploy', name, image_name,
                        transportfiles=transportfiles, remotehost=self._rhost)

    def guest_delete(self, name):
        """Delete the guest definition from the hypervisor."""
        self._reqh.call('guest_delete', name)

    def guest_start(self, name):
        """Power on the guest."""
        self._reqh.call('guest_start', name)

    def guest_create_network_interface(self, name, distro, nets):
        """Create network interface(s) for the guest's OS distro."""
        self._reqh.call('guest_create_network_interface',
                        name, distro, nets)

    def guest_get_definition_info(self, name):
        """Get user direct info

        :returns: User direct is server definition, it will be
                  returned in a string format
        """
        return self._reqh.call('guest_get_definition_info', name)

    def guest_get_nic_vswitch_info(self, name):
        """Get the nic and vswitch info

        :returns: Return the nic and vswitch info in dict
        """
        return self._reqh.call('guest_get_nic_vswitch_info', name)

    def guest_config_minidisks(self, name, disk_list):
        """Configure (format/mount) the guest's additional minidisks."""
        self._reqh.call('guest_config_minidisks', name, disk_list)

    def guest_capture(self, name, image_id):
        """Capture the guest into an image identified by image_id."""
        self._reqh.call('guest_capture', name, image_id)

    def guest_softstop(self, name, timeout=0, retry_interval=0):
        """Gracefully stop the guest, polling until stopped or timeout."""
        self._reqh.call('guest_softstop', name, timeout=timeout,
                        poll_interval=retry_interval)

    def guest_pause(self, name):
        """Pause the guest."""
        self._reqh.call('guest_pause', name)

    def guest_unpause(self, name):
        """Unpause the guest."""
        self._reqh.call('guest_unpause', name)

    def guest_reboot(self, name):
        """Soft reboot the guest."""
        self._reqh.call('guest_reboot', name)

    def guest_reset(self, name):
        """Hard reset the guest."""
        self._reqh.call('guest_reset', name)

    def guest_get_console_output(self, name):
        """Get console output of the given instance.

        :returns: The output of the console of the instance, in string format.
        """
        return self._reqh.call('guest_get_console_output', name)

    def image_query(self, imagename):
        """Check whether image is there or not

        :returns: Query the image and returns a dict of the image info
                  if the image exists or return {}
        """
        return self._reqh.call('image_query', imagename=imagename)

    def image_get_root_disk_size(self, imagename):
        """Get the root disk size of image

        :returns: return the size (in string) about the root disk of image
        """
        return self._reqh.call('image_get_root_disk_size', imagename)

    def image_import(self, image_href, image_url, image_meta):
        """Import an image into the z/VM image repository from this host."""
        self._reqh.call('image_import', image_href, image_url,
                        image_meta, remote_host=self._rhost)

    def image_export(self, image_id, dest_path):
        """Export image to a given place.

        :returns: a dict which represent the exported image information.
        """
        resp = self._reqh.call('image_export', image_id,
                               dest_path, remote_host=self._rhost)
        return resp

    def image_delete(self, image_id):
        """Delete the image from the z/VM image repository."""
        self._reqh.call('image_delete', image_id)
| apache-2.0 |
robertobarreda/django-safe-browsing | safebrowsing/vendors/google/expression.py | 3 | 11623 | #!/usr/bin/env python2.5
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper classes which help converting a url to a list of SB expressions."""
import array
import logging
import re
import string
import urllib
import urlparse
import util
class UrlParseError(Exception):
  """Raised when a URL cannot be parsed into SafeBrowsing expressions."""
def GenerateSafeChars():
  """
  Return a string containing all 'safe' characters that shouldn't be escaped
  for url encoding. This includes all printable characters except '%#' and
  whitespace characters.
  """
  # BUGFIX/portability: the previous implementation round-tripped through
  # array.array('c', ...).tostring(), which relies on the 'c' typecode that
  # only exists in Python 2. ''.join produces the identical string and works
  # on both Python 2 and 3.
  unfiltered_chars = string.digits + string.ascii_letters + string.punctuation
  return ''.join(c for c in unfiltered_chars if c not in '%#')
class ExpressionGenerator(object):
  """Class does the conversion url -> list of SafeBrowsing expressions.

  This class converts a given url into the list of all SafeBrowsing host-suffix,
  path-prefix expressions for that url.  These are expressions that are on the
  SafeBrowsing lists.

  NOTE(review): this module is Python 2 code (xrange, long, urlparse,
  urllib.unquote); the comments below document it as-is.
  """
  # Patterns for recognizing hex/octal/decimal numeric host components.
  HEX = re.compile(r'^0x([a-fA-F0-9]+)$')
  OCT = re.compile(r'^0([0-7]+)$')
  DEC = re.compile(r'^(\d+)$')
  IP_WITH_TRAILING_SPACE = re.compile(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) ')
  # NOTE(review): the mid-pattern (?i) flag is an error on Python >= 3.11, and
  # '[0-9\\.]' in a raw string also matches a literal backslash -- both kept
  # as-is since this is legacy Python 2 code.
  POSSIBLE_IP = re.compile(r'^(?i)((?:0x[0-9a-f]+|[0-9\\.])+)$')
  # Matches host strings containing an invalid octal component (digit 8 or 9).
  FIND_BAD_OCTAL_REGEXP = re.compile(r'(^|\.)0\d*[89]')
  # This regular expression parses the host and port from a hostname. Note: any
  # user and password are removed from the hostname.
  HOST_PORT_REGEXP = re.compile(r'^(?:.*@)?(?P<host>[^:]*)(:(?P<port>\d+))?$')
  SAFE_CHARS = GenerateSafeChars()
  # Dict that maps supported schemes to their default port number.
  DEFAULT_PORTS = {'http': '80', 'https': '443', 'ftp': '21'}

  def __init__(self, url):
    # Raised (and passed down to helpers) whenever the URL is unusable.
    parse_exception = UrlParseError('failed to parse URL "%s"' % (url,))
    canonical_url = ExpressionGenerator.CanonicalizeUrl(url)
    if not canonical_url:
      raise parse_exception
    # Each element is a list of host components used to build expressions.
    self._host_lists = []
    # A list of paths used to build expressions.
    self._path_exprs = []

    url_split = urlparse.urlsplit(canonical_url)
    canonical_host, canonical_path = url_split[1], url_split[2]
    self._MakeHostLists(canonical_host, parse_exception)

    if url_split[3]:
      # Include canonicalized path with query arguments
      self._path_exprs.append(canonical_path + '?' + url_split[3])
    self._path_exprs.append(canonical_path)

    # Get the first three directory path components and create the 4 path
    # expressions starting at the root (/) and successively appending directory
    # path components, including the trailing slash. E.g.:
    # /a/b/c/d.html -> [/, /a/, /a/b/, /a/b/c/]
    path_parts = canonical_path.rstrip('/').lstrip('/').split('/')[:3]
    if canonical_path.count('/') < 4:
      # If the last component in not a directory we remove it.
      path_parts.pop()
    while path_parts:
      self._path_exprs.append('/' + '/'.join(path_parts) + '/')
      path_parts.pop()

    if canonical_path != '/':
      self._path_exprs.append('/')

  @staticmethod
  def CanonicalizeUrl(url):
    """Canonicalize the given URL for the SafeBrowsing protocol.

    Args:
      url: URL to canonicalize.
    Returns:
      A canonical URL or None if the URL could not be canonicalized.
    """
    # Start by stripping off the fragment identifier.
    tmp_pos = url.find('#')
    if tmp_pos >= 0:
      url = url[0:tmp_pos]

    # Stripping off leading and trailing white spaces.
    url = url.lstrip().rstrip()

    # Remove any embedded tabs and CR/LF characters which aren't escaped.
    url = url.replace('\t', '').replace('\r', '').replace('\n', '')

    # Un-escape and re-escape the URL just in case there are some encoded
    # characters in the url scheme for example.
    url = ExpressionGenerator._Escape(url)

    url_split = urlparse.urlsplit(url)
    if not url_split[0]:
      # URL had no scheme. In this case we assume it is http://.
      url = 'http://' + url
      url_split = urlparse.urlsplit(url)

    url_scheme = url_split[0].lower()
    if url_scheme not in ExpressionGenerator.DEFAULT_PORTS:
      return None  # Unsupported scheme.

    # Note: applying HOST_PORT_REGEXP also removes any user and password.
    m = ExpressionGenerator.HOST_PORT_REGEXP.match(url_split[1])
    if not m:
      return None
    host, port = m.group('host'), m.group('port')

    canonical_host = ExpressionGenerator.CanonicalizeHost(host)
    if not canonical_host:
      return None

    # Now that the host is canonicalized we add the port back if it's not the
    # default port for that url scheme.
    if port and port != ExpressionGenerator.DEFAULT_PORTS[url_scheme]:
      canonical_host += ':' + port

    canonical_path = ExpressionGenerator.CanonicalizePath(url_split[2])

    # If the URL ends with ? we want to keep the ?.
    canonical_url = url_split[0] + '://' + canonical_host + canonical_path
    if url_split[3] != '' or url.endswith('?'):
      canonical_url += '?' + url_split[3]
    return canonical_url

  @staticmethod
  def CanonicalizePath(path):
    """Canonicalize the given path."""
    if not path:
      return '/'

    # There are some cases where the path will not start with '/'. Example:
    # "ftp://host.com?q" -- the hostname is 'host.com' and the path '%3Fq'.
    # Browsers typically do prepend a leading slash to the path in this case,
    # we'll do the same.
    if path[0] != '/':
      path = '/' + path

    path = ExpressionGenerator._Escape(path)

    path_components = []
    for path_component in path.split('/'):
      # If the path component is '..' we skip it and remove the preceding path
      # component if there are any.
      if path_component == '..':
        if len(path_components) > 0:
          path_components.pop()
      # We skip empty path components to remove successive slashes (i.e.,
      # // -> /). Note: this means that the leading and trailing slash will
      # also be removed and need to be re-added afterwards.
      #
      # If the path component is '.' we also skip it (i.e., /./ -> /).
      elif path_component != '.' and path_component != '':
        path_components.append(path_component)

    # Put the path components back together and re-add the leading slash which
    # got stipped by removing empty path components.
    canonical_path = '/' + '/'.join(path_components)

    # If necessary we also re-add the trailing slash.
    if path.endswith('/') and not canonical_path.endswith('/'):
      canonical_path += '/'

    return canonical_path

  @staticmethod
  def CanonicalizeHost(host):
    """Canonicalize the given host. Returns None in case of an error."""
    if not host:
      return None
    host = ExpressionGenerator._Escape(host.lower())

    ip = ExpressionGenerator.CanonicalizeIp(host)
    if ip:
      # Host is an IP address.
      host = ip
    else:
      # Host is a normal hostname.
      # Skip trailing, leading and consecutive dots.
      host_split = [part for part in host.split('.') if part]
      if len(host_split) < 2:
        return None
      host = '.'.join(host_split)

    return host

  @staticmethod
  def CanonicalizeIp(host):
    """
    Return a canonicalized IP if host can represent an IP and None otherwise.
    """
    if len(host) <= 15:
      # The Windows resolver allows a 4-part dotted decimal IP address to have a
      # space followed by any old rubbish, so long as the total length of the
      # string doesn't get above 15 characters. So, "10.192.95.89 xy" is
      # resolved to 10.192.95.89.
      # If the string length is greater than 15 characters,
      # e.g. "10.192.95.89 xy.wildcard.example.com", it will be resolved through
      # DNS.
      m = ExpressionGenerator.IP_WITH_TRAILING_SPACE.match(host)
      if m:
        host = m.group(1)

    if not ExpressionGenerator.POSSIBLE_IP.match(host):
      return None

    # Basically we should parse octal if we can, but if there are illegal octal
    # numbers, i.e. 08 or 09, then we should just look at decimal and hex.
    allow_octal = not ExpressionGenerator.FIND_BAD_OCTAL_REGEXP.search(host)

    # Skip trailing, leading and consecutive dots.
    host_split = [part for part in host.split('.') if part]
    if len(host_split) > 4:
      return None

    # Parse each dotted component as hex, octal (when legal) or decimal.
    ip = []
    for i in xrange(len(host_split)):
      m = ExpressionGenerator.HEX.match(host_split[i])
      if m:
        base = 16
      else:
        m = ExpressionGenerator.OCT.match(host_split[i])
        if m and allow_octal:
          base = 8
        else:
          m = ExpressionGenerator.DEC.match(host_split[i])
          if m:
            base = 10
          else:
            return None
      n = long(m.group(1), base)
      if n > 255:
        if i < len(host_split) - 1:
          # Overflowing non-final components keep only their low byte.
          n &= 0xff
          ip.append(n)
        else:
          # The final component may encode several bytes (e.g. 10.0.65535);
          # split it into bytes, most significant first.
          # NOTE: 'bytes' shadows the builtin here; 'shift' is unused -- both
          # kept byte-identical to the original (Python 2 code).
          bytes = []
          shift = 0
          while n > 0 and len(bytes) < 4:
            bytes.append(n & 0xff)
            n >>= 8
          if len(ip) + len(bytes) > 4:
            return None
          bytes.reverse()
          ip.extend(bytes)
      else:
        ip.append(n)

    # Pad with zero bytes up to a full 4-part dotted-decimal address.
    while len(ip) < 4:
      ip.append(0)
    return '%u.%u.%u.%u' % tuple(ip)

  def Expressions(self):
    """
    A generator of the possible expressions.

    Yields every host-suffix/path-prefix combination built in __init__.
    """
    for host_parts in self._host_lists:
      host = '.'.join(host_parts)
      for p in self._path_exprs:
        yield Expression(host, p)

  @staticmethod
  def _Escape(unescaped_str):
    """Fully unescape the given string, then re-escape once.

    Args:
      unescaped_str: string that should be escaped.
    Returns:
      Escaped string according to the SafeBrowsing protocol.
    """
    # Repeatedly unquote until the string stops changing (handles
    # double/triple-encoded input), then quote exactly once.
    unquoted = urllib.unquote(unescaped_str)
    while unquoted != unescaped_str:
      unescaped_str = unquoted
      unquoted = urllib.unquote(unquoted)
    return urllib.quote(unquoted, ExpressionGenerator.SAFE_CHARS)

  def _MakeHostLists(self, host, parse_exception):
    """
    Canonicalize host and build self._host_lists.

    For an IP host there is a single expression; for a hostname, the full
    host plus up to four trailing-component suffixes are generated.
    """
    ip = ExpressionGenerator.CanonicalizeIp(host)
    if ip is not None:
      # Is an IP.
      self._host_lists.append([ip])
      return

    # Is a hostname.
    # Skip trailing, leading and consecutive dots.
    host_split = [part for part in host.split('.') if part]
    if len(host_split) < 2:
      raise parse_exception
    start = len(host_split) - 5
    stop = len(host_split) - 1
    if start <= 0:
      start = 1
    self._host_lists.append(host_split)
    for i in xrange(start, stop):
      self._host_lists.append(host_split[i:])
class Expression(object):
  """A single host-suffix, path-prefix SafeBrowsing expression."""

  def __init__(self, host, path):
    self._host = host
    self._path = path
    self._value = host + path
    self._hash_value = util.GetHash256(self._value)

  def __str__(self):
    return self._value

  def __repr__(self):
    # Debug-friendly representation; intentionally identical to __str__.
    return self._value

  def Value(self):
    """Return the raw host+path expression string."""
    return self._value

  def HashValue(self):
    """Return the 256-bit hash of the expression string."""
    return self._hash_value
| bsd-3-clause |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/scons-2.0.1/engine/SCons/Tool/gs.py | 61 | 2553 | """SCons.Tool.gs
Tool-specific initialization for Ghostscript.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gs.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Action
import SCons.Platform
import SCons.Util
# Ghostscript's executable name differs per platform; default to 'gs'.
platform = SCons.Platform.platform_default()
gs = {'os2': 'gsos2', 'win32': 'gswin32c'}.get(platform, 'gs')

# Shared Action instance, created lazily on first generate() call.
GhostscriptAction = None
def generate(env):
    """Add Builders and construction variables for Ghostscript to an
    Environment."""
    global GhostscriptAction
    # Create the shared Action once; every Environment reuses it.
    if GhostscriptAction is None:
        GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')

    # Make sure the PDF builder exists, then teach it to convert
    # PostScript sources with Ghostscript.
    import pdf
    pdf.generate(env)
    env['BUILDERS']['PDF'].add_action('.ps', GhostscriptAction)

    # Construction variables consumed by $GSCOM.
    env['GS'] = gs
    env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=pdfwrite')
    env['GSCOM'] = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
def exists(env):
if 'PS2PDF' in env:
return env.Detect(env['PS2PDF'])
else:
return env.Detect(gs) or SCons.Util.WhereIs(gs)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_09_01/operations/_virtual_network_gateways_operations.py | 1 | 53818 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Hold onto the shared pipeline client, its configuration, and the
    # (de)serializers supplied by the owning service client.
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VirtualNetworkGateway"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VirtualNetworkGateway"
    """Send the initial PUT of the create-or-update long-running operation.

    Only issues the first request and deserializes the immediate 200/201
    body; polling to completion is handled by begin_create_or_update.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
    # Status codes mapped to azure-core exceptions; callers may extend
    # via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json, text/json"
    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the gateway model into the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # Both 200 (updated) and 201 (created) carry the gateway body.
    if response.status_code == 200:
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
    if response.status_code == 201:
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VirtualNetworkGateway"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
    """Creates or updates a virtual network gateway in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param parameters: Parameters supplied to create or update virtual network gateway operation.
    :type parameters: ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial PUT. cls=lambda passes the raw
        # pipeline response through so the poller can inspect it.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Remove kwargs that applied only to the initial request so they do
    # not leak into the polling requests (kwargs is forwarded below).
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response once the LRO completes.
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VirtualNetworkGateway"
    """Gets the specified virtual network gateway by resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualNetworkGateway, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
    # Status codes mapped to azure-core exceptions; extendable via the
    # 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    accept = "application/json, text/json"
    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
    if cls:
        # Hand the caller's custom callback the raw response and model.
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial DELETE of the delete long-running operation.

    Returns nothing; 200/202/204 are all accepted as successful initial
    statuses, and polling to completion is handled by begin_delete.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers (no Accept: the delete response carries no body
    # to deserialize).
    header_parameters = {}  # type: Dict[str, Any]
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified virtual network gateway.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial DELETE; cls=lambda passes the
        # raw pipeline response through for the poller.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Strip initial-request-only kwargs before they reach the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete yields no body; only invoke the caller's callback.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # final-state-via location: the terminal state is read from the
    # Location header's URL rather than the original resource URL.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}  # type: ignore
def list(
    self,
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.VirtualNetworkGatewayListResult"]
    """Gets all virtual network gateways by resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualNetworkGatewayListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGatewayListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    accept = "application/json, text/json"

    def prepare_request(next_link=None):
        # Build the first-page request, or a follow-up request from the
        # service-provided next_link.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already embeds its query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Split one response page into (next-page link, element iterator).
        deserialized = self._deserialize('VirtualNetworkGatewayListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, surfacing HTTP errors via error_map.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'}  # type: ignore
def _reset_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    gateway_vip=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.VirtualNetworkGateway"]
    """Send the initial POST of the reset long-running operation.

    The optional gateway_vip selects the instance to reset on an
    active-active gateway. Returns the gateway model only on an
    immediate 200; a 202 (accepted, still running) yields None.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VirtualNetworkGateway"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    accept = "application/json, text/json"
    # Construct URL
    url = self._reset_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    if gateway_vip is not None:
        query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}  # type: ignore
def begin_reset(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    gateway_vip=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
    """Resets the primary of the virtual network gateway in the specified resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param gateway_vip: Virtual network gateway vip address supplied to the begin reset of the
     active-active feature enabled gateway.
    :type gateway_vip: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial POST; cls=lambda passes the
        # raw pipeline response through for the poller.
        raw_result = self._reset_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            gateway_vip=gateway_vip,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Strip initial-request-only kwargs before they reach the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response once the LRO completes.
        deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # final-state-via location: read the terminal state from the Location
    # header's URL.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}  # type: ignore
def _generatevpnclientpackage_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VpnClientParameters"
    **kwargs  # type: Any
):
    # type: (...) -> str
    """Send the initial POST of the generate-VPN-client-package LRO.

    Expects a 202 response and deserializes its body as a plain string;
    polling to completion is handled by begin_generatevpnclientpackage.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json, text/json"
    # Construct URL
    url = self._generatevpnclientpackage_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the VPN client parameters into the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'VpnClientParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('str', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_generatevpnclientpackage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}  # type: ignore
def begin_generatevpnclientpackage(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    parameters,  # type: "_models.VpnClientParameters"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[str]
    """Generates VPN client package for P2S client of the virtual network gateway in the specified
    resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param parameters: Parameters supplied to the generate virtual network gateway VPN client
     package operation.
    :type parameters: ~azure.mgmt.network.v2016_09_01.models.VpnClientParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either str or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[str]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial POST; cls=lambda passes the
        # raw pipeline response through for the poller.
        raw_result = self._generatevpnclientpackage_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Strip initial-request-only kwargs before they reach the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # The terminal response body is a plain string.
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # final-state-via location: read the terminal state from the Location
    # header's URL.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}  # type: ignore
def _get_bgp_peer_status_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.BgpPeerStatusListResult"]
    """Issue the initial POST for the getBgpPeerStatus long-running operation.

    Returns the deserialized ``BgpPeerStatusListResult`` on a 200 response,
    or ``None`` when the service replies 202 (operation accepted, still
    running). Normally invoked through :meth:`begin_get_bgp_peer_status`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.BgpPeerStatusListResult"]]
    # Map auth/not-found/conflict statuses to typed azure-core exceptions;
    # callers can extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    accept = "application/json, text/json"
    # Construct URL
    url = self._get_bgp_peer_status_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    # 'peer' is optional; when omitted the service reports all BGP peers.
    if peer is not None:
        query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200 carries the result body; 202 means the LRO was merely accepted.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('BgpPeerStatusListResult', pipeline_response)
    if cls:
        # Hand the raw response and deserialized body to the caller's hook.
        return cls(pipeline_response, deserialized, {})
    return deserialized
_get_bgp_peer_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}  # type: ignore
def begin_get_bgp_peer_status(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.BgpPeerStatusListResult"]
    """Start the GetBgpPeerStatus long-running operation, which reports the
    status of all BGP peers of the gateway.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param peer: The IP address of the peer to retrieve the status of.
    :type peer: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for the default ARMPolling method, False for no polling,
     or a pre-built polling object for a custom strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either BgpPeerStatusListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.BgpPeerStatusListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling_choice = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.BgpPeerStatusListResult"]
    delay = kwargs.pop('polling_interval', self._config.polling_interval)
    token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Without a continuation token, kick off the operation now; otherwise the
    # poller is rebuilt purely from the saved state below.
    if token is None:
        raw_result = self._get_bgp_peer_status_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            peer=peer,
            cls=lambda x, y, z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response, optionally through the caller's hook.
        deserialized = self._deserialize('BgpPeerStatusListResult', pipeline_response)
        return custom_cls(pipeline_response, deserialized, {}) if custom_cls else deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling_choice is True:
        polling_method = ARMPolling(delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling_choice is False:
        polling_method = NoPolling()
    else:
        polling_method = polling_choice
    if token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}  # type: ignore
def _get_learned_routes_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.GatewayRouteListResult"]
    """Issue the initial POST for the getLearnedRoutes long-running operation.

    Returns the deserialized ``GatewayRouteListResult`` on a 200 response,
    or ``None`` when the service replies 202 (operation accepted, still
    running). Normally invoked through :meth:`begin_get_learned_routes`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.GatewayRouteListResult"]]
    # Map auth/not-found/conflict statuses to typed azure-core exceptions;
    # callers can extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    accept = "application/json, text/json"
    # Construct URL
    url = self._get_learned_routes_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200 carries the result body; 202 means the LRO was merely accepted.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
    if cls:
        # Hand the raw response and deserialized body to the caller's hook.
        return cls(pipeline_response, deserialized, {})
    return deserialized
_get_learned_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}  # type: ignore
def begin_get_learned_routes(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.GatewayRouteListResult"]
    """Start the long-running operation that lists every route the virtual
    network gateway has learned, including routes learned from BGP peers.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for the default ARMPolling method, False for no polling,
     or a pre-built polling object for a custom strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either GatewayRouteListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.GatewayRouteListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling_choice = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.GatewayRouteListResult"]
    delay = kwargs.pop('polling_interval', self._config.polling_interval)
    token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Without a continuation token, kick off the operation now; otherwise the
    # poller is rebuilt purely from the saved state below.
    if token is None:
        raw_result = self._get_learned_routes_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            cls=lambda x, y, z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response, optionally through the caller's hook.
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
        return custom_cls(pipeline_response, deserialized, {}) if custom_cls else deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling_choice is True:
        polling_method = ARMPolling(delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling_choice is False:
        polling_method = NoPolling()
    else:
        polling_method = polling_choice
    if token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}  # type: ignore
def _get_advertised_routes_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.GatewayRouteListResult"]
    """Issue the initial POST for the getAdvertisedRoutes long-running operation.

    Returns the deserialized ``GatewayRouteListResult`` on a 200 response,
    or ``None`` when the service replies 202 (operation accepted, still
    running). Normally invoked through :meth:`begin_get_advertised_routes`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.GatewayRouteListResult"]]
    # Map auth/not-found/conflict statuses to typed azure-core exceptions;
    # callers can extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    accept = "application/json, text/json"
    # Construct URL
    url = self._get_advertised_routes_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    # Unlike getBgpPeerStatus, 'peer' is required here.
    query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200 carries the result body; 202 means the LRO was merely accepted.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
    if cls:
        # Hand the raw response and deserialized body to the caller's hook.
        return cls(pipeline_response, deserialized, {})
    return deserialized
_get_advertised_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}  # type: ignore
def begin_get_advertised_routes(
    self,
    resource_group_name,  # type: str
    virtual_network_gateway_name,  # type: str
    peer,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.GatewayRouteListResult"]
    """Start the long-running operation that lists the routes the virtual
    network gateway is advertising to the specified peer.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_gateway_name: The name of the virtual network gateway.
    :type virtual_network_gateway_name: str
    :param peer: The IP address of the peer.
    :type peer: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for the default ARMPolling method, False for no polling,
     or a pre-built polling object for a custom strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either GatewayRouteListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.GatewayRouteListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling_choice = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.GatewayRouteListResult"]
    delay = kwargs.pop('polling_interval', self._config.polling_interval)
    token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Without a continuation token, kick off the operation now; otherwise the
    # poller is rebuilt purely from the saved state below.
    if token is None:
        raw_result = self._get_advertised_routes_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            peer=peer,
            cls=lambda x, y, z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response, optionally through the caller's hook.
        deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
        return custom_cls(pipeline_response, deserialized, {}) if custom_cls else deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling_choice is True:
        polling_method = ARMPolling(delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling_choice is False:
        polling_method = NoPolling()
    else:
        polling_method = polling_choice
    if token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}  # type: ignore
| mit |
Luttik/mellowcakes_prototype | setup.py | 1 | 3722 | from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Resolve paths relative to this setup.py so installs work from any cwd.
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'pip_readme.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    # NOTE(review): distribution name 'freya' differs from the repository
    # name (mellowcakes_prototype) — confirm this is the intended PyPI name.
    name='freya',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1.14',
    description='freya',
    long_description=long_description,
    # The project's main homepage.
    # NOTE(review): this URL points at the branches listing, not the repo
    # root — verify it is what users should land on.
    url='https://github.com/Luttik/mellowcakes_prototype/branches',
    # Author details
    author='D.T. Luttik',
    author_email='d.t.luttik@tilburguniversity.edu',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    # What does your project relate to?
    keywords='machinelearning email analysis freya',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['numpy', 'pandas', 'scipy', 'scikit-learn'],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': [],
        'test': [],
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
    },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    data_files=[],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
        ],
    },
)
| mit |
Shouqun/node-gn | tools/depot_tools/recipes/recipe_modules/git/examples/full.py | 1 | 5612 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'git',
'recipe_engine/context',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
]
def RunSteps(api):
  """Exercise the 'git' recipe module end to end.

  Performs a safe checkout of chromium/src, then runs the module's helper
  steps (count_objects, get_remote_url, fetch_tags, new_branch, rebase,
  cat_file, bundle_create). Behavior is driven by recipe properties so the
  GenTests cases below can cover each branch.
  """
  url = 'https://chromium.googlesource.com/chromium/src.git'

  # git.checkout can optionally dump GIT_CURL_VERBOSE traces to a log file,
  # useful for debugging git access issues that are reproducible only on bots.
  curl_trace_file = None
  if api.properties.get('use_curl_trace'):
    curl_trace_file = api.path['start_dir'].join('curl_trace.log')

  submodule_update_force = api.properties.get('submodule_update_force', False)
  submodule_update_recursive = api.properties.get('submodule_update_recursive',
                                                 True)

  # You can use api.git.checkout to perform all the steps of a safe checkout.
  retVal = api.git.checkout(
      url,
      ref=api.properties.get('revision'),
      recursive=True,
      submodule_update_force=submodule_update_force,
      set_got_revision=api.properties.get('set_got_revision'),
      curl_trace_file=curl_trace_file,
      remote_name=api.properties.get('remote_name'),
      display_fetch_size=api.properties.get('display_fetch_size'),
      file_name=api.properties.get('checkout_file_name'),
      submodule_update_recursive=submodule_update_recursive,
      use_git_cache=api.properties.get('use_git_cache'))

  # checkout returns the checked-out revision ("deadbeef" in simulation).
  assert retVal == "deadbeef", (
      "expected retVal to be %r but was %r" % ("deadbeef", retVal))

  # count_objects shows number and size of objects in .git dir.
  api.git.count_objects(
      name='count-objects',
      can_fail_build=api.properties.get('count_objects_can_fail_build'),
      git_config_options={'foo': 'bar'})

  # Get the remote URL.
  api.git.get_remote_url(
      step_test_data=lambda: api.raw_io.test_api.stream_output('foo'))

  api.git.get_timestamp(test_data='foo')

  # You can use api.git.fetch_tags to fetch all tags from the remote
  api.git.fetch_tags(api.properties.get('remote_name'))

  # If you need to run more arbitrary git commands, you can use api.git itself,
  # which behaves like api.step(), but automatically sets the name of the step.
  with api.context(cwd=api.path['checkout']):
    api.git('status')

    api.git('status', name='git status can_fail_build',
            can_fail_build=True)

    api.git('status', name='git status cannot_fail_build',
            can_fail_build=False)

    # You should run git new-branch before you upload something with git cl.
    api.git.new_branch('refactor')  # Upstream is origin/master by default.
    # And use upstream kwarg to set up different upstream for tracking.
    api.git.new_branch('feature', upstream='refactor')

  # You can use api.git.rebase to rebase the current branch onto another one
  api.git.rebase(name_prefix='my repo', branch='origin/master',
                 dir_path=api.path['checkout'],
                 remote_name=api.properties.get('remote_name'))

  if api.properties.get('cat_file', None):
    step_result = api.git.cat_file_at_commit(api.properties['cat_file'],
                                             api.properties['revision'],
                                             stdout=api.raw_io.output())
    if 'TestOutput' in step_result.stdout:
      pass  # Success!

  # Bundle the repository.
  api.git.bundle_create(
      api.path['start_dir'].join('all.bundle'))
def GenTests(api):
  """Yield simulation test cases, one per property combination / failure mode
  exercised by RunSteps above (checkout variants, step failures, bad
  count-objects output, cat-file content, git cache)."""
  yield api.test('basic')
  yield api.test('basic_ref') + api.properties(revision='refs/foo/bar')
  yield api.test('basic_branch') + api.properties(revision='refs/heads/testing')
  yield api.test('basic_hash') + api.properties(
      revision='abcdef0123456789abcdef0123456789abcdef01')
  yield api.test('basic_file_name') + api.properties(checkout_file_name='DEPS')
  yield api.test('basic_submodule_update_force') + api.properties(
      submodule_update_force=True)

  yield api.test('platform_win') + api.platform.name('win')

  yield api.test('curl_trace_file') + api.properties(
      revision='refs/foo/bar', use_curl_trace=True)

  # Simulated non-zero retcodes verify the can_fail_build plumbing.
  yield (
      api.test('can_fail_build') +
      api.step_data('git status can_fail_build', retcode=1)
  )

  yield (
      api.test('cannot_fail_build') +
      api.step_data('git status cannot_fail_build', retcode=1)
  )

  yield (
      api.test('set_got_revision') +
      api.properties(set_got_revision=True)
  )

  yield (
      api.test('rebase_failed') +
      api.step_data('my repo rebase', retcode=1)
  )

  yield api.test('remote_not_origin') + api.properties(remote_name='not_origin')

  yield (
      api.test('count-objects_delta') +
      api.properties(display_fetch_size=True))

  yield (
      api.test('count-objects_failed') +
      api.step_data('count-objects', retcode=1))

  yield (
      api.test('count-objects_with_bad_output') +
      api.step_data(
          'count-objects',
          stdout=api.raw_io.output(api.git.count_objects_output('xxx'))))

  yield (
      api.test('count-objects_with_bad_output_fails_build') +
      api.step_data(
          'count-objects',
          stdout=api.raw_io.output(api.git.count_objects_output('xxx'))) +
      api.properties(count_objects_can_fail_build=True))

  yield (
      api.test('cat-file_test') +
      api.step_data('git cat-file abcdef12345:TestFile',
                    stdout=api.raw_io.output('TestOutput')) +
      api.properties(revision='abcdef12345', cat_file='TestFile'))

  yield (
      api.test('git-cache-checkout') +
      api.properties(use_git_cache=True))
| mit |
michalkurka/h2o-3 | h2o-py/tests/testdir_algos/glrm/pyunit_arrests_set_loss_by_col_glrm.py | 6 | 1984 | from __future__ import print_function
from builtins import str
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
import numpy as np
def glrm_set_loss_by_col():
    """Verify GLRM per-column losses (Absolute, Quadratic, Quadratic, Huber)
    by recomputing the final objective in numpy and comparing it with the
    objective reported by the H2O model."""
    print("Importing USArrests.csv data...")
    arrests_h2o = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
    arrests_np = np.array(h2o.as_list(arrests_h2o))
    arrests_h2o.describe()

    print("H2O GLRM with loss by column = Absolute, Quadratic, Quadratic, Huber")
    model = H2OGeneralizedLowRankEstimator(k=3, loss="Quadratic",
                                           loss_by_col=["Absolute", "Huber"],
                                           loss_by_col_idx=[0, 3],
                                           regularization_x="None",
                                           regularization_y="None")
    model.train(x=arrests_h2o.names, training_frame=arrests_h2o)
    model.show()

    # Archetypes (Y) come back as a table; drop the leading row-label column.
    y_cells = model._model_json['output']['archetypes'].cell_values
    y = np.array([[float(v) for v in list(row)[1:]] for row in y_cells])
    x_frame = h2o.get_frame(model._model_json['output']['representation_name'])
    x = np.array(h2o.as_list(x_frame))

    print("Check final objective function value")
    residual = arrests_np - np.dot(x, y)
    # Huber loss (delta=1), vectorized: quadratic inside |d|<=1, linear outside.
    d3 = residual[:, 3]
    huber_term = np.where(np.abs(d3) <= 1, d3 * d3 / 2, np.abs(d3) - 0.5)
    obj_val = np.sum(np.absolute(residual[:, 0])
                     + np.square(residual[:, 1])
                     + np.square(residual[:, 2])
                     + huber_term)
    glrm_obj = model._model_json['output']['objective']
    assert abs(glrm_obj - obj_val) < 1e-6, "Final objective was " + str(glrm_obj) + " but should equal " + str(obj_val)
# Run under the pyunit harness when executed directly; otherwise run the
# test immediately on import (standard h2o pyunit convention).
if __name__ == "__main__":
    pyunit_utils.standalone_test(glrm_set_loss_by_col)
else:
    glrm_set_loss_by_col()
| apache-2.0 |
nsmoooose/csp | csp/data/ui/scripts/windows/pause.py | 1 | 1717 | #!/usr/bin/python
# Combat Simulator Project
# Copyright (C) 2002-2005 The Combat Simulator Project
# http://csp.sourceforge.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Combat Simulator Project : Pause window script
"""
import csp.cspsim
from csp.data.ui.scripts.utils import SlotManager
class Pause(csp.cspsim.Window, SlotManager):
    """Window shown while the simulation is paused.

    Constructing the window toggles the simulation into the paused state;
    clicking its 'resume' button unpauses the simulation and closes the
    window.
    """

    def __init__(self, cspsim):
        csp.cspsim.Window.__init__(self)
        SlotManager.__init__(self)

        # Install the move window event handler.
        self.moveEventHandler = csp.cspsim.ControlMoveEventHandler(self)

        self.cspsim = cspsim
        self.cspsim.togglePause()

        # Load the user interface for this window.
        serializer = csp.cspsim.Serialization()
        serializer.load(self, 'pause.xml')

        # getById() yields None when 'resume' is absent from pause.xml;
        # use an identity test per PEP 8 (was '!= None').
        resumeButton = self.getById('resume')
        if resumeButton is not None:
            self.connectToClickSignal(resumeButton, self.resume_Click)

    def resume_Click(self):
        """Unpause the simulation (if still paused) and close this window."""
        if self.cspsim.isPaused():
            self.cspsim.togglePause()
        self.close()
| gpl-2.0 |
sboparen/spacechem-scripts | compare/collect.py | 2 | 1117 | #!/usr/bin/env python2
import excess
class stats:
    """Aggregated completion/excess data for one (user, planet) pair.

    The ``process`` pass later attaches ``excess``, ``inhabited``,
    ``complete`` and ``grade`` attributes to instances of this class.
    """

    def __init__(self, user, planet):
        self.user = user
        self.planet = planet

    def __str__(self):
        return '<stats {0}/{1}>'.format(self.user.name, self.planet.pnum)
def process(planet, users):
    """Compute per-user stats for *planet*, recursing into children first.

    Attaches ``planet.user_stats``, a dict mapping each user to a ``stats``
    instance where:

    - ``inhabited``: True if the user completed anything at/under this planet
    - ``complete``:  True if the user completed everything at/under it
    - ``excess``:    worst (max) excess over the subtree when complete,
                     otherwise None
    - ``grade``:     grade for that excess when complete, otherwise None
    """
    # Post-order traversal: children must carry user_stats before the
    # parent aggregates them below.
    if 'children' in planet.__dict__:
        for child in planet.children:
            process(child, users)
    planet.user_stats = {}
    for u in users:
        s = stats(u, planet)
        s.excess = []
        s.inhabited = False
        s.complete = True
        if 'children' not in planet.__dict__:
            # Leaf planet: look up the user's best solution, if any.
            user_best = planet.best_cycles_by_user.get(u, None)
            # Identity test per PEP 8 (was '== None'); absence of an entry
            # is exactly the None sentinel from dict.get.
            if user_best is None:
                s.complete = False
            else:
                s.inhabited = True
                s.excess.append(user_best.excess)
        else:
            # Interior planet: fold in the already-computed child stats.
            for child in planet.children:
                ch_s = child.user_stats[u]
                if ch_s.inhabited:
                    s.inhabited = True
                if ch_s.complete:
                    s.excess.append(ch_s.excess)
                else:
                    s.complete = False
        if s.complete:
            # NOTE(review): assumes at least one excess value was collected;
            # an interior planet with zero children would raise on max([]).
            s.excess = max(s.excess)
            s.grade = excess.grade(s.excess)
        else:
            s.excess = None
            s.grade = None
        planet.user_stats[u] = s
| mit |
rhinstaller/anaconda | pyanaconda/modules/storage/fcoe/__init__.py | 6 | 1042 | #
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.modules.storage.fcoe.fcoe import FCOEModule
__all__ = ["FCOEModule"]
| gpl-2.0 |
ganeshrn/ansible | test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_uri.py | 15 | 4256 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Corwin Brown <corwin@corwinbrown.com>
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Ansible module documentation block: YAML embedded in a raw string,
# rendered by ansible-doc. Not executed as code.
DOCUMENTATION = r'''
---
module: win_uri
short_description: Interacts with webservices
description:
- Interacts with FTP, HTTP and HTTPS web services.
- Supports Digest, Basic and WSSE HTTP authentication mechanisms.
- For non-Windows targets, use the M(ansible.builtin.uri) module instead.
options:
  url:
    description:
    - Supports FTP, HTTP or HTTPS URLs in the form of (ftp|http|https)://host.domain:port/path.
    type: str
    required: yes
  content_type:
    description:
    - Sets the "Content-Type" header.
    type: str
  body:
    description:
    - The body of the HTTP request/response to the web service.
    type: raw
  dest:
    description:
    - Output the response body to a file.
    type: path
  creates:
    description:
    - A filename, when it already exists, this step will be skipped.
    type: path
  removes:
    description:
    - A filename, when it does not exist, this step will be skipped.
    type: path
  return_content:
    description:
    - Whether or not to return the body of the response as a "content" key in
      the dictionary result. If the reported Content-type is
      "application/json", then the JSON is additionally loaded into a key
      called C(json) in the dictionary results.
    type: bool
    default: no
  status_code:
    description:
    - A valid, numeric, HTTP status code that signifies success of the request.
    - Can also be comma separated list of status codes.
    type: list
    elements: int
    default: [ 200 ]
  url_method:
    default: GET
    aliases:
    - method
  url_timeout:
    aliases:
    - timeout
  # Following defined in the web_request fragment but the module contains deprecated aliases for backwards compatibility.
  url_username:
    description:
    - The username to use for authentication.
    - The alias I(user) and I(username) is deprecated and will be removed on
      the major release after C(2022-07-01).
    aliases:
    - user
    - username
  url_password:
    description:
    - The password for I(url_username).
    - The alias I(password) is deprecated and will be removed on the major
      release after C(2022-07-01).
    aliases:
    - password
extends_documentation_fragment:
- ansible.windows.web_request
seealso:
- module: ansible.builtin.uri
- module: ansible.windows.win_get_url
author:
- Corwin Brown (@blakfeld)
- Dag Wieers (@dagwieers)
'''

# Usage examples shown by ansible-doc; also YAML in a raw string.
EXAMPLES = r'''
- name: Perform a GET and Store Output
  ansible.windows.win_uri:
    url: http://example.com/endpoint
  register: http_output

# Set a HOST header to hit an internal webserver:
- name: Hit a Specific Host on the Server
  ansible.windows.win_uri:
    url: http://example.com/
    method: GET
    headers:
      host: www.somesite.com

- name: Perform a HEAD on an Endpoint
  ansible.windows.win_uri:
    url: http://www.example.com/
    method: HEAD

- name: POST a Body to an Endpoint
  ansible.windows.win_uri:
    url: http://www.somesite.com/
    method: POST
    body: "{ 'some': 'json' }"
'''

# Schema of the values the module registers on return.
RETURN = r'''
elapsed:
  description: The number of seconds that elapsed while performing the download.
  returned: always
  type: float
  sample: 23.2
url:
  description: The Target URL.
  returned: always
  type: str
  sample: https://www.ansible.com
status_code:
  description: The HTTP Status Code of the response.
  returned: success
  type: int
  sample: 200
status_description:
  description: A summary of the status.
  returned: success
  type: str
  sample: OK
content:
  description: The raw content of the HTTP response.
  returned: success and return_content is True
  type: str
  sample: '{"foo": "bar"}'
content_length:
  description: The byte size of the response.
  returned: success
  type: int
  sample: 54447
json:
  description: The json structure returned under content as a dictionary.
  returned: success and Content-Type is "application/json" or "application/javascript" and return_content is True
  type: dict
  sample: {"this-is-dependent": "on the actual return content"}
''' | gpl-3.0 |
hackers-terabit/portage | pym/portage/dbapi/bintree.py | 1 | 48970 | # Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
__all__ = ["bindbapi", "binarytree"]
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
'verify_all,_apply_hash_filter,_hash_filter',
'portage.dbapi.dep_expand:dep_expand',
'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
'portage.output:EOutput,colorize',
'portage.locks:lockfile,unlockfile',
'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd',
'portage.update:update_dbentries',
'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
'writemsg,writemsg_stdout',
'portage.util.path:first_existing',
'portage.util._urlopen:urlopen@_urlopen',
'portage.versions:best,catpkgsplit,catsplit,_pkg_str',
)
from portage.cache.mappings import slot_dict_class
from portage.const import CACHE_PATH, SUPPORTED_XPAK_EXTENSIONS
from portage.dbapi.virtual import fakedbapi
from portage.dep import Atom, use_reduce, paren_enclose
from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
ParseError, PermissionDenied, PortageException
from portage.localization import _
from portage import _movefile
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
import codecs
import errno
import io
import stat
import subprocess
import sys
import tempfile
import textwrap
import time
import traceback
import warnings
from gzip import GzipFile
from itertools import chain
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
if sys.hexversion >= 0x3000000:
    # pylint: disable=W0622
    # Python 3: alias the removed builtins so the rest of this module can
    # use unicode/long/basestring-style names unconditionally.
    _unicode = str
    basestring = str
    long = int
else:
    # Python 2: text type is the builtin ``unicode``.
    _unicode = unicode
class UseCachedCopyOfRemoteIndex(Exception):
    """Raised to signal that the locally cached binhost index is recent
    enough, so fetching the remote index can be skipped."""
class bindbapi(fakedbapi):
    """A fakedbapi subclass backed by a ``binarytree``: package metadata
    is served from the in-memory index when possible, falling back to
    reading the xpak data embedded in the binary package file."""

    # Extra metadata keys this dbapi understands beyond fakedbapi's.
    _known_keys = frozenset(list(fakedbapi._known_keys) + \
        ["CHOST", "repository", "USE"])

    def __init__(self, mybintree=None, **kwargs):
        """
        @param mybintree: the binarytree instance this dbapi fronts
        """
        # Always enable multi_instance mode for bindbapi indexing. This
        # does not affect the local PKGDIR file layout, since that is
        # controlled independently by FEATURES=binpkg-multi-instance.
        # The multi_instance mode is useful for the following reasons:
        # * binary packages with the same cpv from multiple binhosts
        #   can be considered simultaneously
        # * if binpkg-multi-instance is disabled, it's still possible
        #   to properly access a PKGDIR which has binpkg-multi-instance
        #   layout (or mixed layout)
        fakedbapi.__init__(self, exclusive_slots=False,
            multi_instance=True, **kwargs)
        self.bintree = mybintree
        self.move_ent = mybintree.move_ent
        # Selectively cache metadata in order to optimize dep matching.
        self._aux_cache_keys = set(
            ["BUILD_ID", "BUILD_TIME", "CHOST", "DEFINED_PHASES",
            "DEPEND", "EAPI", "HDEPEND", "IUSE", "KEYWORDS",
            "LICENSE", "MD5", "PDEPEND", "PROPERTIES", "PROVIDE",
            "PROVIDES", "RDEPEND", "repository", "REQUIRES", "RESTRICT",
            "SIZE", "SLOT", "USE", "_mtime_"
            ])
        self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
        self._aux_cache = {}

    @property
    def writable(self):
        """
        Check if PKGDIR is writable, or permissions are sufficient
        to create it if it does not exist yet.
        @rtype: bool
        @return: True if PKGDIR is writable or can be created,
            False otherwise
        """
        return os.access(first_existing(self.bintree.pkgdir), os.W_OK)

    def match(self, *pargs, **kwargs):
        # Lazily populate the tree before delegating to fakedbapi.
        if self.bintree and not self.bintree.populated:
            self.bintree.populate()
        return fakedbapi.match(self, *pargs, **kwargs)

    def cpv_exists(self, cpv, myrepo=None):
        # myrepo is accepted for API symmetry but not used here.
        if self.bintree and not self.bintree.populated:
            self.bintree.populate()
        return fakedbapi.cpv_exists(self, cpv)

    def cpv_inject(self, cpv, **kwargs):
        # cpv is expected to carry its metadata (_pkg_str attribute).
        if not self.bintree.populated:
            self.bintree.populate()
        fakedbapi.cpv_inject(self, cpv,
            metadata=cpv._metadata, **kwargs)

    def cpv_remove(self, cpv):
        if not self.bintree.populated:
            self.bintree.populate()
        fakedbapi.cpv_remove(self, cpv)

    def aux_get(self, mycpv, wants, myrepo=None):
        """Return the requested metadata values for *mycpv*, preferring
        the in-memory cache and reading the package's xpak data only
        when a non-cached key is requested for a local package.

        @param wants: iterable of metadata key names
        @return: list of values in the same order as *wants* (missing
            keys yield empty strings)
        """
        if self.bintree and not self.bintree.populated:
            self.bintree.populate()
        # Support plain string for backward compatibility with API
        # consumers (including portageq, which passes in a cpv from
        # a command-line argument).
        instance_key = self._instance_key(mycpv,
            support_string=True)
        # Fast path: every known requested key is in the aux cache.
        if not self._known_keys.intersection(
            wants).difference(self._aux_cache_keys):
            aux_cache = self.cpvdict[instance_key]
            if aux_cache is not None:
                return [aux_cache.get(x, "") for x in wants]
        mysplit = mycpv.split("/")
        mylist = []
        tbz2name = mysplit[1]+".tbz2"
        if not self.bintree._remotepkgs or \
            not self.bintree.isremote(mycpv):
            # Local package: read metadata out of the xpak segment.
            try:
                tbz2_path = self.bintree._pkg_paths[instance_key]
            except KeyError:
                raise KeyError(mycpv)
            tbz2_path = os.path.join(self.bintree.pkgdir, tbz2_path)
            try:
                st = os.lstat(tbz2_path)
            except OSError:
                raise KeyError(mycpv)
            metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
            def getitem(k):
                # _mtime_ and SIZE come from the stat result, not xpak.
                if k == "_mtime_":
                    return _unicode(st[stat.ST_MTIME])
                elif k == "SIZE":
                    return _unicode(st.st_size)
                v = metadata_bytes.get(_unicode_encode(k,
                    encoding=_encodings['repo.content'],
                    errors='backslashreplace'))
                if v is not None:
                    v = _unicode_decode(v,
                        encoding=_encodings['repo.content'], errors='replace')
                return v
        else:
            # Remote package: serve values from the cached index entry.
            getitem = self.cpvdict[instance_key].get
        mydata = {}
        mykeys = wants
        for x in mykeys:
            myval = getitem(x)
            # myval is None if the key doesn't exist
            # or the tbz2 is corrupt.
            if myval:
                mydata[x] = " ".join(myval.split())
        if not mydata.setdefault('EAPI', '0'):
            mydata['EAPI'] = '0'
        return [mydata.get(x, '') for x in wants]

    def aux_update(self, cpv, values):
        """Rewrite selected metadata keys inside the package's xpak
        segment and re-inject the package so caches stay coherent."""
        if not self.bintree.populated:
            self.bintree.populate()
        build_id = None
        try:
            build_id = cpv.build_id
        except AttributeError:
            if self.bintree._multi_instance:
                # The cpv.build_id attribute is required if we are in
                # multi-instance mode, since otherwise we won't know
                # which instance to update.
                raise
            else:
                cpv = self._instance_key(cpv, support_string=True)[0]
                build_id = cpv.build_id

        tbz2path = self.bintree.getname(cpv)
        if not os.path.exists(tbz2path):
            raise KeyError(cpv)
        mytbz2 = portage.xpak.tbz2(tbz2path)
        mydata = mytbz2.get_data()

        for k, v in values.items():
            k = _unicode_encode(k,
                encoding=_encodings['repo.content'], errors='backslashreplace')
            v = _unicode_encode(v,
                encoding=_encodings['repo.content'], errors='backslashreplace')
            mydata[k] = v

        # Drop keys whose values were emptied by the update.
        for k, v in list(mydata.items()):
            if not v:
                del mydata[k]
        mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
        # inject will clear stale caches via cpv_inject.
        self.bintree.inject(cpv, filename=tbz2path)

    def cp_list(self, *pargs, **kwargs):
        if not self.bintree.populated:
            self.bintree.populate()
        return fakedbapi.cp_list(self, *pargs, **kwargs)

    def cp_all(self, sort=False):
        if not self.bintree.populated:
            self.bintree.populate()
        return fakedbapi.cp_all(self, sort=sort)

    def cpv_all(self):
        if not self.bintree.populated:
            self.bintree.populate()
        return fakedbapi.cpv_all(self)

    def getfetchsizes(self, pkg):
        """
        This will raise MissingSignature if SIZE signature is not available,
        or InvalidSignature if SIZE signature is invalid.
        """
        if not self.bintree.populated:
            self.bintree.populate()
        pkg = getattr(pkg, 'cpv', pkg)
        filesdict = {}
        if not self.bintree.isremote(pkg):
            # Local packages need no fetching, so report nothing.
            pass
        else:
            metadata = self.bintree._remotepkgs[self._instance_key(pkg)]
            try:
                size = int(metadata["SIZE"])
            except KeyError:
                raise portage.exception.MissingSignature("SIZE")
            except ValueError:
                raise portage.exception.InvalidSignature(
                    "SIZE: %s" % metadata["SIZE"])
            else:
                filesdict[os.path.basename(self.bintree.getname(pkg))] = size
        return filesdict
class binarytree(object):
"this tree scans for a list of all packages available in PKGDIR"
    def __init__(self, _unused=DeprecationWarning, pkgdir=None,
        virtual=DeprecationWarning, settings=None):
        """
        @param pkgdir: absolute path of the binary package directory
            (required keyword)
        @param settings: portage config instance (required keyword)
        @param _unused: deprecated positional root parameter; passing
            anything other than the sentinel triggers a warning
        @param virtual: deprecated; passing anything other than the
            sentinel triggers a warning
        """
        if pkgdir is None:
            raise TypeError("pkgdir parameter is required")

        if settings is None:
            raise TypeError("settings parameter is required")

        if _unused is not DeprecationWarning:
            warnings.warn("The first parameter of the "
                "portage.dbapi.bintree.binarytree"
                " constructor is now unused. Instead "
                "settings['ROOT'] is used.",
                DeprecationWarning, stacklevel=2)

        if virtual is not DeprecationWarning:
            warnings.warn("The 'virtual' parameter of the "
                "portage.dbapi.bintree.binarytree"
                " constructor is unused",
                DeprecationWarning, stacklevel=2)

        # NOTE(review): vestigial guard left over from a refactoring;
        # the body always runs.
        if True:
            self.pkgdir = normalize_path(pkgdir)
            # NOTE: Event if binpkg-multi-instance is disabled, it's
            # still possible to access a PKGDIR which uses the
            # binpkg-multi-instance layout (or mixed layout).
            self._multi_instance = ("binpkg-multi-instance" in
                settings.features)
            if self._multi_instance:
                self._allocate_filename = self._allocate_filename_multi
            self.dbapi = bindbapi(self, settings=settings)
            self.update_ents = self.dbapi.update_ents
            self.move_slot_ent = self.dbapi.move_slot_ent
            self.populated = 0
            self.tree = {}
            self._remote_has_index = False
            self._remotepkgs = None  # remote metadata indexed by cpv
            self.invalids = []
            self.settings = settings
            self._pkg_paths = {}
            self._populating = False
            # Legacy layout detection: a PKGDIR/All directory.
            self._all_directory = os.path.isdir(
                os.path.join(self.pkgdir, "All"))
            self._pkgindex_version = 0
            self._pkgindex_hashes = ["MD5","SHA1"]
            self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
            # Keys written per-package into the Packages index.
            self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
            self._pkgindex_keys.update(["CPV", "SIZE"])
            self._pkgindex_aux_keys = \
                ["BASE_URI", "BUILD_ID", "BUILD_TIME", "CHOST",
                "DEFINED_PHASES", "DEPEND", "DESCRIPTION", "EAPI",
                "HDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND",
                "PKGINDEX_URI", "PROPERTIES", "PROVIDE", "PROVIDES",
                "RDEPEND", "repository", "REQUIRES", "RESTRICT",
                "SIZE", "SLOT", "USE"]
            self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
            # Keys whose values depend on USE flag evaluation.
            self._pkgindex_use_evaluated_keys = \
                ("DEPEND", "HDEPEND", "LICENSE", "RDEPEND",
                "PDEPEND", "PROPERTIES", "PROVIDE", "RESTRICT")
            # Keys recorded once in the Packages index header.
            self._pkgindex_header_keys = set([
                "ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
                "ACCEPT_PROPERTIES", "ACCEPT_RESTRICT", "CBUILD",
                "CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
                "GENTOO_MIRRORS", "INSTALL_MASK", "IUSE_IMPLICIT", "USE",
                "USE_EXPAND", "USE_EXPAND_HIDDEN", "USE_EXPAND_IMPLICIT",
                "USE_EXPAND_UNPREFIXED"])
            # Per-package defaults; omitted in the index when equal.
            self._pkgindex_default_pkg_data = {
                "BUILD_ID" : "",
                "BUILD_TIME" : "",
                "DEFINED_PHASES" : "",
                "DEPEND" : "",
                "EAPI" : "0",
                "HDEPEND" : "",
                "IUSE" : "",
                "KEYWORDS": "",
                "LICENSE" : "",
                "PATH" : "",
                "PDEPEND" : "",
                "PROPERTIES" : "",
                "PROVIDE" : "",
                "PROVIDES": "",
                "RDEPEND" : "",
                "REQUIRES": "",
                "RESTRICT": "",
                "SLOT" : "0",
                "USE" : "",
            }
            # Per-package keys that fall back to the header value.
            self._pkgindex_inherited_keys = ["CHOST", "repository"]

            # Populate the header with appropriate defaults.
            self._pkgindex_default_header_data = {
                "CHOST" : self.settings.get("CHOST", ""),
                "repository" : "",
            }

            # (internal key, Packages-file key) translation pairs.
            self._pkgindex_translated_keys = (
                ("DESCRIPTION" , "DESC"),
                ("_mtime_" , "MTIME"),
                ("repository" , "REPO"),
            )

            self._pkgindex_allowed_pkg_keys = set(chain(
                self._pkgindex_keys,
                self._pkgindex_aux_keys,
                self._pkgindex_hashes,
                self._pkgindex_default_pkg_data,
                self._pkgindex_inherited_keys,
                chain(*self._pkgindex_translated_keys)
            ))
    @property
    def root(self):
        """Deprecated accessor kept for API compatibility; emits a
        DeprecationWarning and returns settings['ROOT']."""
        warnings.warn("The root attribute of "
            "portage.dbapi.bintree.binarytree"
            " is deprecated. Use "
            "settings['ROOT'] instead.",
            DeprecationWarning, stacklevel=3)
        return self.settings['ROOT']
    def move_ent(self, mylist, repo_match=None):
        """Apply a package move (rename) update to matching binary
        packages, rewriting their embedded xpak metadata and relocating
        the package files.

        @param mylist: update command, where mylist[1] is the old cp and
            mylist[2] is the new cp
        @param repo_match: optional predicate on the package's repo name
        @return: number of packages moved
        """
        if not self.populated:
            self.populate()
        origcp = mylist[1]
        newcp = mylist[2]
        # sanity check
        for atom in (origcp, newcp):
            if not isjustname(atom):
                raise InvalidPackageName(_unicode(atom))
        mynewcat = catsplit(newcp)[0]
        origmatches=self.dbapi.cp_list(origcp)
        moves = 0
        if not origmatches:
            return moves
        for mycpv in origmatches:
            try:
                mycpv = self.dbapi._pkg_str(mycpv, None)
            except (KeyError, InvalidData):
                continue
            mycpv_cp = portage.cpv_getkey(mycpv)
            if mycpv_cp != origcp:
                # Ignore PROVIDE virtual match.
                continue
            if repo_match is not None \
                and not repo_match(mycpv.repo):
                continue

            # Use isvalidatom() to check if this move is valid for the
            # EAPI (characters allowed in package names may vary).
            if not isvalidatom(newcp, eapi=mycpv.eapi):
                continue

            mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
            myoldpkg = catsplit(mycpv)[1]
            mynewpkg = catsplit(mynewcpv)[1]

            # Refuse to clobber an existing destination package.
            if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
                writemsg(_("!!! Cannot update binary: Destination exists.\n"),
                    noiselevel=-1)
                writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
                continue

            tbz2path = self.getname(mycpv)
            if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
                writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
                    noiselevel=-1)
                continue

            moves += 1
            # Rewrite the xpak metadata in memory with the new name.
            mytbz2 = portage.xpak.tbz2(tbz2path)
            mydata = mytbz2.get_data()
            updated_items = update_dbentries([mylist], mydata, parent=mycpv)
            mydata.update(updated_items)
            mydata[b'PF'] = \
                _unicode_encode(mynewpkg + "\n",
                encoding=_encodings['repo.content'])
            mydata[b'CATEGORY'] = \
                _unicode_encode(mynewcat + "\n",
                encoding=_encodings['repo.content'])
            if mynewpkg != myoldpkg:
                # Rename the embedded ebuild file to match the new PN.
                ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
                    encoding=_encodings['repo.content']), None)
                if ebuild_data is not None:
                    mydata[_unicode_encode(mynewpkg + '.ebuild',
                        encoding=_encodings['repo.content'])] = ebuild_data

            mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))

            # Drop the old entry, then register the renamed package.
            self.dbapi.cpv_remove(mycpv)
            del self._pkg_paths[self.dbapi._instance_key(mycpv)]
            metadata = self.dbapi._aux_cache_slot_dict()
            for k in self.dbapi._aux_cache_keys:
                v = mydata.get(_unicode_encode(k))
                if v is not None:
                    v = _unicode_decode(v)
                    metadata[k] = " ".join(v.split())
            mynewcpv = _pkg_str(mynewcpv, metadata=metadata)
            new_path = self.getname(mynewcpv)
            self._pkg_paths[
                self.dbapi._instance_key(mynewcpv)] = new_path[len(self.pkgdir)+1:]
            if new_path != mytbz2:
                self._ensure_dir(os.path.dirname(new_path))
                _movefile(tbz2path, new_path, mysettings=self.settings)
            self.inject(mynewcpv)

        return moves
    def prevent_collision(self, cpv):
        """Deprecated no-op kept for API compatibility; only emits a
        DeprecationWarning."""
        warnings.warn("The "
            "portage.dbapi.bintree.binarytree.prevent_collision "
            "method is deprecated.",
            DeprecationWarning, stacklevel=2)
    def _ensure_dir(self, path):
        """
        Create the specified directory. Also, copy gid and group mode
        bits from self.pkgdir if possible.
        @param path: Absolute path of the directory to be created.
        @type path: String
        """
        try:
            pkgdir_st = os.stat(self.pkgdir)
        except OSError:
            # PKGDIR is missing/unreadable: fall back to plain creation.
            ensure_dirs(path)
            return
        pkgdir_gid = pkgdir_st.st_gid
        # Keep setgid bit plus group rwx bits from PKGDIR's mode.
        pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
        try:
            ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
        except PortageException:
            # Tolerate permission failures as long as the dir exists.
            if not os.path.isdir(path):
                raise
    def _file_permissions(self, path):
        """Best-effort: copy PKGDIR's group ownership and group rw bits
        onto *path*; failures are silently ignored."""
        try:
            pkgdir_st = os.stat(self.pkgdir)
        except OSError:
            pass
        else:
            pkgdir_gid = pkgdir_st.st_gid
            # Only the group read/write bits are propagated to files.
            pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
            try:
                portage.util.apply_permissions(path, gid=pkgdir_gid,
                    mode=pkgdir_grp_mode, mask=0)
            except PortageException:
                pass
    def populate(self, getbinpkgs=0):
        """Populate the binarytree from PKGDIR (and, when *getbinpkgs*
        is true, from remote binhosts), holding the Packages index lock
        while scanning if PKGDIR is writable. Re-entrant calls during an
        in-progress populate return immediately."""
        if self._populating:
            return

        pkgindex_lock = None
        try:
            if os.access(self.pkgdir, os.W_OK):
                pkgindex_lock = lockfile(self._pkgindex_file,
                    wantnewlockfile=1)
            self._populating = True
            self._populate(getbinpkgs)
        finally:
            if pkgindex_lock:
                unlockfile(pkgindex_lock)
            self._populating = False
    def _populate(self, getbinpkgs=0):
        """Scan PKGDIR for binary packages, validate/refresh the local
        Packages index, and (optionally) merge package metadata from the
        remote binhosts listed in PORTAGE_BINHOST.

        @param getbinpkgs: when true, also fetch and merge remote
            binhost indexes
        """
        if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
            return 0

        # Clear all caches in case populate is called multiple times
        # as may be the case when _global_updates calls populate()
        # prior to performing package moves since it only wants to
        # operate on local packages (getbinpkgs=0).
        self._remotepkgs = None
        self.dbapi.clear()
        _instance_key = self.dbapi._instance_key
        # NOTE(review): vestigial guard left over from a refactoring;
        # the body always runs.
        if True:
            pkg_paths = {}
            self._pkg_paths = pkg_paths
            # --- Phase 1: walk PKGDIR and collect candidate files. ---
            dir_files = {}
            for parent, dir_names, file_names in os.walk(self.pkgdir):
                relative_parent = parent[len(self.pkgdir)+1:]
                dir_files[relative_parent] = file_names

            pkgindex = self._load_pkgindex()
            if not self._pkgindex_version_supported(pkgindex):
                pkgindex = self._new_pkgindex()
            header = pkgindex.header
            metadata = {}
            basename_index = {}
            for d in pkgindex.packages:
                cpv = _pkg_str(d["CPV"], metadata=d,
                    settings=self.settings)
                d["CPV"] = cpv
                metadata[_instance_key(cpv)] = d
                path = d.get("PATH")
                if not path:
                    path = cpv + ".tbz2"
                basename = os.path.basename(path)
                basename_index.setdefault(basename, []).append(d)

            # --- Phase 2: validate each package file against the index,
            # reading xpak metadata only when the index entry is stale. ---
            update_pkgindex = False
            for mydir, file_names in dir_files.items():
                try:
                    mydir = _unicode_decode(mydir,
                        encoding=_encodings["fs"], errors="strict")
                except UnicodeDecodeError:
                    continue
                for myfile in file_names:
                    try:
                        myfile = _unicode_decode(myfile,
                            encoding=_encodings["fs"], errors="strict")
                    except UnicodeDecodeError:
                        continue
                    if not myfile.endswith(SUPPORTED_XPAK_EXTENSIONS):
                        continue
                    mypath = os.path.join(mydir, myfile)
                    full_path = os.path.join(self.pkgdir, mypath)
                    s = os.lstat(full_path)

                    if not stat.S_ISREG(s.st_mode):
                        continue

                    # Validate data from the package index and try to avoid
                    # reading the xpak if possible.
                    possibilities = basename_index.get(myfile)
                    if possibilities:
                        match = None
                        for d in possibilities:
                            try:
                                if long(d["_mtime_"]) != s[stat.ST_MTIME]:
                                    continue
                            except (KeyError, ValueError):
                                continue
                            try:
                                if long(d["SIZE"]) != long(s.st_size):
                                    continue
                            except (KeyError, ValueError):
                                continue
                            if not self._pkgindex_keys.difference(d):
                                match = d
                                break
                        if match:
                            mycpv = match["CPV"]
                            instance_key = _instance_key(mycpv)
                            pkg_paths[instance_key] = mypath
                            # update the path if the package has been moved
                            oldpath = d.get("PATH")
                            if oldpath and oldpath != mypath:
                                update_pkgindex = True
                            # Omit PATH if it is the default path for
                            # the current Packages format version.
                            if mypath != mycpv + ".tbz2":
                                d["PATH"] = mypath
                                if not oldpath:
                                    update_pkgindex = True
                            else:
                                d.pop("PATH", None)
                                if oldpath:
                                    update_pkgindex = True
                            self.dbapi.cpv_inject(mycpv)
                            continue
                    if not os.access(full_path, os.R_OK):
                        writemsg(_("!!! Permission denied to read " \
                            "binary package: '%s'\n") % full_path,
                            noiselevel=-1)
                        self.invalids.append(myfile[:-5])
                        continue
                    # Index entry missing or stale: read the xpak data.
                    pkg_metadata = self._read_metadata(full_path, s,
                        keys=chain(self.dbapi._aux_cache_keys,
                        ("PF", "CATEGORY")))
                    mycat = pkg_metadata.get("CATEGORY", "")
                    mypf = pkg_metadata.get("PF", "")
                    slot = pkg_metadata.get("SLOT", "")
                    mypkg = myfile[:-5]
                    if not mycat or not mypf or not slot:
                        #old-style or corrupt package
                        writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
                            noiselevel=-1)
                        missing_keys = []
                        if not mycat:
                            missing_keys.append("CATEGORY")
                        if not mypf:
                            missing_keys.append("PF")
                        if not slot:
                            missing_keys.append("SLOT")
                        msg = []
                        if missing_keys:
                            missing_keys.sort()
                            msg.append(_("Missing metadata key(s): %s.") % \
                                ", ".join(missing_keys))
                        msg.append(_(" This binary package is not " \
                            "recoverable and should be deleted."))
                        for line in textwrap.wrap("".join(msg), 72):
                            writemsg("!!! %s\n" % line, noiselevel=-1)
                        self.invalids.append(mypkg)
                        continue

                    # Validate the file name against PF/BUILD_ID.
                    multi_instance = False
                    invalid_name = False
                    build_id = None
                    if myfile.endswith(".xpak"):
                        multi_instance = True
                        build_id = self._parse_build_id(myfile)
                        if build_id < 1:
                            invalid_name = True
                        elif myfile != "%s-%s.xpak" % (
                            mypf, build_id):
                            invalid_name = True
                        else:
                            mypkg = mypkg[:-len(str(build_id))-1]
                    elif myfile != mypf + ".tbz2":
                        invalid_name = True

                    if invalid_name:
                        writemsg(_("\n!!! Binary package name is "
                            "invalid: '%s'\n") % full_path,
                            noiselevel=-1)
                        continue

                    if pkg_metadata.get("BUILD_ID"):
                        try:
                            build_id = long(pkg_metadata["BUILD_ID"])
                        except ValueError:
                            writemsg(_("!!! Binary package has "
                                "invalid BUILD_ID: '%s'\n") %
                                full_path, noiselevel=-1)
                            continue
                    else:
                        build_id = None

                    # The directory layout must agree with the metadata.
                    if multi_instance:
                        name_split = catpkgsplit("%s/%s" %
                            (mycat, mypf))
                        if (name_split is None or
                            tuple(catsplit(mydir)) != name_split[:2]):
                            continue
                    elif mycat != mydir and mydir != "All":
                        continue
                    if mypkg != mypf.strip():
                        continue
                    mycpv = mycat + "/" + mypkg
                    if not self.dbapi._category_re.match(mycat):
                        writemsg(_("!!! Binary package has an " \
                            "unrecognized category: '%s'\n") % full_path,
                            noiselevel=-1)
                        writemsg(_("!!! '%s' has a category that is not" \
                            " listed in %setc/portage/categories\n") % \
                            (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
                            noiselevel=-1)
                        continue
                    if build_id is not None:
                        pkg_metadata["BUILD_ID"] = _unicode(build_id)
                    pkg_metadata["SIZE"] = _unicode(s.st_size)
                    # Discard items used only for validation above.
                    pkg_metadata.pop("CATEGORY")
                    pkg_metadata.pop("PF")
                    mycpv = _pkg_str(mycpv,
                        metadata=self.dbapi._aux_cache_slot_dict(
                        pkg_metadata))
                    pkg_paths[_instance_key(mycpv)] = mypath
                    self.dbapi.cpv_inject(mycpv)
                    update_pkgindex = True
                    d = metadata.get(_instance_key(mycpv),
                        pkgindex._pkg_slot_dict())
                    # Discard index data whose mtime/size no longer match.
                    if d:
                        try:
                            if long(d["_mtime_"]) != s[stat.ST_MTIME]:
                                d.clear()
                        except (KeyError, ValueError):
                            d.clear()
                    if d:
                        try:
                            if long(d["SIZE"]) != long(s.st_size):
                                d.clear()
                        except (KeyError, ValueError):
                            d.clear()

                    for k in self._pkgindex_allowed_pkg_keys:
                        v = pkg_metadata.get(k)
                        if v is not None:
                            d[k] = v
                    d["CPV"] = mycpv

                    try:
                        self._eval_use_flags(mycpv, d)
                    except portage.exception.InvalidDependString:
                        writemsg(_("!!! Invalid binary package: '%s'\n") % \
                            self.getname(mycpv), noiselevel=-1)
                        self.dbapi.cpv_remove(mycpv)
                        del pkg_paths[_instance_key(mycpv)]

                    # record location if it's non-default
                    if mypath != mycpv + ".tbz2":
                        d["PATH"] = mypath
                    else:
                        d.pop("PATH", None)
                    metadata[_instance_key(mycpv)] = d

            # Drop index entries whose files have disappeared.
            for instance_key in list(metadata):
                if instance_key not in pkg_paths:
                    del metadata[instance_key]

            # Do not bother to write the Packages index if $PKGDIR/All/ exists
            # since it will provide no benefit due to the need to read CATEGORY
            # from xpak.
            if update_pkgindex and os.access(self.pkgdir, os.W_OK):
                del pkgindex.packages[:]
                pkgindex.packages.extend(iter(metadata.values()))
                self._update_pkgindex_header(pkgindex.header)
                self._pkgindex_write(pkgindex)

        if getbinpkgs and not self.settings.get("PORTAGE_BINHOST"):
            writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
                noiselevel=-1)

        if not getbinpkgs or 'PORTAGE_BINHOST' not in self.settings:
            self.populated=1
            return
        # --- Phase 3: fetch and merge remote binhost indexes. ---
        self._remotepkgs = {}
        for base_url in self.settings["PORTAGE_BINHOST"].split():
            parsed_url = urlparse(base_url)
            host = parsed_url.netloc
            port = parsed_url.port
            user = None
            passwd = None
            user_passwd = ""
            if "@" in host:
                user, host = host.split("@", 1)
                user_passwd = user + "@"
                if ":" in user:
                    user, passwd = user.split(":", 1)
            port_args = []
            if port is not None:
                port_str = ":%s" % (port,)
                if host.endswith(port_str):
                    host = host[:-len(port_str)]
            # Locally cached copy of this binhost's Packages file.
            pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
                host, parsed_url.path.lstrip("/"), "Packages")
            pkgindex = self._new_pkgindex()
            try:
                f = io.open(_unicode_encode(pkgindex_file,
                    encoding=_encodings['fs'], errors='strict'),
                    mode='r', encoding=_encodings['repo.content'],
                    errors='replace')
                try:
                    pkgindex.read(f)
                finally:
                    f.close()
            except EnvironmentError as e:
                if e.errno != errno.ENOENT:
                    raise
            local_timestamp = pkgindex.header.get("TIMESTAMP", None)
            try:
                download_timestamp = \
                    float(pkgindex.header.get("DOWNLOAD_TIMESTAMP", 0))
            except ValueError:
                download_timestamp = 0
            remote_timestamp = None
            rmt_idx = self._new_pkgindex()
            proc = None
            tmp_filename = None
            try:
                # urlparse.urljoin() only works correctly with recognized
                # protocols and requires the base url to have a trailing
                # slash, so join manually...
                url = base_url.rstrip("/") + "/Packages"
                f = None

                # Respect the cached index's TTL before re-downloading.
                try:
                    ttl = float(pkgindex.header.get("TTL", 0))
                except ValueError:
                    pass
                else:
                    if download_timestamp and ttl and \
                        download_timestamp + ttl > time.time():
                        raise UseCachedCopyOfRemoteIndex()

                # Don't use urlopen for https, since it doesn't support
                # certificate/hostname verification (bug #469888).
                if parsed_url.scheme not in ('https',):
                    try:
                        f = _urlopen(url, if_modified_since=local_timestamp)
                        if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
                            remote_timestamp = f.headers.get('timestamp')
                    except IOError as err:
                        if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
                            raise UseCachedCopyOfRemoteIndex()

                        if parsed_url.scheme in ('ftp', 'http', 'https'):
                            # This protocol is supposedly supported by urlopen,
                            # so apparently there's a problem with the url
                            # or a bug in urlopen.
                            if self.settings.get("PORTAGE_DEBUG", "0") != "0":
                                traceback.print_exc()

                            raise
                    except ValueError:
                        raise ParseError("Invalid Portage BINHOST value '%s'"
                            % url.lstrip())

                if f is None:

                    path = parsed_url.path.rstrip("/") + "/Packages"

                    if parsed_url.scheme == 'ssh':
                        # Use a pipe so that we can terminate the download
                        # early if we detect that the TIMESTAMP header
                        # matches that of the cached Packages file.
                        ssh_args = ['ssh']
                        if port is not None:
                            ssh_args.append("-p%s" % (port,))
                        # NOTE: shlex evaluates embedded quotes
                        ssh_args.extend(portage.util.shlex_split(
                            self.settings.get("PORTAGE_SSH_OPTS", "")))
                        ssh_args.append(user_passwd + host)
                        ssh_args.append('--')
                        ssh_args.append('cat')
                        ssh_args.append(path)

                        proc = subprocess.Popen(ssh_args,
                            stdout=subprocess.PIPE)
                        f = proc.stdout
                    else:
                        # Fall back to the configured FETCHCOMMAND.
                        setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
                        fcmd = self.settings.get(setting)
                        if not fcmd:
                            fcmd = self.settings.get('FETCHCOMMAND')
                            if not fcmd:
                                raise EnvironmentError("FETCHCOMMAND is unset")

                        fd, tmp_filename = tempfile.mkstemp()
                        tmp_dirname, tmp_basename = os.path.split(tmp_filename)
                        os.close(fd)

                        fcmd_vars = {
                            "DISTDIR": tmp_dirname,
                            "FILE": tmp_basename,
                            "URI": url
                        }

                        for k in ("PORTAGE_SSH_OPTS",):
                            v = self.settings.get(k)
                            if v is not None:
                                fcmd_vars[k] = v

                        success = portage.getbinpkg.file_get(
                            fcmd=fcmd, fcmd_vars=fcmd_vars)
                        if not success:
                            raise EnvironmentError("%s failed" % (setting,))
                        f = open(tmp_filename, 'rb')

                f_dec = codecs.iterdecode(f,
                    _encodings['repo.content'], errors='replace')
                try:
                    rmt_idx.readHeader(f_dec)
                    if not remote_timestamp: # in case it had not been read from HTTP header
                        remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
                    if not remote_timestamp:
                        # no timestamp in the header, something's wrong
                        pkgindex = None
                        writemsg(_("\n\n!!! Binhost package index " \
                            " has no TIMESTAMP field.\n"), noiselevel=-1)
                    else:
                        if not self._pkgindex_version_supported(rmt_idx):
                            writemsg(_("\n\n!!! Binhost package index version" \
                                " is not supported: '%s'\n") % \
                                rmt_idx.header.get("VERSION"), noiselevel=-1)
                            pkgindex = None
                        elif local_timestamp != remote_timestamp:
                            # Only read the body when the index changed.
                            rmt_idx.readBody(f_dec)
                            pkgindex = rmt_idx
                finally:
                    # Timeout after 5 seconds, in case close() blocks
                    # indefinitely (see bug #350139).
                    try:
                        try:
                            AlarmSignal.register(5)
                            f.close()
                        finally:
                            AlarmSignal.unregister()
                    except AlarmSignal:
                        writemsg("\n\n!!! %s\n" % \
                            _("Timed out while closing connection to binhost"),
                            noiselevel=-1)
            except UseCachedCopyOfRemoteIndex:
                writemsg_stdout("\n")
                writemsg_stdout(
                    colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
                    "\n")
                rmt_idx = pkgindex
            except EnvironmentError as e:
                writemsg(_("\n\n!!! Error fetching binhost package" \
                    " info from '%s'\n") % _hide_url_passwd(base_url))
                # With Python 2, the EnvironmentError message may
                # contain bytes or unicode, so use _unicode to ensure
                # safety with all locales (bug #532784).
                try:
                    error_msg = _unicode(e)
                except UnicodeDecodeError as uerror:
                    error_msg = _unicode(uerror.object,
                        encoding='utf_8', errors='replace')
                writemsg("!!! %s\n\n" % error_msg)
                del e
                pkgindex = None
            if proc is not None:
                if proc.poll() is None:
                    proc.kill()
                    proc.wait()
                proc = None
            if tmp_filename is not None:
                try:
                    os.unlink(tmp_filename)
                except OSError:
                    pass
            if pkgindex is rmt_idx:
                # A fresh remote index was downloaded: cache it locally.
                pkgindex.modified = False # don't update the header
                pkgindex.header["DOWNLOAD_TIMESTAMP"] = "%d" % time.time()
                try:
                    ensure_dirs(os.path.dirname(pkgindex_file))
                    f = atomic_ofstream(pkgindex_file)
                    pkgindex.write(f)
                    f.close()
                except (IOError, PortageException):
                    if os.access(os.path.dirname(pkgindex_file), os.W_OK):
                        raise
                    # The current user doesn't have permission to cache the
                    # file, but that's alright.
            if pkgindex:
                remote_base_uri = pkgindex.header.get("URI", base_url)
                for d in pkgindex.packages:
                    cpv = _pkg_str(d["CPV"], metadata=d,
                        settings=self.settings)
                    instance_key = _instance_key(cpv)
                    # Local package instances override remote instances
                    # with the same instance_key.
                    if instance_key in metadata:
                        continue

                    d["CPV"] = cpv
                    d["BASE_URI"] = remote_base_uri
                    d["PKGINDEX_URI"] = url
                    self._remotepkgs[instance_key] = d
                    metadata[instance_key] = d
                    self.dbapi.cpv_inject(cpv)

                self._remote_has_index = True

        self.populated=1
    def inject(self, cpv, filename=None):
        """Add a freshly built package to the database. This updates
        $PKGDIR/Packages with the new package metadata (including MD5).

        @param cpv: The cpv of the new package to inject
        @type cpv: string
        @param filename: File path of the package to inject, or None if it's
            already in the location returned by getname()
        @type filename: string
        @rtype: _pkg_str or None
        @return: A _pkg_str instance on success, or None on failure.
        """
        mycat, mypkg = catsplit(cpv)
        # The Packages index must be loaded before it can be updated.
        if not self.populated:
            self.populate()
        if filename is None:
            full_path = self.getname(cpv)
        else:
            full_path = filename
        try:
            s = os.stat(full_path)
        except OSError as e:
            # Only a missing file is tolerated; any other stat error is fatal.
            if e.errno != errno.ENOENT:
                raise
            del e
            writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
                noiselevel=-1)
            return
        metadata = self._read_metadata(full_path, s)
        slot = metadata.get("SLOT")
        try:
            self._eval_use_flags(cpv, metadata)
        except portage.exception.InvalidDependString:
            # Treat an unparsable *DEPEND like a missing SLOT below.
            slot = None
        if slot is None:
            writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
                noiselevel=-1)
            return
        fetched = False
        try:
            build_id = cpv.build_id
        except AttributeError:
            build_id = None
        else:
            instance_key = self.dbapi._instance_key(cpv)
            if instance_key in self.dbapi.cpvdict:
                # This means we've been called by aux_update (or
                # similar). The instance key typically changes (due to
                # file modification), so we need to discard existing
                # instance key references.
                self.dbapi.cpv_remove(cpv)
                self._pkg_paths.pop(instance_key, None)
                if self._remotepkgs is not None:
                    fetched = self._remotepkgs.pop(instance_key, None)
        cpv = _pkg_str(cpv, metadata=metadata, settings=self.settings)
        # Reread the Packages index (in case it's been changed by another
        # process) and then update it, all while holding a lock.
        pkgindex_lock = None
        try:
            pkgindex_lock = lockfile(self._pkgindex_file,
                wantnewlockfile=1)
            if filename is not None:
                # Move the binary into its permanent location under
                # $PKGDIR, unless it is already there.
                new_filename = self.getname(cpv, allocate_new=True)
                try:
                    samefile = os.path.samefile(filename, new_filename)
                except OSError:
                    samefile = False
                if not samefile:
                    self._ensure_dir(os.path.dirname(new_filename))
                    _movefile(filename, new_filename, mysettings=self.settings)
                full_path = new_filename
            basename = os.path.basename(full_path)
            pf = catsplit(cpv)[1]
            if (build_id is None and not fetched and
                basename.endswith(".xpak")):
                # Apply the newly assigned BUILD_ID. This is intended
                # to occur only for locally built packages. If the
                # package was fetched, we want to preserve its
                # attributes, so that we can later distinguish that it
                # is identical to its remote counterpart.
                build_id = self._parse_build_id(basename)
                metadata["BUILD_ID"] = _unicode(build_id)
                cpv = _pkg_str(cpv, metadata=metadata,
                    settings=self.settings)
                # Rewrite the embedded xpak segment so the on-disk file
                # carries the BUILD_ID as well.
                binpkg = portage.xpak.tbz2(full_path)
                binary_data = binpkg.get_data()
                binary_data[b"BUILD_ID"] = _unicode_encode(
                    metadata["BUILD_ID"])
                binpkg.recompose_mem(portage.xpak.xpak_mem(binary_data))
            self._file_permissions(full_path)
            pkgindex = self._load_pkgindex()
            if not self._pkgindex_version_supported(pkgindex):
                pkgindex = self._new_pkgindex()
            d = self._inject_file(pkgindex, cpv, full_path)
            self._update_pkgindex_header(pkgindex.header)
            self._pkgindex_write(pkgindex)
        finally:
            if pkgindex_lock:
                unlockfile(pkgindex_lock)
        # This is used to record BINPKGMD5 in the installed package
        # database, for a package that has just been built.
        cpv._metadata["MD5"] = d["MD5"]
        return cpv
    def _read_metadata(self, filename, st, keys=None):
        """Read metadata from the xpak segment of a binary package file.

        @param filename: path of the binary package
        @param st: stat result for filename (supplies size and mtime)
        @param keys: metadata keys to extract, or None for the dbapi's
            standard aux-cache key set
        @rtype: dict
        @return: mapping of metadata key -> normalized unicode value
        """
        if keys is None:
            keys = self.dbapi._aux_cache_keys
            metadata = self.dbapi._aux_cache_slot_dict()
        else:
            metadata = {}
        binary_metadata = portage.xpak.tbz2(filename).get_data()
        for k in keys:
            if k == "_mtime_":
                metadata[k] = _unicode(st[stat.ST_MTIME])
            elif k == "SIZE":
                metadata[k] = _unicode(st.st_size)
            else:
                v = binary_metadata.get(_unicode_encode(k))
                if v is not None:
                    v = _unicode_decode(v)
                    # Collapse whitespace runs to single spaces.
                    metadata[k] = " ".join(v.split())
        # Packages without an explicit EAPI are implicitly EAPI 0.
        metadata.setdefault("EAPI", "0")
        return metadata
    def _inject_file(self, pkgindex, cpv, filename):
        """
        Add a package to internal data structures, and add an
        entry to the given pkgindex.

        @param pkgindex: The PackageIndex instance to which an entry
            will be added.
        @type pkgindex: PackageIndex
        @param cpv: A _pkg_str instance corresponding to the package
            being injected.
        @type cpv: _pkg_str
        @param filename: Absolute file path of the package to inject.
        @type filename: string
        @rtype: dict
        @return: A dict corresponding to the new entry which has been
            added to pkgindex. This may be used to access the checksums
            which have just been generated.
        """
        # Update state for future isremote calls.
        instance_key = self.dbapi._instance_key(cpv)
        if self._remotepkgs is not None:
            self._remotepkgs.pop(instance_key, None)
        self.dbapi.cpv_inject(cpv)
        # Path is stored relative to $PKGDIR.
        self._pkg_paths[instance_key] = filename[len(self.pkgdir)+1:]
        d = self._pkgindex_entry(cpv)
        # If found, remove package(s) with duplicate path.
        path = d.get("PATH", "")
        # Iterate in reverse so deletions do not shift unvisited indices.
        for i in range(len(pkgindex.packages) - 1, -1, -1):
            d2 = pkgindex.packages[i]
            if path and path == d2.get("PATH"):
                # Handle path collisions in $PKGDIR/All
                # when CPV is not identical.
                del pkgindex.packages[i]
            elif cpv == d2.get("CPV"):
                if path == d2.get("PATH", ""):
                    del pkgindex.packages[i]
        pkgindex.packages.append(d)
        return d
    def _pkgindex_write(self, pkgindex):
        """Atomically write the Packages index (and a .gz twin when
        FEATURES=compress-index is enabled), stamping each file's mtime
        with the index TIMESTAMP header."""
        contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
        pkgindex.write(contents)
        contents = contents.getvalue()
        atime = mtime = long(pkgindex.header["TIMESTAMP"])
        output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
            self._pkgindex_file, None)]
        if "compress-index" in self.settings.features:
            gz_fname = self._pkgindex_file + ".gz"
            fileobj = atomic_ofstream(gz_fname, mode="wb")
            output_files.append((GzipFile(filename='', mode="wb",
                fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
        for f, fname, f_close in output_files:
            f.write(contents)
            f.close()
            if f_close is not None:
                # GzipFile does not close the underlying atomic stream.
                f_close.close()
            self._file_permissions(fname)
            # some seconds might have elapsed since TIMESTAMP
            os.utime(fname, (atime, mtime))
    def _pkgindex_entry(self, cpv):
        """
        Performs checksums, and gets size and mtime via lstat.
        Raises InvalidDependString if necessary.

        @rtype: dict
        @return: a dict containing entry for the given cpv.
        """
        pkg_path = self.getname(cpv)
        # Start from the package's own metadata, then layer on checksums
        # and stat-derived fields.
        d = dict(cpv._metadata.items())
        d.update(perform_multiple_checksums(
            pkg_path, hashes=self._pkgindex_hashes))
        d["CPV"] = cpv
        st = os.lstat(pkg_path)
        d["_mtime_"] = _unicode(st[stat.ST_MTIME])
        d["SIZE"] = _unicode(st.st_size)
        rel_path = pkg_path[len(self.pkgdir)+1:]
        # record location if it's non-default
        if rel_path != cpv + ".tbz2":
            d["PATH"] = rel_path
        return d
def _new_pkgindex(self):
return portage.getbinpkg.PackageIndex(
allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
default_header_data=self._pkgindex_default_header_data,
default_pkg_data=self._pkgindex_default_pkg_data,
inherited_keys=self._pkgindex_inherited_keys,
translated_keys=self._pkgindex_translated_keys)
    def _update_pkgindex_header(self, header):
        """Refresh the Packages header with the current profile, index
        format VERSION, binhost URI and configured header keys."""
        portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
        profiles_base = os.path.join(portdir, "profiles") + os.path.sep
        if self.settings.profile_path:
            profile_path = normalize_path(
                os.path.realpath(self.settings.profile_path))
            # Record the profile relative to $PORTDIR/profiles when possible.
            if profile_path.startswith(profiles_base):
                profile_path = profile_path[len(profiles_base):]
            header["PROFILE"] = profile_path
        header["VERSION"] = _unicode(self._pkgindex_version)
        base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
        if base_uri:
            header["URI"] = base_uri
        else:
            header.pop("URI", None)
        # Propagate configured values; unset keys are removed so that
        # stale data never lingers in the header.
        for k in self._pkgindex_header_keys:
            v = self.settings.get(k, None)
            if v:
                header[k] = v
            else:
                header.pop(k, None)
        # These values may be useful for using a binhost without
        # having a local copy of the profile (bug #470006).
        for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
            k = "USE_EXPAND_VALUES_" + k
            v = self.settings.get(k)
            if v:
                header[k] = v
            else:
                header.pop(k, None)
def _pkgindex_version_supported(self, pkgindex):
version = pkgindex.header.get("VERSION")
if version:
try:
if int(version) <= self._pkgindex_version:
return True
except ValueError:
pass
return False
    def _eval_use_flags(self, cpv, metadata):
        """Reduce USE-conditional dependency strings in metadata to their
        effective form for this package's USE flags.

        Modifies metadata in place; logs and re-raises
        InvalidDependString on parse failure.
        """
        use = frozenset(metadata.get("USE", "").split())
        for k in self._pkgindex_use_evaluated_keys:
            # Dependency keys are parsed as atoms; other evaluated keys
            # are plain token lists.
            if k.endswith('DEPEND'):
                token_class = Atom
            else:
                token_class = None
            deps = metadata.get(k)
            if deps is None:
                continue
            try:
                deps = use_reduce(deps, uselist=use, token_class=token_class)
                deps = paren_enclose(deps)
            except portage.exception.InvalidDependString as e:
                writemsg("%s: %s\n" % (k, e), noiselevel=-1)
                raise
            metadata[k] = deps
def exists_specific(self, cpv):
if not self.populated:
self.populate()
return self.dbapi.match(
dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
def dep_bestmatch(self, mydep):
"compatibility method -- all matches, not just visible ones"
if not self.populated:
self.populate()
writemsg("\n\n", 1)
writemsg("mydep: %s\n" % mydep, 1)
mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
writemsg("mydep: %s\n" % mydep, 1)
mykey = dep_getkey(mydep)
writemsg("mykey: %s\n" % mykey, 1)
mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
writemsg("mymatch: %s\n" % mymatch, 1)
if mymatch is None:
return ""
return mymatch
    def getname(self, cpv, allocate_new=None):
        """Returns a file location for this package.
        If cpv has both build_time and build_id attributes, then the
        path to the specific corresponding instance is returned.
        Otherwise, allocate a new path and return that. When allocating
        a new path, behavior depends on the binpkg-multi-instance
        FEATURES setting.
        """
        if not self.populated:
            self.populate()
        try:
            cpv.cp
        except AttributeError:
            # Promote plain strings to _pkg_str for attribute access.
            cpv = _pkg_str(cpv)
        filename = None
        if allocate_new:
            filename = self._allocate_filename(cpv)
        elif self._is_specific_instance(cpv):
            # A specific instance was requested: look up its recorded path.
            instance_key = self.dbapi._instance_key(cpv)
            path = self._pkg_paths.get(instance_key)
            if path is not None:
                filename = os.path.join(self.pkgdir, path)
        if filename is None and not allocate_new:
            try:
                instance_key = self.dbapi._instance_key(cpv,
                    support_string=True)
            except KeyError:
                pass
            else:
                filename = self._pkg_paths.get(instance_key)
                if filename is not None:
                    filename = os.path.join(self.pkgdir, filename)
        if filename is None:
            # Fall back to the conventional default location.
            if self._multi_instance:
                pf = catsplit(cpv)[1]
                # NOTE(review): build id is hard-coded to "1" here;
                # presumably fresh ids come via allocate_new -- confirm.
                filename = "%s-%s.xpak" % (
                    os.path.join(self.pkgdir, cpv.cp, pf), "1")
            else:
                filename = os.path.join(self.pkgdir, cpv + ".tbz2")
        return filename
def _is_specific_instance(self, cpv):
specific = True
try:
build_time = cpv.build_time
build_id = cpv.build_id
except AttributeError:
specific = False
else:
if build_time is None or build_id is None:
specific = False
return specific
def _max_build_id(self, cpv):
max_build_id = 0
for x in self.dbapi.cp_list(cpv.cp):
if (x == cpv and x.build_id is not None and
x.build_id > max_build_id):
max_build_id = x.build_id
return max_build_id
def _allocate_filename(self, cpv):
return os.path.join(self.pkgdir, cpv + ".tbz2")
def _allocate_filename_multi(self, cpv):
# First, get the max build_id found when _populate was
# called.
max_build_id = self._max_build_id(cpv)
# A new package may have been added concurrently since the
# last _populate call, so use increment build_id until
# we locate an unused id.
pf = catsplit(cpv)[1]
build_id = max_build_id + 1
while True:
filename = "%s-%s.xpak" % (
os.path.join(self.pkgdir, cpv.cp, pf), build_id)
if os.path.exists(filename):
build_id += 1
else:
return filename
@staticmethod
def _parse_build_id(filename):
build_id = -1
hyphen = filename.rfind("-", 0, -6)
if hyphen != -1:
build_id = filename[hyphen+1:-5]
try:
build_id = long(build_id)
except ValueError:
pass
return build_id
def isremote(self, pkgname):
"""Returns true if the package is kept remotely and it has not been
downloaded (or it is only partially downloaded)."""
if (self._remotepkgs is None or
self.dbapi._instance_key(pkgname) not in self._remotepkgs):
return False
# Presence in self._remotepkgs implies that it's remote. When a
# package is downloaded, state is updated by self.inject().
return True
def get_pkgindex_uri(self, cpv):
"""Returns the URI to the Packages file for a given package."""
uri = None
if self._remotepkgs is not None:
metadata = self._remotepkgs.get(self.dbapi._instance_key(cpv))
if metadata is not None:
uri = metadata["PKGINDEX_URI"]
return uri
    def gettbz2(self, pkgname):
        """Fetches the package from a remote site, if necessary. Attempts to
        resume if the file appears to be partially downloaded."""
        instance_key = self.dbapi._instance_key(pkgname)
        tbz2_path = self.getname(pkgname)
        tbz2name = os.path.basename(tbz2_path)
        resume = False
        if os.path.exists(tbz2_path):
            if tbz2name[:-5] not in self.invalids:
                # Already present and not flagged invalid: nothing to do.
                return
            else:
                resume = True
                writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
                    noiselevel=-1)
        mydest = os.path.dirname(self.getname(pkgname))
        self._ensure_dir(mydest)
        # urljoin doesn't work correctly with unrecognized protocols like sftp
        if self._remote_has_index:
            rel_url = self._remotepkgs[instance_key].get("PATH")
            if not rel_url:
                rel_url = pkgname + ".tbz2"
            remote_base_uri = self._remotepkgs[instance_key]["BASE_URI"]
            url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
        else:
            url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
        protocol = urlparse(url)[0]
        # Prefer a protocol-specific fetch/resume command when configured,
        # falling back to the generic one.
        fcmd_prefix = "FETCHCOMMAND"
        if resume:
            fcmd_prefix = "RESUMECOMMAND"
        fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
        if not fcmd:
            fcmd = self.settings.get(fcmd_prefix)
        success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
        if not success:
            # Remove any partial download before signalling failure.
            try:
                os.unlink(self.getname(pkgname))
            except OSError:
                pass
            raise portage.exception.FileNotFound(mydest)
        # Register the freshly downloaded package locally.
        self.inject(pkgname)
def _load_pkgindex(self):
pkgindex = self._new_pkgindex()
try:
f = io.open(_unicode_encode(self._pkgindex_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace')
except EnvironmentError:
pass
else:
try:
pkgindex.read(f)
finally:
f.close()
return pkgindex
    def _get_digests(self, pkg):
        """Collect expected digests (and size) for pkg from remote
        metadata, falling back to the local Packages index.

        @rtype: dict
        @return: hash name -> value mapping; empty when no metadata
            for the package instance can be located.
        """
        try:
            cpv = pkg.cpv
        except AttributeError:
            cpv = pkg
        _instance_key = self.dbapi._instance_key
        instance_key = _instance_key(cpv)
        digests = {}
        metadata = (None if self._remotepkgs is None else
            self._remotepkgs.get(instance_key))
        if metadata is None:
            # Not known remotely; search the local index for an entry
            # matching this exact instance.
            for d in self._load_pkgindex().packages:
                if (d["CPV"] == cpv and
                    instance_key == _instance_key(_pkg_str(d["CPV"],
                    metadata=d, settings=self.settings))):
                    metadata = d
                    break
        if metadata is None:
            return digests
        for k in hashfunc_map:
            v = metadata.get(k)
            if not v:
                continue
            digests[k] = v
        if "SIZE" in metadata:
            try:
                digests["size"] = int(metadata["SIZE"])
            except ValueError:
                writemsg(_("!!! Malformed SIZE attribute in remote " \
                    "metadata for '%s'\n") % cpv)
        return digests
    def digestCheck(self, pkg):
        """
        Verify digests for the given package and raise DigestException
        if verification fails.

        @rtype: bool
        @return: True if digests could be located, False otherwise.
        """
        digests = self._get_digests(pkg)
        if not digests:
            return False
        try:
            cpv = pkg.cpv
        except AttributeError:
            cpv = pkg
        pkg_path = self.getname(cpv)
        # Honor the user's checksum filter before verifying.
        hash_filter = _hash_filter(
            self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
        if not hash_filter.transparent:
            digests = _apply_hash_filter(digests, hash_filter)
        eout = EOutput()
        eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
        # First pass is quiet; on failure re-verify to obtain the reason.
        ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
        if not ok:
            ok, reason = verify_all(pkg_path, digests)
            if not ok:
                raise portage.exception.DigestException(
                    (pkg_path,) + tuple(reason))
        return True
def getslot(self, mycatpkg):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
myslot = self.dbapi._pkg_str(mycatpkg, None).slot
except KeyError:
pass
return myslot
| gpl-2.0 |
thaim/ansible | lib/ansible/module_utils/openstack.py | 26 | 7072 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from ansible.module_utils.six import iteritems
def openstack_argument_spec():
    # DEPRECATED: argument spec for the deprecated old OpenStack modules
    # only. Modern OpenStack auth is far more complex than this.
    # Standard OpenStack environment variables are consumed so that
    # ad-hoc command line use picks up sensible defaults; in playbooks
    # one would assume variables are set explicitly.
    env_auth_url = os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
    env_password = os.environ.get('OS_PASSWORD', None)
    env_region = os.environ.get('OS_REGION_NAME', None)
    env_username = os.environ.get('OS_USERNAME', 'admin')
    env_tenant = os.environ.get('OS_TENANT_NAME', env_username)
    spec = dict(
        login_username=dict(default=env_username),
        auth_url=dict(default=env_auth_url),
        region_name=dict(default=env_region),
        availability_zone=dict(),
    )
    # Password and tenant become required only when the environment
    # does not already provide them.
    spec['login_password'] = (
        dict(default=env_password) if env_password else dict(required=True))
    spec['login_tenant_name'] = (
        dict(default=env_tenant) if env_tenant else dict(required=True))
    return spec
def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
    """Collect IP addresses from a nova server 'addresses' mapping.

    Networks whose name equals key_name contribute all of their
    addresses; every other network contributes only the interfaces
    whose 'OS-EXT-IPS:type' equals ext_tag.
    """
    found = []
    for (net_name, interfaces) in iteritems(addresses):
        if key_name and net_name == key_name:
            found.extend(iface['addr'] for iface in interfaces)
            continue
        for iface in interfaces:
            if 'OS-EXT-IPS:type' in iface and iface['OS-EXT-IPS:type'] == ext_tag:
                found.append(iface['addr'])
    return found
def openstack_full_argument_spec(**kwargs):
    """Standard argument spec shared by modern OpenStack modules;
    caller kwargs override or extend the defaults."""
    spec = {
        'cloud': dict(default=None, type='raw'),
        'auth_type': dict(default=None),
        'auth': dict(default=None, type='dict', no_log=True),
        'region_name': dict(default=None),
        'availability_zone': dict(default=None),
        'validate_certs': dict(default=None, type='bool', aliases=['verify']),
        'ca_cert': dict(default=None, aliases=['cacert']),
        'client_cert': dict(default=None, aliases=['cert']),
        'client_key': dict(default=None, no_log=True, aliases=['key']),
        'wait': dict(default=True, type='bool'),
        'timeout': dict(default=180, type='int'),
        'api_timeout': dict(default=None, type='int'),
        'interface': dict(
            default='public', choices=['public', 'internal', 'admin'],
            aliases=['endpoint_type']),
    }
    spec.update(kwargs)
    return spec
def openstack_module_kwargs(**kwargs):
    """Filter kwargs down to the AnsibleModule constraint arguments
    (mutually_exclusive / required_together / required_one_of)."""
    allowed = ('mutually_exclusive', 'required_together', 'required_one_of')
    ret = {}
    for key in allowed:
        if key not in kwargs:
            continue
        if key in ret:
            # Defensive merge; with distinct keys this branch never fires.
            ret[key].extend(kwargs[key])
        else:
            ret[key] = kwargs[key]
    return ret
def openstack_cloud_from_module(module, min_version='0.12.0'):
    """Build an openstacksdk connection from an Ansible module's params.

    Fails the module when openstacksdk is missing or older than
    min_version (floored at 0.12.0). Returns a (sdk, connection) tuple.
    """
    from distutils.version import StrictVersion
    try:
        # Due to the name shadowing we should import other way
        import importlib
        sdk = importlib.import_module('openstack')
        sdk_version = importlib.import_module('openstack.version')
    except ImportError:
        module.fail_json(msg='openstacksdk is required for this module')
    if min_version:
        min_version = max(StrictVersion('0.12.0'), StrictVersion(min_version))
    else:
        min_version = StrictVersion('0.12.0')
    if StrictVersion(sdk_version.__version__) < min_version:
        module.fail_json(
            msg="To utilize this module, the installed version of "
                "the openstacksdk library MUST be >={min_version}.".format(
                    min_version=min_version))
    # 'cloud' may be a named cloud (string) or a full config dict.
    cloud_config = module.params.pop('cloud', None)
    try:
        if isinstance(cloud_config, dict):
            fail_message = (
                "A cloud config dict was provided to the cloud parameter"
                " but also a value was provided for {param}. If a cloud"
                " config dict is provided, {param} should be"
                " excluded.")
            # A config dict must be self-contained: reject overlapping
            # per-parameter auth settings.
            # NOTE(review): 'client_cert' is absent from this check list;
            # confirm whether that omission is intentional.
            for param in (
                    'auth', 'region_name', 'validate_certs',
                    'ca_cert', 'client_key', 'api_timeout', 'auth_type'):
                if module.params[param] is not None:
                    module.fail_json(msg=fail_message.format(param=param))
            # For 'interface' parameter, fail if we receive a non-default value
            if module.params['interface'] != 'public':
                module.fail_json(msg=fail_message.format(param='interface'))
            return sdk, sdk.connect(**cloud_config)
        else:
            return sdk, sdk.connect(
                cloud=cloud_config,
                auth_type=module.params['auth_type'],
                auth=module.params['auth'],
                region_name=module.params['region_name'],
                verify=module.params['validate_certs'],
                cacert=module.params['ca_cert'],
                key=module.params['client_key'],
                api_timeout=module.params['api_timeout'],
                interface=module.params['interface'],
            )
    except sdk.exceptions.SDKException as e:
        # Probably a cloud configuration/login error
        module.fail_json(msg=str(e))
| mit |
petteyg/intellij-community | python/lib/Lib/site-packages/django/db/backends/dummy/base.py | 89 | 1515 | """
Dummy database backend for Django.
Django uses this if the database ENGINE setting is empty (None or empty string).
Each of these API functions, except connection.close(), raises
ImproperlyConfigured.
"""
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import *
from django.db.backends.creation import BaseDatabaseCreation
def complain(*args, **kwargs):
    """Placeholder API entry point that always fails: the database
    ENGINE setting has not been configured."""
    message = "You haven't set the database ENGINE setting yet."
    raise ImproperlyConfigured(message)
def ignore(*args, **kwargs):
    """No-op stand-in for operations that are safe to skip (e.g. close
    or rollback) even without a configured database."""
    return None
class DatabaseError(Exception):
    """Base database exception for the dummy backend."""
    pass

class IntegrityError(DatabaseError):
    """Dummy stand-in for backend integrity errors."""
    pass

class DatabaseOperations(BaseDatabaseOperations):
    # Any attempt to quote an identifier fails loudly.
    quote_name = complain

class DatabaseClient(BaseDatabaseClient):
    # There is no shell for a non-existent database.
    runshell = complain

class DatabaseIntrospection(BaseDatabaseIntrospection):
    # Every introspection entry point raises ImproperlyConfigured.
    get_table_list = complain
    get_table_description = complain
    get_relations = complain
    get_indexes = complain
class DatabaseWrapper(object):
    """Connection wrapper whose operations raise ImproperlyConfigured,
    except for rollback and close which are safe no-ops."""
    operators = {}
    # Obtaining a cursor or committing is an error; rollback is ignored
    # so request teardown does not blow up.
    cursor = complain
    _commit = complain
    _rollback = ignore

    def __init__(self, settings_dict, alias, *args, **kwargs):
        # Mirror the attribute surface of a real backend wrapper so
        # generic code can introspect it before failing.
        self.features = BaseDatabaseFeatures(self)
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = BaseDatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
        self.settings_dict = settings_dict
        self.alias = alias

    def close(self):
        # Closing a non-existent connection is always safe.
        pass
| apache-2.0 |
jfmartinez64/test | libs/xmpp/auth.py | 196 | 15633 | ## auth.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: auth.py,v 1.41 2008/09/13 21:45:21 normanr Exp $
"""
Provides library with all Non-SASL and SASL authentication mechanisms.
Can be used both for client and transport authentication.
"""
from protocol import *
from client import PlugIn
import sha,base64,random,dispatcher,re
import md5
def HH(some):
    """Hex-encoded MD5 digest of *some*."""
    return md5.new(some).hexdigest()

def H(some):
    """Raw MD5 digest of *some*."""
    return md5.new(some).digest()

def C(some):
    """Join a sequence of strings with ':' separators."""
    return ':'.join(some)
class NonSASL(PlugIn):
    """ Implements old Non-SASL (JEP-0078) authentication used in jabberd1.4 and transport authentication."""
    def __init__(self, user, password, resource):
        """ Caches username, password and resource for auth. """
        PlugIn.__init__(self)
        self.DBG_LINE = 'gen_auth'
        self.user = user
        self.password = password
        self.resource = resource

    def plugin(self, owner):
        """ Determine the best auth method (digest/0k/plain) and use it for auth.
            Returns used method name on success. Used internally. """
        # An empty resource means we are authenticating as a component.
        if not self.resource:
            return self.authComponent(owner)
        self.DEBUG('Querying server about possible auth methods', 'start')
        resp = owner.Dispatcher.SendAndWaitForResponse(Iq('get', NS_AUTH, payload=[Node('username', payload=[self.user])]))
        if not isResultNode(resp):
            self.DEBUG('No result node arrived! Aborting...', 'error')
            return
        iq = Iq(typ='set', node=resp)
        query = iq.getTag('query')
        query.setTagData('username', self.user)
        query.setTagData('resource', self.resource)
        if query.getTag('digest'):
            # Digest auth: SHA1 of stream id + password (JEP-0078).
            self.DEBUG("Performing digest authentication", 'ok')
            query.setTagData('digest', sha.new(owner.Dispatcher.Stream._document_attrs['id'] + self.password).hexdigest())
            if query.getTag('password'):
                query.delChild('password')
            method = 'digest'
        elif query.getTag('token'):
            # Zero-knowledge auth: iterated SHA1 over hashed password + token.
            token = query.getTagData('token')
            seq = query.getTagData('sequence')
            self.DEBUG("Performing zero-k authentication", 'ok')
            hash = sha.new(sha.new(self.password).hexdigest() + token).hexdigest()
            for foo in xrange(int(seq)):
                hash = sha.new(hash).hexdigest()
            query.setTagData('hash', hash)
            method = '0k'
        else:
            # Last resort: the password is sent in clear text.
            self.DEBUG("Sequre methods unsupported, performing plain text authentication", 'warn')
            query.setTagData('password', self.password)
            method = 'plain'
        resp = owner.Dispatcher.SendAndWaitForResponse(iq)
        if isResultNode(resp):
            self.DEBUG('Sucessfully authenticated with remove host.', 'ok')
            owner.User = self.user
            owner.Resource = self.resource
            owner._registered_name = owner.User + '@' + owner.Server + '/' + owner.Resource
            return method
        self.DEBUG('Authentication failed!', 'error')

    def authComponent(self, owner):
        """ Authenticate component. Send handshake stanza and wait for result. Returns "ok" on success. """
        self.handshake = 0
        owner.send(Node(NS_COMPONENT_ACCEPT + ' handshake', payload=[sha.new(owner.Dispatcher.Stream._document_attrs['id'] + self.password).hexdigest()]))
        owner.RegisterHandler('handshake', self.handshakeHandler, xmlns=NS_COMPONENT_ACCEPT)
        # Block until handshakeHandler flips self.handshake to +1/-1.
        while not self.handshake:
            self.DEBUG("waiting on handshake", 'notify')
            owner.Process(1)
        owner._registered_name = self.user
        if self.handshake + 1:
            return 'ok'

    def handshakeHandler(self, disp, stanza):
        """ Handler for registering in dispatcher for accepting transport authentication. """
        # 1 = success; -1 = failure (anything other than <handshake/>).
        if stanza.getName() == 'handshake':
            self.handshake = 1
        else:
            self.handshake = -1
class SASL(PlugIn):
    """ Implements SASL authentication. """
    def __init__(self, username, password):
        # Caches credentials for the later auth exchange.
        PlugIn.__init__(self)
        self.username = username
        self.password = password

    def plugin(self, owner):
        # Pre-XMPP-1.0 streams (no 'version' attribute) cannot do SASL.
        if not self._owner.Dispatcher.Stream._document_attrs.has_key('version'):
            self.startsasl = 'not-supported'
        elif self._owner.Dispatcher.Stream.features:
            try:
                self.FeaturesHandler(self._owner.Dispatcher, self._owner.Dispatcher.Stream.features)
            except NodeProcessed:
                pass
        else:
            self.startsasl = None

    def auth(self):
        """ Start authentication. Result can be obtained via "SASL.startsasl" attribute and will be
            either "success" or "failure". Note that successfull auth will take at least
            two Dispatcher.Process() calls. """
        if self.startsasl:
            pass
        elif self._owner.Dispatcher.Stream.features:
            try:
                self.FeaturesHandler(self._owner.Dispatcher, self._owner.Dispatcher.Stream.features)
            except NodeProcessed:
                pass
        else:
            self._owner.RegisterHandler('features', self.FeaturesHandler, xmlns=NS_STREAMS)

    def plugout(self):
        """ Remove SASL handlers from owner's dispatcher. Used internally. """
        if self._owner.__dict__.has_key('features'):
            self._owner.UnregisterHandler('features', self.FeaturesHandler, xmlns=NS_STREAMS)
        if self._owner.__dict__.has_key('challenge'):
            self._owner.UnregisterHandler('challenge', self.SASLHandler, xmlns=NS_SASL)
        if self._owner.__dict__.has_key('failure'):
            self._owner.UnregisterHandler('failure', self.SASLHandler, xmlns=NS_SASL)
        if self._owner.__dict__.has_key('success'):
            self._owner.UnregisterHandler('success', self.SASLHandler, xmlns=NS_SASL)

    def FeaturesHandler(self, conn, feats):
        """ Used to determine if server supports SASL auth. Used internally. """
        if not feats.getTag('mechanisms', namespace=NS_SASL):
            self.startsasl = 'not-supported'
            self.DEBUG('SASL not supported by server', 'error')
            return
        mecs = []
        for mec in feats.getTag('mechanisms', namespace=NS_SASL).getTags('mechanism'):
            mecs.append(mec.getData())
        self._owner.RegisterHandler('challenge', self.SASLHandler, xmlns=NS_SASL)
        self._owner.RegisterHandler('failure', self.SASLHandler, xmlns=NS_SASL)
        self._owner.RegisterHandler('success', self.SASLHandler, xmlns=NS_SASL)
        # Mechanism preference: ANONYMOUS (only when no username was
        # given), then DIGEST-MD5, then PLAIN as a last resort.
        if "ANONYMOUS" in mecs and self.username == None:
            node = Node('auth', attrs={'xmlns': NS_SASL, 'mechanism': 'ANONYMOUS'})
        elif "DIGEST-MD5" in mecs:
            node = Node('auth', attrs={'xmlns': NS_SASL, 'mechanism': 'DIGEST-MD5'})
        elif "PLAIN" in mecs:
            sasl_data = '%s\x00%s\x00%s' % (self.username + '@' + self._owner.Server, self.username, self.password)
            node = Node('auth', attrs={'xmlns': NS_SASL, 'mechanism': 'PLAIN'}, payload=[base64.encodestring(sasl_data).replace('\r', '').replace('\n', '')])
        else:
            self.startsasl = 'failure'
            self.DEBUG('I can only use DIGEST-MD5 and PLAIN mecanisms.', 'error')
            return
        self.startsasl = 'in-process'
        self._owner.send(node.__str__())
        raise NodeProcessed

    def SASLHandler(self, conn, challenge):
        """ Perform next SASL auth step. Used internally. """
        if challenge.getNamespace() <> NS_SASL:
            return
        if challenge.getName() == 'failure':
            self.startsasl = 'failure'
            try:
                reason = challenge.getChildren()[0]
            except:
                reason = challenge
            self.DEBUG('Failed SASL authentification: %s' % reason, 'error')
            raise NodeProcessed
        elif challenge.getName() == 'success':
            # Auth done: the stream must be restarted with a fresh
            # Dispatcher while preserving the registered handlers.
            self.startsasl = 'success'
            self.DEBUG('Successfully authenticated with remote server.', 'ok')
            handlers = self._owner.Dispatcher.dumpHandlers()
            self._owner.Dispatcher.PlugOut()
            dispatcher.Dispatcher().PlugIn(self._owner)
            self._owner.Dispatcher.restoreHandlers(handlers)
            self._owner.User = self.username
            raise NodeProcessed
        ########################################3333
        # DIGEST-MD5 challenge: parse the key=value pairs (RFC 2831).
        incoming_data = challenge.getData()
        chal = {}
        data = base64.decodestring(incoming_data)
        self.DEBUG('Got challenge:' + data, 'ok')
        for pair in re.findall('(\w+\s*=\s*(?:(?:"[^"]+")|(?:[^,]+)))', data):
            key, value = [x.strip() for x in pair.split('=', 1)]
            # Strip surrounding quotes from quoted values.
            if value[:1] == '"' and value[-1:] == '"':
                value = value[1:-1]
            chal[key] = value
        if chal.has_key('qop') and 'auth' in [x.strip() for x in chal['qop'].split(',')]:
            # First challenge: compute and send the digest response.
            resp = {}
            resp['username'] = self.username
            resp['realm'] = self._owner.Server
            resp['nonce'] = chal['nonce']
            cnonce = ''
            for i in range(7):
                cnonce += hex(int(random.random() * 65536 * 4096))[2:]
            resp['cnonce'] = cnonce
            resp['nc'] = ('00000001')
            resp['qop'] = 'auth'
            resp['digest-uri'] = 'xmpp/' + self._owner.Server
            # A1/A2/response computed per RFC 2831.
            A1 = C([H(C([resp['username'], resp['realm'], self.password])), resp['nonce'], resp['cnonce']])
            A2 = C(['AUTHENTICATE', resp['digest-uri']])
            response = HH(C([HH(A1), resp['nonce'], resp['nc'], resp['cnonce'], resp['qop'], HH(A2)]))
            resp['response'] = response
            resp['charset'] = 'utf-8'
            sasl_data = ''
            for key in ['charset', 'username', 'realm', 'nonce', 'nc', 'cnonce', 'digest-uri', 'response', 'qop']:
                if key in ['nc', 'qop', 'response', 'charset']:
                    sasl_data += "%s=%s," % (key, resp[key])
                else:
                    sasl_data += '%s="%s",' % (key, resp[key])
            ########################################3333
            node = Node('response', attrs={'xmlns': NS_SASL}, payload=[base64.encodestring(sasl_data[:-1]).replace('\r', '').replace('\n', '')])
            self._owner.send(node.__str__())
        elif chal.has_key('rspauth'):
            # Second challenge: server verified us; send empty response.
            self._owner.send(Node('response', attrs={'xmlns': NS_SASL}).__str__())
        else:
            self.startsasl = 'failure'
            self.DEBUG('Failed SASL authentification: unknown challenge', 'error')
        raise NodeProcessed
class Bind(PlugIn):
    """ Bind some JID to the current connection to allow router know of our location.

    Implements XMPP resource binding via the 'bind' stream feature: after the
    server's features arrive, Bind() sends an <iq type='set'> containing a
    <bind/> element and records the JID the server assigns to this connection.
    """
    def __init__(self):
        PlugIn.__init__(self)
        self.DBG_LINE='bind'
        # self.bound states: None = features not processed yet,
        # 'failure' = server did not offer binding, list = ready
        # (successfully bound JIDs are appended to the list).
        self.bound=None
    def plugin(self,owner):
        """ Start resource binding, if allowed at this time. Used internally. """
        # If features were already received, handle them now; otherwise wait
        # for the 'features' stanza to arrive.
        if self._owner.Dispatcher.Stream.features:
            try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
            except NodeProcessed: pass
        else: self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
    def plugout(self):
        """ Remove Bind handler from owner's dispatcher. Used internally. """
        self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
    def FeaturesHandler(self,conn,feats):
        """ Determine if server supports resource binding and set some internal attributes accordingly. """
        if not feats.getTag('bind',namespace=NS_BIND):
            self.bound='failure'
            self.DEBUG('Server does not requested binding.','error')
            return
        # session flag: 1 = server advertises session establishment, -1 = it does not.
        if feats.getTag('session',namespace=NS_SESSION): self.session=1
        else: self.session=-1
        self.bound=[]
    def Bind(self,resource=None):
        """ Perform binding. Use provided resource name or random (if not provided). """
        # Pump the connection until the stream features have been processed.
        while self.bound is None and self._owner.Process(1): pass
        if resource: resource=[Node('resource',payload=[resource])]
        else: resource=[]
        resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('bind',attrs={'xmlns':NS_BIND},payload=resource)]))
        if isResultNode(resp):
            self.bound.append(resp.getTag('bind').getTagData('jid'))
            self.DEBUG('Successfully bound %s.'%self.bound[-1],'ok')
            jid=JID(resp.getTag('bind').getTagData('jid'))
            self._owner.User=jid.getNode()
            self._owner.Resource=jid.getResource()
            # After a successful bind, also open a session (legacy behaviour).
            resp=self._owner.SendAndWaitForResponse(Protocol('iq',typ='set',payload=[Node('session',attrs={'xmlns':NS_SESSION})]))
            if isResultNode(resp):
                self.DEBUG('Successfully opened session.','ok')
                self.session=1
                return 'ok'
            else:
                self.DEBUG('Session open failed.','error')
                self.session=0
        elif resp: self.DEBUG('Binding failed: %s.'%resp.getTag('error'),'error')
        else:
            self.DEBUG('Binding failed: timeout expired.','error')
        # Any failure path (bind error, session error, timeout) returns ''.
        return ''
class ComponentBind(PlugIn):
    """ ComponentBind some JID to the current connection to allow router know of our location.

    Component variant of resource binding: binds a domain name (rather than a
    user resource) on a component connection, optionally using the SASL-era
    component protocol.
    """
    def __init__(self, sasl):
        PlugIn.__init__(self)
        self.DBG_LINE='bind'
        # self.bound states mirror Bind: None = waiting for features,
        # 'failure' = binding not offered, list = ready.
        self.bound=None
        self.needsUnregister=None
        # Whether the component stream uses SASL-style binding.
        self.sasl = sasl
    def plugin(self,owner):
        """ Start resource binding, if allowed at this time. Used internally. """
        if not self.sasl:
            # Non-SASL component streams skip feature negotiation entirely.
            self.bound=[]
            return
        if self._owner.Dispatcher.Stream.features:
            try: self.FeaturesHandler(self._owner.Dispatcher,self._owner.Dispatcher.Stream.features)
            except NodeProcessed: pass
        else:
            self._owner.RegisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
            # Remember to unregister this handler in plugout().
            self.needsUnregister=1
    def plugout(self):
        """ Remove ComponentBind handler from owner's dispatcher. Used internally. """
        if self.needsUnregister:
            self._owner.UnregisterHandler('features',self.FeaturesHandler,xmlns=NS_STREAMS)
    def FeaturesHandler(self,conn,feats):
        """ Determine if server supports resource binding and set some internal attributes accordingly. """
        if not feats.getTag('bind',namespace=NS_BIND):
            self.bound='failure'
            self.DEBUG('Server does not requested binding.','error')
            return
        # session flag: 1 = session establishment advertised, -1 = not.
        if feats.getTag('session',namespace=NS_SESSION): self.session=1
        else: self.session=-1
        self.bound=[]
    def Bind(self,domain=None):
        """ Perform binding. Use provided domain name (if not provided). """
        # Pump the connection until features have been processed.
        while self.bound is None and self._owner.Process(1): pass
        if self.sasl:
            xmlns = NS_COMPONENT_1
        else:
            xmlns = None
        self.bindresponse = None
        ttl = dispatcher.DefaultTimeout
        self._owner.RegisterHandler('bind',self.BindHandler,xmlns=xmlns)
        # NOTE(review): the outgoing stanza always uses NS_COMPONENT_1 even
        # when self.sasl is false (handler registered with xmlns=None) --
        # presumably intentional for legacy servers; verify.
        self._owner.send(Protocol('bind',attrs={'name':domain},xmlns=NS_COMPONENT_1))
        # Wait for BindHandler to capture the response, bounded by ttl ticks.
        while self.bindresponse is None and self._owner.Process(1) and ttl > 0: ttl-=1
        self._owner.UnregisterHandler('bind',self.BindHandler,xmlns=xmlns)
        resp=self.bindresponse
        if resp and resp.getAttr('error'):
            self.DEBUG('Binding failed: %s.'%resp.getAttr('error'),'error')
        elif resp:
            self.DEBUG('Successfully bound.','ok')
            return 'ok'
        else:
            self.DEBUG('Binding failed: timeout expired.','error')
        return ''
    def BindHandler(self,conn,bind):
        # Stash the server's response for the Bind() wait loop above.
        self.bindresponse = bind
        pass
| gpl-3.0 |
chris-wood/ndn-cxx-accounting | .waf-tools/pch.py | 16 | 4636 | #! /usr/bin/env python
# encoding: utf-8
# Alexander Afanasyev (UCLA), 2014
"""
Enable precompiled C++ header support (currently only clang++ and g++ are supported)
To use this tool, wscript should look like:
def options(opt):
opt.load('pch')
    # This will add a `--without-pch` configure option.
    # Precompiled header support is enabled by default; pass --without-pch at the configure stage to disable it
def configure(conf):
conf.load('pch')
    # this will set conf.env.WITH_PCH unless --without-pch is specified or an unsupported compiler is used
    # Unless conf.env.WITH_PCH is set, the precompiled header support is disabled
def build(bld):
bld(features='cxx pch',
target='precompiled-headers',
name='precompiled-headers',
headers='a.h b.h c.h', # headers to pre-compile into `precompiled-headers`
# Other parameters to compile precompiled headers
# includes=...,
# export_includes=...,
# use=...,
# ...
# Exported parameters will be propagated even if precompiled headers are disabled
)
bld(
target='test',
features='cxx cxxprogram',
source='a.cpp b.cpp d.cpp main.cpp',
use='precompiled-headers',
)
# or
bld(
target='test',
features='pch cxx cxxprogram',
source='a.cpp b.cpp d.cpp main.cpp',
headers='a.h b.h c.h',
)
Note that precompiled header must have multiple inclusion guards. If the guards are missing, any benefit of precompiled header will be voided and compilation may fail in some cases.
"""
import os
from waflib import Task, TaskGen, Logs, Utils
from waflib.Tools import c_preproc, cxx
PCH_COMPILER_OPTIONS = {
'clang++': [['-include'], '.pch', ['-x', 'c++-header']],
'g++': [['-include'], '.gch', ['-x', 'c++-header']],
}
def options(opt):
    """Register the --without-pch option (PCH support is ON by default)."""
    # 'store_false' into dest='with_pch': passing --without-pch disables PCH.
    opt.add_option('--without-pch', action='store_false', default=True, dest='with_pch', help='''Try to use precompiled header to speed up compilation (only g++ and clang++)''')
def configure(conf):
    """Set conf.env.WITH_PCH plus the compiler-specific PCH flags when the
    user did not pass --without-pch and the C++ compiler is supported."""
    if (conf.options.with_pch and conf.env['COMPILER_CXX'] in PCH_COMPILER_OPTIONS.keys()):
        if Utils.unversioned_sys_platform() == "darwin" and conf.env['CXX_NAME'] == 'clang':
            # NOTE(review): reads CC_VERSION (not a CXX-specific key) for the
            # clang version check -- presumably both are populated the same
            # way by waf; verify.
            version = tuple(int(i) for i in conf.env['CC_VERSION'])
            if version < (6, 1, 0):
                # Issue #2804
                return
        conf.env.WITH_PCH = True
        flags = PCH_COMPILER_OPTIONS[conf.env['COMPILER_CXX']]
        conf.env.CXXPCH_F = flags[0]      # e.g. ['-include']
        conf.env.CXXPCH_EXT = flags[1]    # e.g. '.pch' or '.gch'
        conf.env.CXXPCH_FLAGS = flags[2]  # e.g. ['-x', 'c++-header']
@TaskGen.feature('pch')
@TaskGen.before('process_source')
def apply_pch(self):
    """Create a 'gchx' task that precompiles self.headers into one PCH output.

    Runs before source processing so dependent task generators can later look
    the task up in bld.pch_tasks.  No-op unless configure enabled WITH_PCH and
    this task generator supplied a 'headers' attribute.
    """
    if not self.env.WITH_PCH:
        return

    # Registry of pch tasks, shared across the whole build context.
    if getattr(self.bld, 'pch_tasks', None) is None:
        self.bld.pch_tasks = {}

    if getattr(self, 'headers', None) is None:
        return
    self.headers = self.to_nodes(self.headers)

    # Fix: the original formatted the error with a chained `%` that only
    # worked through left-associativity, and bound an unused probe variable.
    # Build the registry key once and test membership explicitly instead.
    key = '%s.%s' % (getattr(self, 'name', None), self.idx)
    if getattr(self, 'name', None) and key in self.bld.pch_tasks:
        self.bld.fatal("Duplicated 'pch' task with name %r" % key)

    out = '%s.%d%s' % (self.target, self.idx, self.env['CXXPCH_EXT'])
    out = self.path.find_or_declare(out)

    task = self.create_task('gchx', self.headers, out)

    # target should be an absolute path of `out`, but without precompiled
    # header extension (the compiler resolves the extension itself).
    task.target = out.abspath()[:-len(out.suffix())]

    self.pch_task = task
    if getattr(self, 'name', None):
        self.bld.pch_tasks[key] = task
@TaskGen.feature('cxx')
@TaskGen.after_method('process_source', 'propagate_uselib_vars')
def add_pch(self):
    """Inject the precompiled header (via the CXXPCH_F flag, e.g. -include)
    into every C++ compile task of this task generator, if a pch task applies."""
    if not (self.env['WITH_PCH'] and getattr(self, 'use', None) and getattr(self, 'compiled_tasks', None) and getattr(self.bld, 'pch_tasks', None)):
        return

    pch = None
    # find pch task, if any
    if getattr(self, 'pch_task', None):
        pch = self.pch_task
    else:
        # Fall back to pch tasks registered under names listed in `use`.
        for use in Utils.to_list(self.use):
            try:
                pch = self.bld.pch_tasks[use]
            except KeyError:
                pass

    if pch:
        for x in self.compiled_tasks:
            x.env.append_value('CXXFLAGS', self.env['CXXPCH_F'] + [pch.target])
class gchx(Task.Task):
    """Task that compiles a set of headers into a precompiled header file."""
    run_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${CPPFLAGS} ${CXXPCH_FLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXXPCH_F:SRC} ${CXX_SRC_F}${SRC[0].abspath()} ${CXX_TGT_F}${TGT[0].abspath()}'
    scan = c_preproc.scan
    color = 'BLUE'
    ext_out = ['.h']

    def runnable_status(self):
        """Force a re-run under clang when any input or scanned dependency is
        newer than the output, even if waf's signature check says SKIP_ME."""
        try:
            node_deps = self.generator.bld.node_deps[self.uid()]
        except KeyError:
            node_deps = []

        ret = Task.Task.runnable_status(self)
        if ret == Task.SKIP_ME and self.env.CXX_NAME == 'clang':
            # Compare mtimes by hand: clang rejects a PCH older than its inputs.
            t = os.stat(self.outputs[0].abspath()).st_mtime
            for n in self.inputs + node_deps:
                if os.stat(n.abspath()).st_mtime > t:
                    return Task.RUN_ME

        return ret
| gpl-3.0 |
odicraig/kodi2odi | addons/plugin.video.footballreplays/net.py | 2 | 10235 | '''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cookielib
import gzip
import re
import StringIO
import urllib
import urllib2
import socket
#Set Global timeout - Useful for slow connections and Putlocker.
socket.setdefaulttimeout(30)
class HeadRequest(urllib2.Request):
    '''A Request class that sends HEAD requests'''
    def get_method(self):
        # urllib2 calls this to choose the HTTP verb; force HEAD.
        return 'HEAD'
class Net:
    '''
    This class wraps :mod:`urllib2` and provides an easy way to make http
    requests while taking care of cookies, proxies, gzip compression and
    character encoding.

    Example::

        from t0mm0.common.net import Net
        net = Net()
        response = net.http_GET('http://xbmc.org')
        print response.content
    '''
    # Class-level defaults.  Note the cookie jar is a class attribute and is
    # therefore shared by every instance that does not load its own file.
    _cj = cookielib.LWPCookieJar()
    _proxy = None
    _user_agent= 'XBMC'
    _http_debug = False

    def __init__(self, cookie_file='', proxy='', user_agent='',
                 http_debug=False):
        '''
        Kwargs:
            cookie_file (str): Full path to a file to be used to load and save
            cookies to.

            proxy (str): Proxy setting (eg.
            ``'http://user:pass@example.com:1234'``)

            user_agent (str): String to use as the User Agent header. If not
            supplied the class will use the default user agent ('XBMC').

            http_debug (bool): Set ``True`` to have HTTP header info written to
            the XBMC log for all requests.
        '''
        if cookie_file:
            self.set_cookies(cookie_file)
        if proxy:
            self.set_proxy(proxy)
        if user_agent:
            self.set_user_agent(user_agent)
        self._http_debug = http_debug
        self._update_opener()

    def set_cookies(self, cookie_file):
        '''
        Set the cookie file and try to load cookies from it if it exists.

        Args:
            cookie_file (str): Full path to a file to be used to load and save
            cookies to.
        '''
        try:
            self._cj.load(cookie_file, ignore_discard=True)
            self._update_opener()
            return True
        except:
            # Missing or unreadable cookie file is non-fatal; caller gets False.
            return False

    def get_cookies(self):
        '''Returns A dictionary containing all cookie information by domain.'''
        return self._cj._cookies

    def save_cookies(self, cookie_file):
        '''
        Saves cookies to a file.

        Args:
            cookie_file (str): Full path to a file to save cookies to.
        '''
        self._cj.save(cookie_file, ignore_discard=True)

    def set_proxy(self, proxy):
        '''
        Args:
            proxy (str): Proxy setting (eg.
            ``'http://user:pass@example.com:1234'``)
        '''
        self._proxy = proxy
        self._update_opener()

    def get_proxy(self):
        '''Returns string containing proxy details.'''
        return self._proxy

    def set_user_agent(self, user_agent):
        '''
        Args:
            user_agent (str): String to use as the User Agent header.
        '''
        self._user_agent = user_agent

    def get_user_agent(self):
        '''Returns user agent string.'''
        return self._user_agent

    def _update_opener(self):
        '''
        Builds and installs a new opener to be used by all future calls to
        :func:`urllib2.urlopen`.

        NOTE(review): install_opener() is process-global, so this affects
        every urllib2.urlopen caller, not only this instance.
        '''
        if self._http_debug:
            http = urllib2.HTTPHandler(debuglevel=1)
        else:
            http = urllib2.HTTPHandler()

        if self._proxy:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
                                          urllib2.ProxyHandler({'http':
                                                                self._proxy}),
                                          urllib2.HTTPBasicAuthHandler(),
                                          http)
        else:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
                                          urllib2.HTTPBasicAuthHandler(),
                                          http)
        urllib2.install_opener(opener)

    def http_GET(self, url, headers={}, compression=True):
        '''
        Perform an HTTP GET request.

        Args:
            url (str): The URL to GET.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
            to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
            compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        return self._fetch(url, headers=headers, compression=compression)

    def http_POST(self, url, form_data, headers={}, compression=True):
        '''
        Perform an HTTP POST request.

        Args:
            url (str): The URL to POST.

            form_data (dict): A dictionary of form data to POST.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
            to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
            compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        return self._fetch(url, form_data, headers=headers,
                           compression=compression)

    def http_HEAD(self, url, headers={}):
        '''
        Perform an HTTP HEAD request.

        Args:
            url (str): The URL to GET.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
            to add to the request. (eg. ``{'X-Test': 'testing'}``)

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page.
        '''
        req = HeadRequest(url)
        req.add_header('User-Agent', self._user_agent)
        for k, v in headers.items():
            req.add_header(k, v)
        response = urllib2.urlopen(req)
        return HttpResponse(response)

    def _fetch(self, url, form_data={}, headers={}, compression=True):
        '''
        Perform an HTTP GET or POST request.

        Args:
            url (str): The URL to GET or POST.

            form_data (dict): A dictionary of form data to POST. If empty, the
            request will be a GET, if it contains form data it will be a POST.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
            to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
            compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        # NOTE(review): 'encoding' is never used in this method -- leftover,
        # safe to delete.
        encoding = ''
        req = urllib2.Request(url)
        if form_data:
            # Presence of form data switches the request from GET to POST.
            form_data = urllib.urlencode(form_data)
            req = urllib2.Request(url, form_data)
        req.add_header('User-Agent', self._user_agent)
        for k, v in headers.items():
            req.add_header(k, v)
        if compression:
            req.add_header('Accept-Encoding', 'gzip')
        response = urllib2.urlopen(req)
        return HttpResponse(response)
class HttpResponse:
    '''
    This class represents a response from an HTTP request.

    The content is examined and every attempt is made to properly encode it to
    Unicode.

    .. seealso::
        :meth:`Net.http_GET`, :meth:`Net.http_HEAD` and :meth:`Net.http_POST`
    '''
    content = ''
    '''Unicode encoded string containing the body of the response.'''

    def __init__(self, response):
        '''
        Args:
            response (:class:`mimetools.Message`): The object returned by a call
            to :func:`urllib2.urlopen`.
        '''
        self._response = response
        html = response.read()
        try:
            # Transparently inflate gzip-compressed bodies.
            if response.headers['content-encoding'].lower() == 'gzip':
                html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
        except:
            pass

        try:
            # Prefer the charset declared in the Content-Type header.
            content_type = response.headers['content-type']
            if 'charset=' in content_type:
                encoding = content_type.split('charset=')[-1]
        except:
            pass

        # A <meta http-equiv="Content-Type"> charset in the body overrides the header.
        r = re.search('<meta\s+http-equiv="Content-Type"\s+content="(?:.+?);' +
                      '\s+charset=(.+?)"', html, re.IGNORECASE)
        if r:
            encoding = r.group(1)
        try:
            # NOTE(review): if no charset was found anywhere, 'encoding' is
            # unbound here; the resulting NameError is swallowed by the bare
            # except below and the body stays as raw bytes.
            html = unicode(html, encoding)
        except:
            pass
        self.content = html

    def get_headers(self):
        '''Returns a List of headers returned by the server.'''
        return self._response.info().headers

    def get_url(self):
        '''
        Return the URL of the resource retrieved, commonly used to determine if
        a redirect was followed.
        '''
        return self._response.geturl()
| gpl-3.0 |
eyohansa/django | tests/distinct_on_fields/tests.py | 263 | 5996 | from __future__ import unicode_literals
from django.db.models import Max
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import str_prefix
from .models import Celebrity, Fan, Staff, StaffTag, Tag
@skipUnlessDBFeature('can_distinct_on_fields')
@skipUnlessDBFeature('supports_nullable_unique_constraints')
class DistinctOnTests(TestCase):
    # Exercises QuerySet.distinct(*fields); the whole class is skipped on
    # database backends that do not support DISTINCT ON.

    def setUp(self):
        # Tag tree: t1 -> (t2, t3); t3 -> (t4, t5).
        t1 = Tag.objects.create(name='t1')
        Tag.objects.create(name='t2', parent=t1)
        t3 = Tag.objects.create(name='t3', parent=t1)
        Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)

        # Two organisations; 'p1' appears in both so name alone is ambiguous.
        self.p1_o1 = Staff.objects.create(id=1, name="p1", organisation="o1")
        self.p2_o1 = Staff.objects.create(id=2, name="p2", organisation="o1")
        self.p3_o1 = Staff.objects.create(id=3, name="p3", organisation="o1")
        self.p1_o2 = Staff.objects.create(id=4, name="p1", organisation="o2")
        self.p1_o1.coworkers.add(self.p2_o1, self.p3_o1)
        # Deliberately duplicated staff/tag pair to test m2m de-duplication.
        StaffTag.objects.create(staff=self.p1_o1, tag=t1)
        StaffTag.objects.create(staff=self.p1_o1, tag=t1)

        celeb1 = Celebrity.objects.create(name="c1")
        celeb2 = Celebrity.objects.create(name="c2")

        self.fan1 = Fan.objects.create(fan_of=celeb1)
        self.fan2 = Fan.objects.create(fan_of=celeb1)
        self.fan3 = Fan.objects.create(fan_of=celeb2)

    def test_basic_distinct_on(self):
        """QuerySet.distinct('field', ...) works"""
        # (qset, expected) tuples
        qsets = (
            (
                Staff.objects.distinct().order_by('name'),
                ['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
            ),
            (
                Staff.objects.distinct('name').order_by('name'),
                ['<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
            ),
            (
                Staff.objects.distinct('organisation').order_by('organisation', 'name'),
                ['<Staff: p1>', '<Staff: p1>'],
            ),
            (
                Staff.objects.distinct('name', 'organisation').order_by('name', 'organisation'),
                ['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
            ),
            (
                Celebrity.objects.filter(fan__in=[self.fan1, self.fan2, self.fan3]).distinct('name').order_by('name'),
                ['<Celebrity: c1>', '<Celebrity: c2>'],
            ),
            # Does combining querysets work?
            (
                (Celebrity.objects.filter(fan__in=[self.fan1, self.fan2]).
                 distinct('name').order_by('name') |
                 Celebrity.objects.filter(fan__in=[self.fan3]).
                 distinct('name').order_by('name')),
                ['<Celebrity: c1>', '<Celebrity: c2>'],
            ),
            (
                StaffTag.objects.distinct('staff', 'tag'),
                ['<StaffTag: t1 -> p1>'],
            ),
            (
                Tag.objects.order_by('parent__pk', 'pk').distinct('parent'),
                ['<Tag: t2>', '<Tag: t4>', '<Tag: t1>'],
            ),
            (
                StaffTag.objects.select_related('staff').distinct('staff__name').order_by('staff__name'),
                ['<StaffTag: t1 -> p1>'],
            ),
            # Fetch the alphabetically first coworker for each worker
            (
                (Staff.objects.distinct('id').order_by('id', 'coworkers__name').
                 values_list('id', 'coworkers__name')),
                [str_prefix("(1, %(_)s'p2')"), str_prefix("(2, %(_)s'p1')"),
                 str_prefix("(3, %(_)s'p1')"), "(4, None)"]
            ),
        )
        for qset, expected in qsets:
            self.assertQuerysetEqual(qset, expected)
            self.assertEqual(qset.count(), len(expected))

        # Combining queries with different distinct_fields is not allowed.
        base_qs = Celebrity.objects.all()
        self.assertRaisesMessage(
            AssertionError,
            "Cannot combine queries with different distinct fields.",
            lambda: (base_qs.distinct('id') & base_qs.distinct('name'))
        )

        # Test join unreffing
        c1 = Celebrity.objects.distinct('greatest_fan__id', 'greatest_fan__fan_of')
        self.assertIn('OUTER JOIN', str(c1.query))
        c2 = c1.distinct('pk')
        self.assertNotIn('OUTER JOIN', str(c2.query))

    def test_distinct_not_implemented_checks(self):
        # distinct + annotate not allowed
        with self.assertRaises(NotImplementedError):
            Celebrity.objects.annotate(Max('id')).distinct('id')[0]
        with self.assertRaises(NotImplementedError):
            Celebrity.objects.distinct('id').annotate(Max('id'))[0]
        # However this check is done only when the query executes, so you
        # can use distinct() to remove the fields before execution.
        Celebrity.objects.distinct('id').annotate(Max('id')).distinct()[0]
        # distinct + aggregate not allowed
        with self.assertRaises(NotImplementedError):
            Celebrity.objects.distinct('id').aggregate(Max('id'))

    def test_distinct_on_in_ordered_subquery(self):
        qs = Staff.objects.distinct('name').order_by('name', 'id')
        qs = Staff.objects.filter(pk__in=qs).order_by('name')
        self.assertQuerysetEqual(
            qs, [self.p1_o1, self.p2_o1, self.p3_o1],
            lambda x: x
        )
        qs = Staff.objects.distinct('name').order_by('name', '-id')
        qs = Staff.objects.filter(pk__in=qs).order_by('name')
        self.assertQuerysetEqual(
            qs, [self.p1_o2, self.p2_o1, self.p3_o1],
            lambda x: x
        )

    def test_distinct_on_get_ordering_preserved(self):
        """
        Ordering shouldn't be cleared when distinct on fields are specified.
        refs #25081
        """
        staff = Staff.objects.distinct('name').order_by('name', '-organisation').get(name='p1')
        self.assertEqual(staff.organisation, 'o2')
| bsd-3-clause |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/core/http.py | 2 | 10074 | from builtins import str
from builtins import range
from builtins import object
import time
import traceback
import six
import munch
from cloudconnectlib.common import util
from cloudconnectlib.common.log import get_cc_logger
from cloudconnectlib.core import defaults
from cloudconnectlib.core.exceptions import HTTPError
from httplib2 import Http, socks, ProxyInfo
from solnlib.packages.requests import PreparedRequest, utils
from solnlib.utils import is_true
try: # Python2 environment support
from httplib2 import SSLHandshakeError
except: # Python3 environment support
from ssl import SSLError as SSLHandshakeError
_logger = get_cc_logger()
_PROXY_TYPE_MAP = {
'http': socks.PROXY_TYPE_HTTP,
'http_no_tunnel': socks.PROXY_TYPE_HTTP_NO_TUNNEL,
'socks4': socks.PROXY_TYPE_SOCKS4,
'socks5': socks.PROXY_TYPE_SOCKS5,
}
class HTTPResponse(object):
    """
    HTTPResponse class wraps response of HTTP request for later use.
    """

    def __init__(self, response, content):
        """Construct a HTTPResponse from response and content returned
        with httplib2 request"""
        self._status_code = response.status
        self._header = response
        self._body = self._decode_content(response, content)

    @staticmethod
    def _decode_content(response, content):
        """Decode the raw body bytes using the charset advertised in the
        response headers, falling back to the configured default and finally
        to UTF-8 with replacement of undecodable bytes."""
        if not content:
            return ''

        charset = utils.get_encoding_from_headers(response)

        if charset is None:
            charset = defaults.charset
            _logger.info(
                'Unable to find charset in response headers,'
                ' set it to default "%s"', charset
            )

        _logger.info('Decoding response content with charset=%s', charset)

        try:
            return content.decode(charset, errors='replace')
        except Exception as ex:
            # errors='replace' cannot fail on bad bytes, but an unknown
            # charset name can; retry with utf-8 as a last resort.
            _logger.warning(
                'Failure decoding response content with charset=%s,'
                ' decode it with utf-8: %s',
                charset, ex
            )

        return content.decode('utf-8', errors='replace')

    @property
    def header(self):
        # The httplib2 response object doubles as the header mapping.
        return self._header

    @property
    def body(self):
        """
        Return response body as a `string`.

        :return: A `string`
        """
        return self._body

    @property
    def status_code(self):
        """
        Return response status code.

        :return: A `integer`
        """
        return self._status_code
def _make_prepare_url_func():
    """Build and return a URL-preparation closure backed by one shared
    `PreparedRequest` instance."""
    preparer = PreparedRequest()

    def prepare_url(url, params=None):
        """Escape illegal characters (e.g. spaces) in *url* and *params*
        using the requests library machinery and return the sanitized URL."""
        preparer.prepare_url(url, params=params)
        return preparer.url

    return prepare_url
def get_proxy_info(proxy_config):
    """Build a httplib2 ProxyInfo from a flat proxy_* config dict.

    Returns None when proxying is disabled or no config was supplied;
    raises ValueError on a missing url or an out-of-range port.
    """
    if not proxy_config or not is_true(proxy_config.get('proxy_enabled')):
        _logger.info('Proxy is not enabled')
        return None

    url = proxy_config.get('proxy_url')
    port = proxy_config.get('proxy_port')

    if url or port:
        if not url:
            raise ValueError('Proxy "url" must not be empty')
        if not util.is_valid_port(port):
            raise ValueError(
                'Proxy "port" must be in range [1,65535]: %s' % port
            )

    user = proxy_config.get('proxy_username')
    password = proxy_config.get('proxy_password')

    # Credentials are only used when both parts are present.
    if not all((user, password)):
        _logger.info('Proxy has no credentials found')
        user, password = None, None

    proxy_type = proxy_config.get('proxy_type')
    proxy_type = proxy_type.lower() if proxy_type else 'http'

    if proxy_type in _PROXY_TYPE_MAP:
        ptv = _PROXY_TYPE_MAP[proxy_type]
    elif proxy_type in list(_PROXY_TYPE_MAP.values()):
        # Already a socks.PROXY_TYPE_* constant; pass it through.
        ptv = proxy_type
    else:
        ptv = socks.PROXY_TYPE_HTTP
        _logger.info('Proxy type not found, set to "HTTP"')

    rdns = is_true(proxy_config.get('proxy_rdns'))

    # NOTE(review): int(port) assumes a port was supplied whenever the proxy
    # is enabled -- a missing port would raise TypeError here; verify callers.
    proxy_info = ProxyInfo(
        proxy_host=url,
        proxy_port=int(port),
        proxy_type=ptv,
        proxy_user=user,
        proxy_pass=password,
        proxy_rdns=rdns
    )
    return proxy_info
def standardize_proxy_config(proxy_config):
    """
    This function is used to standardize the proxy information structure to get it evaluated through `get_proxy_info` function
    """

    if not isinstance(proxy_config, dict):
        raise ValueError("Received unexpected format of proxy configuration. Expected format: object, Actual format: {}".format(type(proxy_config)))

    # Map each standardized key to its short alias.  The short form wins when
    # both spellings are present, matching the original lookup precedence.
    key_aliases = (
        ("proxy_enabled", "enabled"),
        ("proxy_username", "username"),
        ("proxy_password", "password"),
        ("proxy_url", "host"),
        ("proxy_type", "type"),
        ("proxy_port", "port"),
        ("proxy_rdns", "rdns"),
    )
    return {
        std_key: proxy_config.get(alias, proxy_config.get(std_key))
        for std_key, alias in key_aliases
    }
class HttpClient(object):
    """httplib2-backed HTTP client with optional proxying, lazy connection
    setup, URL sanitization, and exponential-backoff retries."""

    def __init__(self, proxy_info=None):
        """Constructs a `HTTPRequest` with a optional proxy setting.
        """
        # Connection is created lazily on the first send().
        self._connection = None

        if proxy_info:
            if isinstance(proxy_info, munch.Munch):
                proxy_info = dict(proxy_info)

            # Updating the proxy_info object to make it compatible for getting evaluated through `get_proxy_info` function
            proxy_info = standardize_proxy_config(proxy_info)
            self._proxy_info = get_proxy_info(proxy_info)
        else:
            self._proxy_info = proxy_info
        self._url_preparer = PreparedRequest()

    def _send_internal(self, uri, method, headers=None, body=None, proxy_info=None):
        """Do send request to target URL and validate SSL cert by default.
        If validation failed, disable it and try again."""
        try:
            return self._connection.request(
                uri, body=body, method=method, headers=headers
            )
        except SSLHandshakeError:
            _logger.warning(
                "[SSL: CERTIFICATE_VERIFY_FAILED] certificate verification failed. "
                "The certificate of the https server [%s] is not trusted, "
                "this add-on will proceed to connect with this certificate. "
                "You may need to check the certificate and "
                "refer to the documentation and add it to the trust list. %s",
                uri,
                traceback.format_exc()
            )
            # Rebuild the connection with certificate validation disabled,
            # then retry the request once.
            self._connection = self._build_http_connection(
                proxy_info=proxy_info,
                disable_ssl_cert_validation=True
            )
            return self._connection.request(
                uri, body=body, method=method, headers=headers
            )

    def _retry_send_request_if_needed(self, uri, method='GET', headers=None, body=None):
        """Invokes request and auto retry with an exponential backoff
        if the response status is configured in defaults.retry_statuses."""
        retries = max(defaults.retries, 0)
        _logger.info('Invoking request to [%s] using [%s] method', uri, method)
        for i in range(retries + 1):
            try:
                response, content = self._send_internal(
                    uri=uri, body=body, method=method, headers=headers
                )
            except Exception as err:
                # Transport-level failures are not retried; wrap and raise.
                _logger.exception(
                    'Could not send request url=%s method=%s', uri, method)
                raise HTTPError('HTTP Error %s' % str(err))

            status = response.status
            if self._is_need_retry(status, i, retries):
                # Exponential backoff: 1s, 2s, 4s, ...
                delay = 2 ** i
                _logger.warning(
                    'The response status=%s of request which url=%s and'
                    ' method=%s. Retry after %s seconds.',
                    status, uri, method, delay,
                )
                time.sleep(delay)
                continue
            return HttpResponse(response, content)

    def _prepare_url(self, url, params=None):
        # Delegate escaping of illegal URL characters to requests' machinery.
        self._url_preparer.prepare_url(url, params)
        return self._url_preparer.url

    def _initialize_connection(self):
        if self._proxy_info:
            _logger.info('Proxy is enabled for http connection.')
        else:
            _logger.info('Proxy is not enabled for http connection.')
        self._connection = self._build_http_connection(self._proxy_info)

    def send(self, request):
        """Send *request* (an object with url/method/headers/body attributes)
        and return an HTTPResponse; raises ValueError/TypeError on bad input."""
        if not request:
            raise ValueError('The request is none')
        if request.body and not isinstance(request.body, six.string_types):
            raise TypeError('Invalid request body type: {}'.format(request.body))

        if self._connection is None:
            self._initialize_connection()

        try:
            url = self._prepare_url(request.url)
        except Exception:
            # Fall back to the raw URL if sanitization fails.
            _logger.warning(
                'Failed to encode url=%s: %s',
                request.url, traceback.format_exc()
            )
            url = request.url

        return self._retry_send_request_if_needed(
            url, request.method, request.headers, request.body
        )

    @staticmethod
    def _build_http_connection(
            proxy_info=None,
            timeout=defaults.timeout,
            disable_ssl_cert_validation=defaults.disable_ssl_cert_validation):
        return Http(
            proxy_info=proxy_info,
            timeout=timeout,
            disable_ssl_certificate_validation=disable_ssl_cert_validation)

    @staticmethod
    def _is_need_retry(status, retried, maximum_retries):
        # Retry only while attempts remain and the status is retryable.
        return retried < maximum_retries \
            and status in defaults.retry_statuses
| isc |
tedye/leetcode | tools/leetcode.082.Remove Duplicates from Sorted List II/leetcode.082.Remove Duplicates from Sorted List II.submission8.py | 1 | 1096 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @return a ListNode
    def deleteDuplicates(self, head):
        """Remove every node whose value appears more than once in the sorted
        list (LeetCode 82), returning the new head.

        Rewritten from the original three-temp-pointer loop into the standard
        dummy/prev/cur run-skipping form: cur advances to the end of each run
        of equal values; prev only keeps nodes whose run had length one.
        O(n) time, O(1) extra space; same results for all inputs, including
        an empty list (returns None).
        """
        dummy = ListNode(0)
        dummy.next = head
        prev = dummy  # last node known to be kept
        cur = head
        while cur:
            # Advance cur to the last node of the current run of equal values.
            while cur.next and cur.next.val == cur.val:
                cur = cur.next
            if prev.next is cur:
                # Run of length 1: keep the node.
                prev = cur
            else:
                # Run of length > 1: splice the entire run out.
                prev.next = cur.next
            cur = cur.next
        return dummy.next
| mit |
yanheven/nova | nova/compute/build_results.py | 96 | 1039 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible results from instance build
Results represent the ultimate result of an attempt to build an instance.
Results describe whether an instance was actually built, failed to build, or
was rescheduled.
"""
# String identifiers for the three possible build outcomes.
ACTIVE = 'active'  # Instance is running
FAILED = 'failed'  # Instance failed to build and was not rescheduled
RESCHEDULED = 'rescheduled'  # Instance failed to build, but was rescheduled
| apache-2.0 |
hudie8655/google | lib/flask/ctx.py | 776 | 14266 | # -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
return object.__repr__(self)
def after_this_request(f):
    """Register *f* to run against the response of the current request.

    The decorated function receives the response object and must return the
    same object or a replacement.  Useful when something other than the view
    function (e.g. a decorator) needs to adjust the outgoing response::

        @app.route('/')
        def index():
            @after_this_request
            def add_header(response):
                response.headers['X-Foo'] = 'Parachute'
                return response
            return 'Hello World!'

    .. versionadded:: 0.9
    """
    ctx = _request_ctx_stack.top
    ctx._after_request_functions.append(f)
    return f
def copy_current_request_context(f):
    """Decorate *f* so it runs under a copy of the current request context.

    The copy is taken at decoration time and pushed on every call, which
    makes the wrapped function safe to hand to another greenlet::

        import gevent
        from flask import copy_current_request_context

        @app.route('/')
        def index():
            @copy_current_request_context
            def do_some_work():
                # can access flask.request here as usual
                ...
            gevent.spawn(do_some_work)
            return 'Regular response'

    .. versionadded:: 0.10
    """
    current = _request_ctx_stack.top
    if current is None:
        raise RuntimeError('This decorator can only be used at local scopes '
            'when a request context is on the stack. For instance within '
            'view functions.')
    snapshot = current.copy()

    def wrapper(*args, **kwargs):
        with snapshot:
            return f(*args, **kwargs)

    return update_wrapper(wrapper, f)
def has_request_context():
    """Return ``True`` when a request context is currently pushed.

    Lets code use request information opportunistically and degrade quietly
    when none is available::

        class User(db.Model):
            def __init__(self, username, remote_addr=None):
                self.username = username
                if remote_addr is None and has_request_context():
                    remote_addr = request.remote_addr
                self.remote_addr = remote_addr

    Truth-testing the context-bound proxies (``request``, ``g``) is an
    equivalent alternative.

    .. versionadded:: 0.7
    """
    return _request_ctx_stack.top is not None
def has_app_context():
    """Return ``True`` when an application context is currently pushed.

    The application-context counterpart of :func:`has_request_context`;
    truth-testing :data:`current_app` works as well.

    .. versionadded:: 0.9
    """
    return _app_ctx_stack.top is not None
class AppContext(object):
    """The application context binds an application object implicitly
    to the current thread or greenlet, similar to how the
    :class:`RequestContext` binds request information. The application
    context is also implicitly created if a request context is created
    but the application is not on top of the individual application
    context.
    """
    def __init__(self, app):
        self.app = app
        self.url_adapter = app.create_url_adapter(None)
        self.g = app.app_ctx_globals_class()
        # Like request context, app contexts can be pushed multiple times
        # but there a basic "refcount" is enough to track them.
        self._refcnt = 0
    def push(self):
        """Binds the app context to the current context."""
        self._refcnt += 1
        _app_ctx_stack.push(self)
        appcontext_pushed.send(self.app)
    def pop(self, exc=None):
        """Pops the app context."""
        self._refcnt -= 1
        # Teardown functions run only when the outermost push is undone
        # (refcount reaches zero); the signal below fires on every pop.
        if self._refcnt <= 0:
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_appcontext(exc)
        rv = _app_ctx_stack.pop()
        assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
            % (rv, self)
        appcontext_popped.send(self.app)
    def __enter__(self):
        # Context-manager protocol: push on enter, pop (with any exception
        # value) on exit.
        self.push()
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self.pop(exc_value)
class RequestContext(object):
    """The request context contains all request relevant information. It is
    created at the beginning of the request and pushed to the
    `_request_ctx_stack` and removed at the end of it. It will create the
    URL adapter and request object for the WSGI environment provided.
    Do not attempt to use this class directly, instead use
    :meth:`~flask.Flask.test_request_context` and
    :meth:`~flask.Flask.request_context` to create this object.
    When the request context is popped, it will evaluate all the
    functions registered on the application for teardown execution
    (:meth:`~flask.Flask.teardown_request`).
    The request context is automatically popped at the end of the request
    for you. In debug mode the request context is kept around if
    exceptions happen so that interactive debuggers have a chance to
    introspect the data. With 0.4 this can also be forced for requests
    that did not fail and outside of `DEBUG` mode. By setting
    ``'flask._preserve_context'`` to `True` on the WSGI environment the
    context will not pop itself at the end of the request. This is used by
    the :meth:`~flask.Flask.test_client` for example to implement the
    deferred cleanup functionality.
    You might find this helpful for unittests where you need the
    information from the context local around for a little longer. Make
    sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
    that situation, otherwise your unittests will leak memory.
    """
    def __init__(self, app, environ, request=None):
        self.app = app
        if request is None:
            request = app.request_class(environ)
        self.request = request
        self.url_adapter = app.create_url_adapter(self.request)
        self.flashes = None
        self.session = None
        # Request contexts can be pushed multiple times and interleaved with
        # other request contexts. Now only if the last level is popped we
        # get rid of them. Additionally if an application context is missing
        # one is created implicitly so for each level we add this information
        self._implicit_app_ctx_stack = []
        # indicator if the context was preserved. Next time another context
        # is pushed the preserved context is popped.
        self.preserved = False
        # remembers the exception for pop if there is one in case the context
        # preservation kicks in.
        self._preserved_exc = None
        # Functions that should be executed after the request on the response
        # object. These will be called before the regular "after_request"
        # functions.
        self._after_request_functions = []
        self.match_request()
        # XXX: Support for deprecated functionality. This is going away with
        # Flask 1.0
        blueprint = self.request.blueprint
        if blueprint is not None:
            # better safe than sorry, we don't want to break code that
            # already worked
            bp = app.blueprints.get(blueprint)
            if bp is not None and blueprint_is_module(bp):
                self.request._is_old_module = True
    def _get_g(self):
        return _app_ctx_stack.top.g
    def _set_g(self, value):
        _app_ctx_stack.top.g = value
    # ``ctx.g`` proxies to the ``g`` of the topmost *application* context,
    # not to an attribute stored on this request context.
    g = property(_get_g, _set_g)
    del _get_g, _set_g
    def copy(self):
        """Creates a copy of this request context with the same request object.
        This can be used to move a request context to a different greenlet.
        Because the actual request object is the same this cannot be used to
        move a request context to a different thread unless access to the
        request object is locked.
        .. versionadded:: 0.10
        """
        return self.__class__(self.app,
            environ=self.request.environ,
            request=self.request
        )
    def match_request(self):
        """Can be overridden by a subclass to hook into the matching
        of the request.
        """
        try:
            url_rule, self.request.view_args = \
                self.url_adapter.match(return_rule=True)
            self.request.url_rule = url_rule
        except HTTPException as e:
            # Routing failed: remember the HTTP error on the request object
            # instead of raising here.
            self.request.routing_exception = e
    def push(self):
        """Binds the request context to the current context."""
        # If an exception occurs in debug mode or if context preservation is
        # activated under exception situations exactly one context stays
        # on the stack. The rationale is that you want to access that
        # information under debug situations. However if someone forgets to
        # pop that context again we want to make sure that on the next push
        # it's invalidated, otherwise we run at risk that something leaks
        # memory. This is usually only a problem in testsuite since this
        # functionality is not active in production environments.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop(top._preserved_exc)
        # Before we push the request context we have to ensure that there
        # is an application context.
        app_ctx = _app_ctx_stack.top
        if app_ctx is None or app_ctx.app != self.app:
            app_ctx = self.app.app_context()
            app_ctx.push()
            self._implicit_app_ctx_stack.append(app_ctx)
        else:
            self._implicit_app_ctx_stack.append(None)
        _request_ctx_stack.push(self)
        # Open the session at the moment that the request context is
        # available. This allows a custom open_session method to use the
        # request context (e.g. code that access database information
        # stored on `g` instead of the appcontext).
        self.session = self.app.open_session(self.request)
        if self.session is None:
            self.session = self.app.make_null_session()
    def pop(self, exc=None):
        """Pops the request context and unbinds it by doing that. This will
        also trigger the execution of functions registered by the
        :meth:`~flask.Flask.teardown_request` decorator.
        .. versionchanged:: 0.9
           Added the `exc` argument.
        """
        app_ctx = self._implicit_app_ctx_stack.pop()
        clear_request = False
        # Teardown and request cleanup happen only when the outermost level
        # of this (possibly re-pushed) context is being popped.
        if not self._implicit_app_ctx_stack:
            self.preserved = False
            self._preserved_exc = None
            if exc is None:
                exc = sys.exc_info()[1]
            self.app.do_teardown_request(exc)
            # If this interpreter supports clearing the exception information
            # we do that now. This will only go into effect on Python 2.x,
            # on 3.x it disappears automatically at the end of the exception
            # stack.
            if hasattr(sys, 'exc_clear'):
                sys.exc_clear()
            request_close = getattr(self.request, 'close', None)
            if request_close is not None:
                request_close()
            clear_request = True
        rv = _request_ctx_stack.pop()
        assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
            % (rv, self)
        # get rid of circular dependencies at the end of the request
        # so that we don't require the GC to be active.
        if clear_request:
            rv.request.environ['werkzeug.request'] = None
        # Get rid of the app as well if necessary.
        if app_ctx is not None:
            app_ctx.pop(exc)
    def auto_pop(self, exc):
        # Keep the context alive (for the debugger / test client) when asked
        # to, otherwise pop it normally.
        if self.request.environ.get('flask._preserve_context') or \
           (exc is not None and self.app.preserve_context_on_exception):
            self.preserved = True
            self._preserved_exc = exc
        else:
            self.pop(exc)
    def __enter__(self):
        self.push()
        return self
    def __exit__(self, exc_type, exc_value, tb):
        # do not pop the request stack if we are in debug mode and an
        # exception happened. This will allow the debugger to still
        # access the request object in the interactive shell. Furthermore
        # the context can be force kept alive for the test client.
        # See flask.testing for how this works.
        self.auto_pop(exc_value)
    def __repr__(self):
        return '<%s \'%s\' [%s] of %s>' % (
            self.__class__.__name__,
            self.request.url,
            self.request.method,
            self.app.name,
        )
| apache-2.0 |
ARM-software/trappy | trappy/cpu_power.py | 1 | 6417 | # Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Process the output of the cpu_cooling devices in the current
directory's trace.dat"""
from __future__ import division
from __future__ import unicode_literals
import pandas as pd
from trappy.base import Base
from trappy.dynamic import register_ftrace_parser
def pivot_with_labels(dfr, data_col_name, new_col_name, mapping_label):
    """Pivot a :mod:`pandas.DataFrame` row into columns

    :param dfr: The :mod:`pandas.DataFrame` to operate on.
    :param data_col_name: The name of the column in the :mod:`pandas.DataFrame`
        which contains the values.
    :param new_col_name: The name of the column in the :mod:`pandas.DataFrame`
        that will become the new columns.
    :param mapping_label: A dictionary whose keys are the values in
        new_col_name and whose values are their
        corresponding name in the :mod:`pandas.DataFrame` to be returned.
    :type dfr: :mod:`pandas.DataFrame`
    :type data_col_name: str
    :type new_col_name: str
    :type mapping_label: dict
    :raises KeyError: if a value of *new_col_name* has no entry in
        *mapping_label*.

    Example:
        >>> dfr_in = pd.DataFrame({'cpus': ["000000f0",
        >>>                                 "0000000f",
        >>>                                 "000000f0",
        >>>                                 "0000000f"
        >>>                                 ],
        >>>                        'freq': [1, 3, 2, 6]})
        >>> map_label = {"000000f0": "A15", "0000000f": "A7"}
        >>> power.pivot_with_labels(dfr_in, "freq", "cpus", map_label)
           A15  A7
        0    1 NaN
        1    1   3
        2    2   3
        3    2   6
    """
    # There has to be a more "pandas" way of doing this.
    col_set = set(dfr[new_col_name])
    ret_series = {}
    for col in col_set:
        try:
            label = mapping_label[col]
        except KeyError:
            available_keys = ", ".join(mapping_label.keys())
            error_str = '"{}" not found, available keys: {}'.format(col,
                                                                    available_keys)
            raise KeyError(error_str)
        data = dfr[dfr[new_col_name] == col][data_col_name]
        ret_series[label] = data
    # .ffill() forward-fills the gaps left by the pivot; it replaces
    # fillna(method="pad"), whose "method" argument is deprecated since
    # pandas 2.0 and removed in pandas 3.
    return pd.DataFrame(ret_series).ffill()
def num_cpus_in_mask(mask):
    """Return the number of cpus in a cpumask.

    *mask* is a hexadecimal cpumask string, optionally comma-separated
    (e.g. ``"ff,000000f0"``); the result is its number of set bits.
    """
    digits = mask.replace(",", "")
    return format(int(digits, 16), "b").count("1")
class CpuOutPower(Base):
    """Process the cpufreq cooling power actor data in a ftrace dump"""
    unique_word = "thermal_power_cpu_limit"
    """The unique word that will be matched in a trace line"""
    name = "cpu_out_power"
    """The name of the :mod:`pandas.DataFrame` member that will be created in a
    :mod:`trappy.ftrace.FTrace` object"""
    pivot = "cpus"
    """The Pivot along which the data is orthogonal"""
    def get_all_freqs(self, mapping_label):
        """Get a :mod:`pandas.DataFrame` with the maximum frequencies allowed by the governor
        :param mapping_label: A dictionary that maps cpumasks to name
            of the cpu.
        :type mapping_label: dict
        :return: freqs are in MHz
        """
        dfr = self.data_frame
        # Presumably the trace reports "freq" in kHz, hence /1000 for MHz --
        # TODO confirm against the trace event definition.
        return pivot_with_labels(dfr, "freq", "cpus", mapping_label) / 1000
# Register the parser so FTrace objects created with the "thermal" scope
# pick it up.
register_ftrace_parser(CpuOutPower, "thermal")
class CpuInPower(Base):
    """Process the cpufreq cooling power actor data in a ftrace dump
    """
    unique_word = "thermal_power_cpu_get_power"
    """The unique word that will be matched in a trace line"""
    name = "cpu_in_power"
    """The name of the :mod:`pandas.DataFrame` member that will be created in a
    :mod:`trappy.ftrace.FTrace` object"""
    pivot = "cpus"
    """The Pivot along which the data is orthogonal"""
    def _get_load_series(self):
        """get a :mod:`pandas.Series` with the aggregated load"""
        dfr = self.data_frame
        # Sum every per-cpu "load<N>" column into a single Series.  The
        # explicit += loop (rather than DataFrame.sum) keeps pandas' NaN
        # propagation: a NaN in any column makes the row's total NaN.
        load_cols = [s for s in dfr.columns if s.startswith("load")]
        load_series = dfr[load_cols[0]].copy()
        for col in load_cols[1:]:
            load_series += dfr[col]
        return load_series
    def get_load_data(self, mapping_label):
        """Return :mod:`pandas.DataFrame` suitable for plot_load()
        :param mapping_label: A Dictionary mapping cluster cpumasks to labels
        :type mapping_label: dict
        """
        dfr = self.data_frame
        load_series = self._get_load_series()
        load_dfr = pd.DataFrame({"cpus": dfr["cpus"], "load": load_series})
        return pivot_with_labels(load_dfr, "load", "cpus", mapping_label)
    def get_normalized_load_data(self, mapping_label):
        """Return a :mod:`pandas.DataFrame` for plotting normalized load data
        :param mapping_label: should be a dictionary mapping cluster cpumasks
            to labels
        :type mapping_label: dict
        """
        dfr = self.data_frame
        load_series = self._get_load_series()
        # Weight the load by the current frequency, then normalize by the
        # cluster's maximum observed frequency times its cpu count.
        load_series *= dfr['freq']
        for cpumask in mapping_label:
            num_cpus = num_cpus_in_mask(cpumask)
            idx = dfr["cpus"] == cpumask
            max_freq = max(dfr[idx]["freq"])
            load_series[idx] = load_series[idx] / (max_freq * num_cpus)
        load_dfr = pd.DataFrame({"cpus": dfr["cpus"], "load": load_series})
        return pivot_with_labels(load_dfr, "load", "cpus", mapping_label)
    def get_all_freqs(self, mapping_label):
        """get a :mod:`pandas.DataFrame` with the "in" frequencies as seen by the governor
        .. note::
            Frequencies are in MHz
        """
        dfr = self.data_frame
        # Same kHz -> MHz conversion assumption as CpuOutPower.get_all_freqs.
        return pivot_with_labels(dfr, "freq", "cpus", mapping_label) / 1000
# Register the parser so FTrace objects created with the "thermal" scope
# pick it up.
register_ftrace_parser(CpuInPower, "thermal")
| apache-2.0 |
Kamigami55/anime-checker | main.py | 1 | 2465 | #!/usr/bin/python3
# coding: utf8
###########################################################
#
# anime-checker.py
#
# by Eason Chang <eason@easonchang.com>
#
# A python script to automatically check whether my favorite animes
# have updated and then send me an email to notify me.
#
# This script does a one-time check.
# This script should be set as a scheduled job by using crontab.
#
# Contains 2 config files:
# - .env : stores environment variables of my email addresses and
# password.
# - animes.json : stores a list of my favorite animes, including title,
# website url, and current episode number.
#
###########################################################
import logging
import jsonpickle
from os import path
from packages.gmail.mailClient import MailClient
# Set logging config
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
# Disable logging
# NOTE(review): logging.disable(CRITICAL) silences *all* levels, making the
# basicConfig above inert -- comment this line out to re-enable logging.
logging.disable(logging.CRITICAL)
# Shared Gmail client; performCheck() passes it to each DM's sendMail().
mailClient = MailClient()
def loadFile(filePath):
    """Read *filePath* and return its jsonpickle-decoded content.

    Used to load the list of favorite animes from ./storage.json.

    :param filePath: path of the jsonpickle-encoded file to read
    :return: the decoded Python object
    """
    # 'with' guarantees the handle is closed even if read() or decode()
    # raises (the original left it open on error).
    with open(filePath, 'r') as file:
        fileContent = file.read()
    return jsonpickle.decode(fileContent)
def performCheck(DMs):
    """Check every DM for an update; email a notification for each one found.

    :param DMs: iterable of DM objects exposing checkUpdate() and sendMail()
    :return: True when at least one DM reported an update, else False
    """
    hasDMUpdated = False
    # Iterate directly instead of range(len(...)); the former 'global
    # mailClient' was dropped -- reading a module-level name needs no
    # 'global' declaration.
    for DM in DMs:
        if DM.checkUpdate():
            # this DM has been updated: send a notification email
            DM.sendMail(mailClient)
            hasDMUpdated = True
    return hasDMUpdated
def updateFile(filePath, content):
    """Serialize *content* with jsonpickle and write it back to *filePath*.

    Used to persist updated episode numbers to ./storage.json.

    :param filePath: destination file path
    :param content: the object to encode and store
    """
    # Apply identical pretty-printing options to every JSON backend
    # jsonpickle may select (the original repeated the call three times).
    for backend in ('simplejson', 'json', 'demjson'):
        jsonpickle.set_encoder_options(backend, sort_keys=True, indent=4,
                                       ensure_ascii=False)
    fileContent = jsonpickle.encode(content)
    # 'with' closes the handle even when write() raises.
    with open(filePath, 'w') as file:
        file.write(fileContent)
def main():
    """Entry point: load the DM list, run one check, persist any updates."""
    storage_path = path.join(path.dirname(__file__), 'storage.json')
    dm_list = loadFile(storage_path)
    if performCheck(dm_list):
        updateFile(storage_path, dm_list)
        print('File updated')
    else:
        print('新番尚未更新哦')
if __name__ == '__main__':
main()
| mit |
aidanlister/django | django/contrib/gis/gdal/envelope.py | 477 | 7009 | """
The GDAL/OGR library uses an Envelope structure to hold the bounding
box information for a geometry. The envelope (bounding box) contains
two pairs of coordinates, one for the lower left coordinate and one
for the upper right coordinate:
+----------o Upper right; (max_x, max_y)
| |
| |
| |
Lower left (min_x, min_y) o----------+
"""
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import GDALException
# The OGR definition of an Envelope is a C structure containing four doubles.
# See the 'ogr_core.h' source file for more information:
# http://www.gdal.org/ogr/ogr__core_8h-source.html
class OGREnvelope(Structure):
    "Represents the OGREnvelope C Structure."
    _fields_ = [("MinX", c_double),
                ("MaxX", c_double),
                ("MinY", c_double),
                ("MaxY", c_double),
                ]


class Envelope(object):
    """
    The Envelope object is a C structure that contains the minimum and
    maximum X, Y coordinates for a rectangle bounding box. The naming
    of the variables is compatible with the OGR Envelope structure.
    """
    def __init__(self, *args):
        """
        The initialization function may take an OGREnvelope structure, 4-element
        tuple or list, or 4 individual arguments.
        """
        if len(args) == 1:
            if isinstance(args[0], OGREnvelope):
                # OGREnvelope (a ctypes Structure) was passed in.
                self._envelope = args[0]
            elif isinstance(args[0], (tuple, list)):
                # A tuple was passed in.
                if len(args[0]) != 4:
                    raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
                else:
                    self._from_sequence(args[0])
            else:
                raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
        elif len(args) == 4:
            # Individual parameters passed in.
            #  Thanks to ww for the help
            self._from_sequence([float(a) for a in args])
        else:
            raise GDALException('Incorrect number (%d) of arguments.' % len(args))
        # Checking the x,y coordinates
        if self.min_x > self.max_x:
            raise GDALException('Envelope minimum X > maximum X.')
        if self.min_y > self.max_y:
            raise GDALException('Envelope minimum Y > maximum Y.')
    def __eq__(self, other):
        """
        Returns True if the envelopes are equivalent; can compare against
        other Envelopes and 4-tuples.
        """
        if isinstance(other, Envelope):
            return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
                   (self.max_x == other.max_x) and (self.max_y == other.max_y)
        elif isinstance(other, tuple) and len(other) == 4:
            return (self.min_x == other[0]) and (self.min_y == other[1]) and \
                   (self.max_x == other[2]) and (self.max_y == other[3])
        else:
            raise GDALException('Equivalence testing only works with other Envelopes.')
    def __str__(self):
        "Returns a string representation of the tuple."
        return str(self.tuple)
    def _from_sequence(self, seq):
        "Initializes the C OGR Envelope structure from the given sequence."
        self._envelope = OGREnvelope()
        self._envelope.MinX = seq[0]
        self._envelope.MinY = seq[1]
        self._envelope.MaxX = seq[2]
        self._envelope.MaxY = seq[3]
    def expand_to_include(self, *args):
        """
        Modifies the envelope to expand to include the boundaries of
        the passed-in 2-tuple (a point), 4-tuple (an extent) or
        envelope.
        """
        # We provide a number of different signatures for this method,
        # and the logic here is all about converting them into a
        # 4-tuple single parameter which does the actual work of
        # expanding the envelope.
        if len(args) == 1:
            if isinstance(args[0], Envelope):
                return self.expand_to_include(args[0].tuple)
            elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
                return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
            elif isinstance(args[0], (tuple, list)):
                # A tuple was passed in.
                if len(args[0]) == 2:
                    return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
                elif len(args[0]) == 4:
                    (minx, miny, maxx, maxy) = args[0]
                    if minx < self._envelope.MinX:
                        self._envelope.MinX = minx
                    if miny < self._envelope.MinY:
                        self._envelope.MinY = miny
                    if maxx > self._envelope.MaxX:
                        self._envelope.MaxX = maxx
                    if maxy > self._envelope.MaxY:
                        self._envelope.MaxY = maxy
                else:
                    raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
            else:
                raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
        elif len(args) == 2:
            # An x and an y parameter were passed in
            return self.expand_to_include((args[0], args[1], args[0], args[1]))
        elif len(args) == 4:
            # Individual parameters passed in.
            return self.expand_to_include(args)
        else:
            # BUG FIX: this error path used len(args[0]), which raises
            # TypeError on a scalar first argument (e.g. three numbers) and
            # would report the wrong count anyway.  len(args) matches the
            # equivalent check in __init__.
            raise GDALException('Incorrect number (%d) of arguments.' % len(args))
    @property
    def min_x(self):
        "Returns the value of the minimum X coordinate."
        return self._envelope.MinX
    @property
    def min_y(self):
        "Returns the value of the minimum Y coordinate."
        return self._envelope.MinY
    @property
    def max_x(self):
        "Returns the value of the maximum X coordinate."
        return self._envelope.MaxX
    @property
    def max_y(self):
        "Returns the value of the maximum Y coordinate."
        return self._envelope.MaxY
    @property
    def ur(self):
        "Returns the upper-right coordinate."
        return (self.max_x, self.max_y)
    @property
    def ll(self):
        "Returns the lower-left coordinate."
        return (self.min_x, self.min_y)
    @property
    def tuple(self):
        "Returns a tuple representing the envelope."
        return (self.min_x, self.min_y, self.max_x, self.max_y)
    @property
    def wkt(self):
        "Returns WKT representing a Polygon for this envelope."
        # TODO: Fix significant figures.
        return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
               (self.min_x, self.min_y, self.min_x, self.max_y,
                self.max_x, self.max_y, self.max_x, self.min_y,
                self.min_x, self.min_y)
| bsd-3-clause |
MCRSoftwares/AcadSocial | mainAcad/methods.py | 1 | 3190 | # -*- encoding: utf-8 -*-
"""
Equipe MCRSoftwares - AcadSocial
Versão do Código: 01v001a
Responsável: Victor Ferraz
Auxiliar: -
Requisito(s): RF001, RF002, RF020, RF022, RF023, RF027
Caso(s) de Uso: DV001, DV002, DVA012
Descrição:
Definição dos métodos auxiliares relacionados à aplicação principal.
"""
from datetime import datetime
from AcadSocial.settings import MEDIA_ROOT, EMAIL_HOST_USER
from AcadSocial.settings import AWS_S3_ACCESS_KEY_ID, AWS_S3_SECRET_ACCESS_KEY, AWS_STORAGE_BUCKET_NAME
from django.core.mail import EmailMessage
from PIL import Image
import hashlib
import random
import os
import boto
from boto.s3.key import Key
from django.utils import timezone
def upload_image_as(instance, filename):
    """Build the storage path for an uploaded image: media/<timestamp><hash>.jpg."""
    hash_part = gerar_chave_imagem(str(filename))[:5]
    timestamp = timezone.now().strftime("%Y%m%d%H%M%S")
    # NOTE(review): 'media/' already ends with a slash, so the '%s/%s' join
    # produces 'media//<name>' -- confirm the double slash is intended.
    return '%s/%s%s%s' % ('media/', timestamp, hash_part, '.jpg')
def upload_with_boto(thumb120, thumb68, thumb45):
    # Upload the three thumbnail files to the configured S3 bucket and make
    # them public; each local file is deleted after upload (see
    # generate_image_bucket_key).
    bucket_name = AWS_STORAGE_BUCKET_NAME
    conn = boto.connect_s3(AWS_S3_ACCESS_KEY_ID, AWS_S3_SECRET_ACCESS_KEY)
    bucket = conn.get_bucket(bucket_name)
    generate_image_bucket_key(thumb120, bucket)
    generate_image_bucket_key(thumb68, bucket)
    generate_image_bucket_key(thumb45, bucket)
def generate_image_bucket_key(imagem, bucket):
    # Push a single local file to S3 under its own path as the key, make it
    # publicly readable, then remove the local copy.
    bucket_key = Key(bucket)
    bucket_key.key = imagem
    bucket_key.set_contents_from_filename(imagem)
    bucket_key.make_public()
    os.remove(imagem)
def gerar_chave_imagem(param):
    """Return a 40-char SHA-1 hex key derived from *param* plus a random salt.

    A 5-character salt hashed from random.random() is prepended to *param*,
    so repeated calls with the same argument yield different keys.
    """
    # .encode() fixes the Python-2-only hashlib.sha1(str) calls, which raise
    # TypeError under Python 3 (hashlib requires bytes).
    hash_salt = hashlib.sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
    chave = hashlib.sha1((hash_salt + param).encode('utf-8')).hexdigest()
    return chave
def gerar_nome_imagem(random_id):
    """Build a photo file name from the current date/time plus a 5-char
    random key seeded with *random_id*; always ends in .jpg."""
    stamp = datetime.today().strftime('%Y%m%d%H%M%S')
    sufixo = gerar_chave_imagem(str(random_id))[:5]
    return stamp + sufixo + '.jpg'
def converter_para_jpg(imagem_path, media_path):
    """
    Convert the user-uploaded image at *imagem_path* to JPEG under
    MEDIA_ROOT + *media_path* and return its media-relative path
    (leading character of *media_path* stripped).
    """
    imagem = Image.open(imagem_path)
    # Paste onto a white RGB canvas of the same size; JPEG has no alpha
    # channel, so this flattens the source onto a white background.
    nova_imagem = Image.new("RGB", imagem.size, (255, 255, 255))
    nova_imagem.paste(imagem)
    imagem_name = gerar_nome_imagem(imagem_path)
    nova_imagem_path = str(MEDIA_ROOT + media_path + imagem_name)
    nova_imagem.save(nova_imagem_path, 'JPEG', quality=95)
    return media_path[1:] + imagem_name
def gerar_thumbnail(imagem, img_path, name, size):
    """Resize *imagem* to *size* and save it as JPEG next to *img_path*.

    The thumbnail file name is *img_path* without its extension, plus *name*
    plus '.jpg'.  Returns the saved path.
    """
    # Image.LANCZOS replaces Image.ANTIALIAS, which was deprecated in
    # Pillow 9.1 and removed in Pillow 10 (both name the same filter).
    thumbnail = imagem.resize(size, Image.LANCZOS)
    imagem_path = os.path.splitext(img_path)[0]
    thumbnail_media_path = imagem_path + name + '.jpg'
    thumbnail.save(thumbnail_media_path, 'JPEG', quality=95)
    return thumbnail_media_path
def enviar_email(nome, email, assunto, conteudo):
    """Forward a contact message from *nome* <*email*> to EMAIL_HOST_USER.

    The sender's name and address are prepended to the body; returns the
    number of messages sent (Django EmailMessage.send()).
    """
    # (The trailing "| gpl-2.0 |" residue fused onto the original return line
    # was extraction junk and has been removed.)
    conteudo_email = '[Enviado por %s (%s)]\n\n %s' % (nome, email, conteudo)
    email_msg = EmailMessage(subject=assunto, body=conteudo_email,
                             from_email=email, to=[EMAIL_HOST_USER])
    return email_msg.send()
BeATz-UnKNoWN/python-for-android | python-modules/twisted/twisted/mail/pop3client.py | 53 | 24432 | # -*- test-case-name: twisted.mail.test.test_pop3client -*-
# Copyright (c) 2001-2004 Divmod Inc.
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
POP3 client protocol implementation
Don't use this module directly. Use twisted.mail.pop3 instead.
@author: Jp Calderone
"""
import re
from twisted.python import log
from twisted.python.hashlib import md5
from twisted.internet import defer
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import error
from twisted.internet import interfaces
OK = '+OK'
ERR = '-ERR'
class POP3ClientError(Exception):
    """Base class for all exceptions raised by POP3Client.
    """
class InsecureAuthenticationDisallowed(POP3ClientError):
    """Secure authentication was required but no mechanism could be found.
    """
class TLSError(POP3ClientError):
    """
    Secure authentication was required but either the transport does
    not support TLS or no TLS context factory was supplied.
    """
class TLSNotSupportedError(POP3ClientError):
    """
    Secure authentication was required but the server does not support
    TLS.
    """
class ServerErrorResponse(POP3ClientError):
    """The server returned an error response to a request.
    """
    def __init__(self, reason, consumer=None):
        POP3ClientError.__init__(self, reason)
        # The consumer that was receiving the multi-line response, if any,
        # so the caller can recover data delivered before the error.
        self.consumer = consumer
class LineTooLong(POP3ClientError):
    """The server sent an extremely long line.
    """
class _ListSetter:
# Internal helper. POP3 responses sometimes occur in the
# form of a list of lines containing two pieces of data,
# a message index and a value of some sort. When a message
# is deleted, it is omitted from these responses. The
# setitem method of this class is meant to be called with
# these two values. In the cases where indexes are skipped,
# it takes care of padding out the missing values with None.
def __init__(self, L):
self.L = L
def setitem(self, (item, value)):
diff = item - len(self.L) + 1
if diff > 0:
self.L.extend([None] * diff)
self.L[item] = value
def _statXform(line):
# Parse a STAT response
numMsgs, totalSize = line.split(None, 1)
return int(numMsgs), int(totalSize)
def _listXform(line):
# Parse a LIST response
index, size = line.split(None, 1)
return int(index) - 1, int(size)
def _uidXform(line):
# Parse a UIDL response
index, uid = line.split(None, 1)
return int(index) - 1, uid
def _codeStatusSplit(line):
# Parse an +OK or -ERR response
parts = line.split(' ', 1)
if len(parts) == 1:
return parts[0], ''
return parts
def _dotUnquoter(line):
"""
C{'.'} characters which begin a line of a message are doubled to avoid
confusing with the terminating C{'.\\r\\n'} sequence. This function
unquotes them.
"""
if line.startswith('..'):
return line[1:]
return line
class POP3Client(basic.LineOnlyReceiver, policies.TimeoutMixin):
"""POP3 client protocol implementation class
Instances of this class provide a convenient, efficient API for
retrieving and deleting messages from a POP3 server.
@type startedTLS: C{bool}
@ivar startedTLS: Whether TLS has been negotiated successfully.
@type allowInsecureLogin: C{bool}
@ivar allowInsecureLogin: Indicate whether login() should be
allowed if the server offers no authentication challenge and if
our transport does not offer any protection via encryption.
@type serverChallenge: C{str} or C{None}
@ivar serverChallenge: Challenge received from the server
@type timeout: C{int}
@ivar timeout: Number of seconds to wait before timing out a
connection. If the number is <= 0, no timeout checking will be
performed.
"""
startedTLS = False
allowInsecureLogin = False
timeout = 0
serverChallenge = None
# Capabilities are not allowed to change during the session
# (except when TLS is negotiated), so cache the first response and
# use that for all later lookups
_capCache = None
# Regular expression to search for in the challenge string in the server
# greeting line.
_challengeMagicRe = re.compile('(<[^>]+>)')
# List of pending calls.
# We are a pipelining API but don't actually
# support pipelining on the network yet.
_blockedQueue = None
# The Deferred to which the very next result will go.
_waiting = None
# Whether we dropped the connection because of a timeout
_timedOut = False
# If the server sends an initial -ERR, this is the message it sent
# with it.
_greetingError = None
    def _blocked(self, f, *a):
        # Internal helper. If commands are being blocked, append
        # the given command and arguments to a list and return a Deferred
        # that will be chained with the return value of the function
        # when it eventually runs. Otherwise, set up for commands to be
        # blocked and return None.
        if self._blockedQueue is not None:
            d = defer.Deferred()
            self._blockedQueue.append((d, f, a))
            return d
        # None return value tells the caller it may proceed immediately;
        # the empty list marks "a command is in flight" for later callers.
        self._blockedQueue = []
        return None
def _unblock(self):
# Internal helper. Indicate that a function has completed.
# If there are blocked commands, run the next one. If there
# are not, set up for the next command to not be blocked.
if self._blockedQueue == []:
self._blockedQueue = None
elif self._blockedQueue is not None:
_blockedQueue = self._blockedQueue
self._blockedQueue = None
d, f, a = _blockedQueue.pop(0)
d2 = f(*a)
d2.chainDeferred(d)
# f is a function which uses _blocked (otherwise it wouldn't
# have gotten into the blocked queue), which means it will have
# re-set _blockedQueue to an empty list, so we can put the rest
# of the blocked queue back into it now.
self._blockedQueue.extend(_blockedQueue)
def sendShort(self, cmd, args):
# Internal helper. Send a command to which a short response
# is expected. Return a Deferred that fires when the response
# is received. Block all further commands from being sent until
# the response is received. Transition the state to SHORT.
d = self._blocked(self.sendShort, cmd, args)
if d is not None:
return d
if args:
self.sendLine(cmd + ' ' + args)
else:
self.sendLine(cmd)
self.state = 'SHORT'
self._waiting = defer.Deferred()
return self._waiting
def sendLong(self, cmd, args, consumer, xform):
# Internal helper. Send a command to which a multiline
# response is expected. Return a Deferred that fires when
# the entire response is received. Block all further commands
# from being sent until the entire response is received.
# Transition the state to LONG_INITIAL.
d = self._blocked(self.sendLong, cmd, args, consumer, xform)
if d is not None:
return d
if args:
self.sendLine(cmd + ' ' + args)
else:
self.sendLine(cmd)
self.state = 'LONG_INITIAL'
self._xform = xform
self._consumer = consumer
self._waiting = defer.Deferred()
return self._waiting
# Twisted protocol callback
def connectionMade(self):
if self.timeout > 0:
self.setTimeout(self.timeout)
self.state = 'WELCOME'
self._blockedQueue = []
def timeoutConnection(self):
self._timedOut = True
self.transport.loseConnection()
def connectionLost(self, reason):
if self.timeout > 0:
self.setTimeout(None)
if self._timedOut:
reason = error.TimeoutError()
elif self._greetingError:
reason = ServerErrorResponse(self._greetingError)
d = []
if self._waiting is not None:
d.append(self._waiting)
self._waiting = None
if self._blockedQueue is not None:
d.extend([deferred for (deferred, f, a) in self._blockedQueue])
self._blockedQueue = None
for w in d:
w.errback(reason)
def lineReceived(self, line):
if self.timeout > 0:
self.resetTimeout()
state = self.state
self.state = None
state = getattr(self, 'state_' + state)(line) or state
if self.state is None:
self.state = state
def lineLengthExceeded(self, buffer):
# XXX - We need to be smarter about this
if self._waiting is not None:
waiting, self._waiting = self._waiting, None
waiting.errback(LineTooLong())
self.transport.loseConnection()
# POP3 Client state logic - don't touch this.
def state_WELCOME(self, line):
# WELCOME is the first state. The server sends one line of text
# greeting us, possibly with an APOP challenge. Transition the
# state to WAITING.
code, status = _codeStatusSplit(line)
if code != OK:
self._greetingError = status
self.transport.loseConnection()
else:
m = self._challengeMagicRe.search(status)
if m is not None:
self.serverChallenge = m.group(1)
self.serverGreeting(status)
self._unblock()
return 'WAITING'
def state_WAITING(self, line):
# The server isn't supposed to send us anything in this state.
log.msg("Illegal line from server: " + repr(line))
def state_SHORT(self, line):
# This is the state we are in when waiting for a single
# line response. Parse it and fire the appropriate callback
# or errback. Transition the state back to WAITING.
deferred, self._waiting = self._waiting, None
self._unblock()
code, status = _codeStatusSplit(line)
if code == OK:
deferred.callback(status)
else:
deferred.errback(ServerErrorResponse(status))
return 'WAITING'
def state_LONG_INITIAL(self, line):
# This is the state we are in when waiting for the first
# line of a long response. Parse it and transition the
# state to LONG if it is an okay response; if it is an
# error response, fire an errback, clean up the things
# waiting for a long response, and transition the state
# to WAITING.
code, status = _codeStatusSplit(line)
if code == OK:
return 'LONG'
consumer = self._consumer
deferred = self._waiting
self._consumer = self._waiting = self._xform = None
self._unblock()
deferred.errback(ServerErrorResponse(status, consumer))
return 'WAITING'
def state_LONG(self, line):
# This is the state for each line of a long response.
# If it is the last line, finish things, fire the
# Deferred, and transition the state to WAITING.
# Otherwise, pass the line to the consumer.
if line == '.':
consumer = self._consumer
deferred = self._waiting
self._consumer = self._waiting = self._xform = None
self._unblock()
deferred.callback(consumer)
return 'WAITING'
else:
if self._xform is not None:
self._consumer(self._xform(line))
else:
self._consumer(line)
return 'LONG'
# Callbacks - override these
def serverGreeting(self, greeting):
"""Called when the server has sent us a greeting.
@type greeting: C{str} or C{None}
@param greeting: The status message sent with the server
greeting. For servers implementing APOP authentication, this
will be a challenge string. .
"""
# External API - call these (most of 'em anyway)
def startTLS(self, contextFactory=None):
"""
Initiates a 'STLS' request and negotiates the TLS / SSL
Handshake.
@type contextFactory: C{ssl.ClientContextFactory} @param
contextFactory: The context factory with which to negotiate
TLS. If C{None}, try to create a new one.
@return: A Deferred which fires when the transport has been
secured according to the given contextFactory, or which fails
if the transport cannot be secured.
"""
tls = interfaces.ITLSTransport(self.transport, None)
if tls is None:
return defer.fail(TLSError(
"POP3Client transport does not implement "
"interfaces.ITLSTransport"))
if contextFactory is None:
contextFactory = self._getContextFactory()
if contextFactory is None:
return defer.fail(TLSError(
"POP3Client requires a TLS context to "
"initiate the STLS handshake"))
d = self.capabilities()
d.addCallback(self._startTLS, contextFactory, tls)
return d
def _startTLS(self, caps, contextFactory, tls):
assert not self.startedTLS, "Client and Server are currently communicating via TLS"
if 'STLS' not in caps:
return defer.fail(TLSNotSupportedError(
"Server does not support secure communication "
"via TLS / SSL"))
d = self.sendShort('STLS', None)
d.addCallback(self._startedTLS, contextFactory, tls)
d.addCallback(lambda _: self.capabilities())
return d
def _startedTLS(self, result, context, tls):
self.transport = tls
self.transport.startTLS(context)
self._capCache = None
self.startedTLS = True
return result
def _getContextFactory(self):
try:
from twisted.internet import ssl
except ImportError:
return None
else:
context = ssl.ClientContextFactory()
context.method = ssl.SSL.TLSv1_METHOD
return context
def login(self, username, password):
"""Log into the server.
If APOP is available it will be used. Otherwise, if TLS is
available an 'STLS' session will be started and plaintext
login will proceed. Otherwise, if the instance attribute
allowInsecureLogin is set to True, insecure plaintext login
will proceed. Otherwise, InsecureAuthenticationDisallowed
will be raised (asynchronously).
@param username: The username with which to log in.
@param password: The password with which to log in.
@rtype: C{Deferred}
@return: A deferred which fires when login has
completed.
"""
d = self.capabilities()
d.addCallback(self._login, username, password)
return d
def _login(self, caps, username, password):
if self.serverChallenge is not None:
return self._apop(username, password, self.serverChallenge)
tryTLS = 'STLS' in caps
#If our transport supports switching to TLS, we might want to try to switch to TLS.
tlsableTransport = interfaces.ITLSTransport(self.transport, None) is not None
# If our transport is not already using TLS, we might want to try to switch to TLS.
nontlsTransport = interfaces.ISSLTransport(self.transport, None) is None
if not self.startedTLS and tryTLS and tlsableTransport and nontlsTransport:
d = self.startTLS()
d.addCallback(self._loginTLS, username, password)
return d
elif self.startedTLS or not nontlsTransport or self.allowInsecureLogin:
return self._plaintext(username, password)
else:
return defer.fail(InsecureAuthenticationDisallowed())
def _loginTLS(self, res, username, password):
return self._plaintext(username, password)
def _plaintext(self, username, password):
# Internal helper. Send a username/password pair, returning a Deferred
# that fires when both have succeeded or fails when the server rejects
# either.
return self.user(username).addCallback(lambda r: self.password(password))
def _apop(self, username, password, challenge):
# Internal helper. Computes and sends an APOP response. Returns
# a Deferred that fires when the server responds to the response.
digest = md5(challenge + password).hexdigest()
return self.apop(username, digest)
def apop(self, username, digest):
"""Perform APOP login.
This should be used in special circumstances only, when it is
known that the server supports APOP authentication, and APOP
authentication is absolutely required. For the common case,
use L{login} instead.
@param username: The username with which to log in.
@param digest: The challenge response to authenticate with.
"""
return self.sendShort('APOP', username + ' ' + digest)
def user(self, username):
"""Send the user command.
This performs the first half of plaintext login. Unless this
is absolutely required, use the L{login} method instead.
@param username: The username with which to log in.
"""
return self.sendShort('USER', username)
def password(self, password):
"""Send the password command.
This performs the second half of plaintext login. Unless this
is absolutely required, use the L{login} method instead.
@param password: The plaintext password with which to authenticate.
"""
return self.sendShort('PASS', password)
def delete(self, index):
"""Delete a message from the server.
@type index: C{int}
@param index: The index of the message to delete.
This is 0-based.
@rtype: C{Deferred}
@return: A deferred which fires when the delete command
is successful, or fails if the server returns an error.
"""
return self.sendShort('DELE', str(index + 1))
def _consumeOrSetItem(self, cmd, args, consumer, xform):
# Internal helper. Send a long command. If no consumer is
# provided, create a consumer that puts results into a list
# and return a Deferred that fires with that list when it
# is complete.
if consumer is None:
L = []
consumer = _ListSetter(L).setitem
return self.sendLong(cmd, args, consumer, xform).addCallback(lambda r: L)
return self.sendLong(cmd, args, consumer, xform)
def _consumeOrAppend(self, cmd, args, consumer, xform):
# Internal helper. Send a long command. If no consumer is
# provided, create a consumer that appends results to a list
# and return a Deferred that fires with that list when it is
# complete.
if consumer is None:
L = []
consumer = L.append
return self.sendLong(cmd, args, consumer, xform).addCallback(lambda r: L)
return self.sendLong(cmd, args, consumer, xform)
def capabilities(self, useCache=True):
"""Retrieve the capabilities supported by this server.
Not all servers support this command. If the server does not
support this, it is treated as though it returned a successful
response listing no capabilities. At some future time, this may be
changed to instead seek out information about a server's
capabilities in some other fashion (only if it proves useful to do
so, and only if there are servers still in use which do not support
CAPA but which do support POP3 extensions that are useful).
@type useCache: C{bool}
@param useCache: If set, and if capabilities have been
retrieved previously, just return the previously retrieved
results.
@return: A Deferred which fires with a C{dict} mapping C{str}
to C{None} or C{list}s of C{str}. For example::
C: CAPA
S: +OK Capability list follows
S: TOP
S: USER
S: SASL CRAM-MD5 KERBEROS_V4
S: RESP-CODES
S: LOGIN-DELAY 900
S: PIPELINING
S: EXPIRE 60
S: UIDL
S: IMPLEMENTATION Shlemazle-Plotz-v302
S: .
will be lead to a result of::
| {'TOP': None,
| 'USER': None,
| 'SASL': ['CRAM-MD5', 'KERBEROS_V4'],
| 'RESP-CODES': None,
| 'LOGIN-DELAY': ['900'],
| 'PIPELINING': None,
| 'EXPIRE': ['60'],
| 'UIDL': None,
| 'IMPLEMENTATION': ['Shlemazle-Plotz-v302']}
"""
if useCache and self._capCache is not None:
return defer.succeed(self._capCache)
cache = {}
def consume(line):
tmp = line.split()
if len(tmp) == 1:
cache[tmp[0]] = None
elif len(tmp) > 1:
cache[tmp[0]] = tmp[1:]
def capaNotSupported(err):
err.trap(ServerErrorResponse)
return None
def gotCapabilities(result):
self._capCache = cache
return cache
d = self._consumeOrAppend('CAPA', None, consume, None)
d.addErrback(capaNotSupported).addCallback(gotCapabilities)
return d
def noop(self):
"""Do nothing, with the help of the server.
No operation is performed. The returned Deferred fires when
the server responds.
"""
return self.sendShort("NOOP", None)
def reset(self):
"""Remove the deleted flag from any messages which have it.
The returned Deferred fires when the server responds.
"""
return self.sendShort("RSET", None)
def retrieve(self, index, consumer=None, lines=None):
"""Retrieve a message from the server.
If L{consumer} is not None, it will be called with
each line of the message as it is received. Otherwise,
the returned Deferred will be fired with a list of all
the lines when the message has been completely received.
"""
idx = str(index + 1)
if lines is None:
return self._consumeOrAppend('RETR', idx, consumer, _dotUnquoter)
return self._consumeOrAppend('TOP', '%s %d' % (idx, lines), consumer, _dotUnquoter)
def stat(self):
"""Get information about the size of this mailbox.
The returned Deferred will be fired with a tuple containing
the number or messages in the mailbox and the size (in bytes)
of the mailbox.
"""
return self.sendShort('STAT', None).addCallback(_statXform)
def listSize(self, consumer=None):
"""Retrieve a list of the size of all messages on the server.
If L{consumer} is not None, it will be called with two-tuples
of message index number and message size as they are received.
Otherwise, a Deferred which will fire with a list of B{only}
message sizes will be returned. For messages which have been
deleted, None will be used in place of the message size.
"""
return self._consumeOrSetItem('LIST', None, consumer, _listXform)
def listUID(self, consumer=None):
"""Retrieve a list of the UIDs of all messages on the server.
If L{consumer} is not None, it will be called with two-tuples
of message index number and message UID as they are received.
Otherwise, a Deferred which will fire with of list of B{only}
message UIDs will be returned. For messages which have been
deleted, None will be used in place of the message UID.
"""
return self._consumeOrSetItem('UIDL', None, consumer, _uidXform)
def quit(self):
"""Disconnect from the server.
"""
return self.sendShort('QUIT', None)
# Public API of this module; everything else is an implementation detail.
__all__ = [
    # Exceptions
    'InsecureAuthenticationDisallowed', 'LineTooLong', 'POP3ClientError',
    'ServerErrorResponse', 'TLSError', 'TLSNotSupportedError',

    # Protocol classes
    'POP3Client']
| apache-2.0 |
no2a/ansible | lib/ansible/parsing/utils/jsonify.py | 203 | 1451 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
    import json
except ImportError:
    # Very old Pythons (pre-2.6) lack the stdlib json module; fall back to
    # the third-party simplejson package, which exposes the same API.
    import simplejson as json
def jsonify(result, format=False):
    ''' Format a result mapping as a JSON string (compact or pretty).

    :param result: mapping to serialize; ``None`` is rendered as ``"{}"``.
    :param format: when True, pretty-print with a 4-space indent;
        otherwise emit compact single-line JSON.
    :returns: JSON text with keys sorted for stable, diffable output.
    '''
    if result is None:
        return "{}"

    result2 = result.copy()
    for key, value in result2.items():
        # Normalize byte strings to text so json.dumps does not fail;
        # undecodable bytes are dropped ('ignore') rather than raising.
        # isinstance(value, bytes) covers Python 2 `str` (an alias of
        # bytes) and avoids the Python 3 crash the old check
        # `type(value) is str` caused: text has no .decode() there.
        if isinstance(value, bytes):
            result2[key] = value.decode('utf-8', 'ignore')

    indent = 4 if format else None

    try:
        return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
    except UnicodeDecodeError:
        # Fall back to ASCII-escaped output if non-UTF-8 data slipped through.
        return json.dumps(result2, sort_keys=True, indent=indent)
| gpl-3.0 |
mach0/QGIS | python/plugins/grassprovider/ext/r_tileset.py | 9 | 1300 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_proj.py
---------
Date : October 2017
Copyright : (C) 2017 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'October 2017'
__copyright__ = '(C) 2017, Médéric Ribreux'
from grassprovider.Grass7Utils import Grass7Utils
def processOutputs(alg, parameters, context, feedback):
    """Prepend a ``g.proj`` command so GRASS is set up with the source CRS.

    The CRS selected via the 'sourceproj' parameter is exported to a WKT
    file, and a ``g.proj -c`` command pointing at that file is inserted at
    the front of the algorithm's command list, ahead of everything else.
    """
    source_crs = alg.parameterAsCrs(parameters, 'sourceproj', context)
    wkt_path = Grass7Utils.exportCrsWktToFile(source_crs)
    proj_command = 'g.proj -c wkt="{}"'.format(wkt_path)
    alg.commands.insert(0, proj_command)
| gpl-2.0 |
vikatory/kbengine | kbe/res/scripts/common/Lib/_markupbase.py | 891 | 14598 | """Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re

# Matchers used while scanning declarations; compiled once at import time.
# _declname_match: a declaration/keyword name plus trailing whitespace.
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
# _declstringlit_match: a single- or double-quoted string literal plus
# trailing whitespace.
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
# Terminators for comments and marked sections.
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')

# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf

_msmarkedsectionclose = re.compile(r']\s*>')

# `re` is only needed at import time; remove it from the module namespace.
del re
class ParserBase:
    """Parser base class which provides some common support methods used
    by the SGML/HTML and XHTML parsers.

    The parse_* / _parse_* methods all take an index into self.rawdata and
    return either the index just past the construct they parsed, or -1 to
    signal that the buffer ends before the construct is complete (the
    caller should retry once more data has arrived).
    """

    def __init__(self):
        if self.__class__ is ParserBase:
            raise RuntimeError(
                "_markupbase.ParserBase must be subclassed")

    def error(self, message):
        raise NotImplementedError(
            "subclasses of ParserBase must override error()")

    def reset(self):
        # (Re)initialize position tracking to the start of the input.
        self.lineno = 1
        self.offset = 0

    def getpos(self):
        """Return current line number and offset."""
        return self.lineno, self.offset

    # Internal -- update line number and offset.  This should be
    # called for each piece of data exactly once, in order -- in other
    # words the concatenation of all the input strings to this
    # function should be exactly the entire input.
    def updatepos(self, i, j):
        if i >= j:
            return j
        rawdata = self.rawdata
        nlines = rawdata.count("\n", i, j)
        if nlines:
            self.lineno = self.lineno + nlines
            pos = rawdata.rindex("\n", i, j) # Should not fail
            self.offset = j-(pos+1)
        else:
            self.offset = self.offset + j-i
        return j

    _decl_otherchars = ''

    # Internal -- parse declaration (for use by subclasses).
    # Returns the index just past the declaration, or -1 if the buffer
    # ends before the declaration is complete.
    def parse_declaration(self, i):
        # This is some sort of declaration; in "HTML as
        # deployed," this should only be the document type
        # declaration ("<!DOCTYPE html...>").
        # ISO 8879:1986, however, has more complex
        # declaration syntax for elements in <!...>, including:
        # --comment--
        # [marked section]
        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
        # ATTLIST, NOTATION, SHORTREF, USEMAP,
        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
        rawdata = self.rawdata
        j = i + 2
        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
        if rawdata[j:j+1] == ">":
            # the empty comment <!>
            return j + 1
        if rawdata[j:j+1] in ("-", ""):
            # Start of comment followed by buffer boundary,
            # or just a buffer boundary.
            return -1
        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
        n = len(rawdata)
        if rawdata[j:j+2] == '--': #comment
            # Locate --.*-- as the body of the comment
            return self.parse_comment(i)
        elif rawdata[j] == '[': #marked section
            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
            # Note that this is extended by Microsoft Office "Save as Web" function
            # to include [if...] and [endif].
            return self.parse_marked_section(i)
        else: #all other declaration elements
            decltype, j = self._scan_name(j, i)
        if j < 0:
            return j
        if decltype == "doctype":
            self._decl_otherchars = ''
        while j < n:
            c = rawdata[j]
            if c == ">":
                # end of declaration syntax
                data = rawdata[i+2:j]
                if decltype == "doctype":
                    self.handle_decl(data)
                else:
                    # According to the HTML5 specs sections "8.2.4.44 Bogus
                    # comment state" and "8.2.4.45 Markup declaration open
                    # state", a comment token should be emitted.
                    # Calling unknown_decl provides more flexibility though.
                    self.unknown_decl(data)
                return j + 1
            if c in "\"'":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1 # incomplete
                j = m.end()
            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                name, j = self._scan_name(j, i)
            elif c in self._decl_otherchars:
                j = j + 1
            elif c == "[":
                # this could be handled in a separate doctype parser
                if decltype == "doctype":
                    j = self._parse_doctype_subset(j + 1, i)
                elif decltype in {"attlist", "linktype", "link", "element"}:
                    # must tolerate []'d groups in a content model in an element declaration
                    # also in data attribute specifications of attlist declaration
                    # also link type declaration subsets in linktype declarations
                    # also link attribute specification lists in link declarations
                    self.error("unsupported '[' char in %s declaration" % decltype)
                else:
                    self.error("unexpected '[' char in declaration")
            else:
                self.error(
                    "unexpected %r char in declaration" % rawdata[j])
            if j < 0:
                return j
        return -1 # incomplete

    # Internal -- parse a marked section
    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
    # Returns the index just past the section, or -1 if incomplete.
    def parse_marked_section(self, i, report=1):
        rawdata= self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name( i+3, i )
        if j < 0:
            return j
        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
            # look for standard ]]> ending
            match= _markedsectionclose.search(rawdata, i+3)
        elif sectName in {"if", "else", "endif"}:
            # look for MS Office ]> ending
            match= _msmarkedsectionclose.search(rawdata, i+3)
        else:
            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.unknown_decl(rawdata[i+3: j])
        return match.end(0)

    # Internal -- parse comment, return the index just past it, or -1 if
    # the comment is not terminated within the buffer.
    # report=0 suppresses the handle_comment callback (used when skipping
    # comments inside a DOCTYPE internal subset).
    def parse_comment(self, i, report=1):
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            self.error('unexpected call to parse_comment()')
        match = _commentclose.search(rawdata, i+4)
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.handle_comment(rawdata[i+4: j])
        return match.end(0)

    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
    # returning the index just past any whitespace following the trailing ']'.
    def _parse_doctype_subset(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        j = i
        while j < n:
            c = rawdata[j]
            if c == "<":
                s = rawdata[j:j+2]
                if s == "<":
                    # end of buffer; incomplete
                    return -1
                if s != "<!":
                    self.updatepos(declstartpos, j + 1)
                    self.error("unexpected char in internal subset (in %r)" % s)
                if (j + 2) == n:
                    # end of buffer; incomplete
                    return -1
                if (j + 4) > n:
                    # end of buffer; incomplete
                    return -1
                if rawdata[j:j+4] == "<!--":
                    j = self.parse_comment(j, report=0)
                    if j < 0:
                        return j
                    continue
                name, j = self._scan_name(j + 2, declstartpos)
                if j == -1:
                    return -1
                if name not in {"attlist", "element", "entity", "notation"}:
                    self.updatepos(declstartpos, j + 2)
                    self.error(
                        "unknown declaration %r in internal subset" % name)
                # handle the individual names
                meth = getattr(self, "_parse_doctype_" + name)
                j = meth(j, declstartpos)
                if j < 0:
                    return j
            elif c == "%":
                # parameter entity reference
                if (j + 1) == n:
                    # end of buffer; incomplete
                    return -1
                s, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                if rawdata[j] == ";":
                    j = j + 1
            elif c == "]":
                j = j + 1
                while j < n and rawdata[j].isspace():
                    j = j + 1
                if j < n:
                    if rawdata[j] == ">":
                        return j
                    self.updatepos(declstartpos, j)
                    self.error("unexpected char after internal subset")
                else:
                    return -1
            elif c.isspace():
                j = j + 1
            else:
                self.updatepos(declstartpos, j)
                self.error("unexpected char %r in internal subset" % c)
        # end of buffer reached
        return -1

    # Internal -- scan past <!ELEMENT declarations
    def _parse_doctype_element(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j == -1:
            return -1
        # style content model; just skip until '>'
        rawdata = self.rawdata
        if '>' in rawdata[j:]:
            return rawdata.find(">", j) + 1
        return -1

    # Internal -- scan past <!ATTLIST declarations
    def _parse_doctype_attlist(self, i, declstartpos):
        rawdata = self.rawdata
        name, j = self._scan_name(i, declstartpos)
        c = rawdata[j:j+1]
        if c == "":
            # end of buffer; incomplete
            return -1
        if c == ">":
            return j + 1
        while 1:
            # scan a series of attribute descriptions; simplified:
            #   name type [value] [#constraint]
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
            c = rawdata[j:j+1]
            if c == "":
                return -1
            if c == "(":
                # an enumerated type; look for ')'
                if ")" in rawdata[j:]:
                    j = rawdata.find(")", j) + 1
                else:
                    return -1
                while rawdata[j:j+1].isspace():
                    j = j + 1
                if not rawdata[j:]:
                    # end of buffer, incomplete
                    return -1
            else:
                name, j = self._scan_name(j, declstartpos)
            c = rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                # a quoted default value
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == "#":
                # a #REQUIRED / #IMPLIED / #FIXED style constraint
                if rawdata[j:] == "#":
                    # end of buffer
                    return -1
                name, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == '>':
                # all done
                return j + 1

    # Internal -- scan past <!NOTATION declarations
    def _parse_doctype_notation(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j < 0:
            return j
        rawdata = self.rawdata
        while 1:
            c = rawdata[j:j+1]
            if not c:
                # end of buffer; incomplete
                return -1
            if c == '>':
                return j + 1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1
                j = m.end()
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan past <!ENTITY declarations
    def _parse_doctype_entity(self, i, declstartpos):
        rawdata = self.rawdata
        if rawdata[i:i+1] == "%":
            # a parameter entity declaration: skip '%' and whitespace
            j = i + 1
            while 1:
                c = rawdata[j:j+1]
                if not c:
                    return -1
                if c.isspace():
                    j = j + 1
                else:
                    break
        else:
            j = i
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        while 1:
            c = self.rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1 # incomplete
            elif c == ">":
                return j + 1
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan a name token; returns a (lowercased-name, end-index)
    # pair, or (None, -1) if we've reached the end of the buffer before the
    # token is complete.
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = _declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1  # end of buffer
            return name.lower(), m.end()
        else:
            self.updatepos(declstartpos, i)
            self.error("expected name token at %r"
                       % rawdata[declstartpos:declstartpos+20])

    # To be overridden -- handlers for unknown objects
    def unknown_decl(self, data):
        pass
| lgpl-3.0 |
tumbl3w33d/ansible | test/units/modules/network/fortios/test_fortios_router_static6.py | 21 | 10757 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_router_static6
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    # Auto-applied fixture: patch the Connection class used by the module
    # under test so no real FortiOS device is ever contacted during tests.
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_router_static6.Connection')
    return connection_class_mock
# Shared handler under test.  NOTE(review): `connection_mock` here is the
# pytest fixture *function* object, not the mock it returns at test time;
# this appears to work only because every test patches the handler's
# methods directly -- verify before relying on the connection argument.
fos_instance = FortiOSHandler(connection_mock)
def test_router_static6_creation(mocker):
    """A 'present' task must POST the translated config and report a change."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)

    # Module-style configuration (underscored keys, as a playbook would pass).
    route_cfg = {
        'bfd': 'enable',
        'blackhole': 'enable',
        'comment': 'Optional comments.',
        'device': 'test_value_6',
        'devindex': '7',
        'distance': '8',
        'dst': 'test_value_9',
        'gateway': 'test_value_10',
        'priority': '11',
        'seq_num': '12',
        'status': 'enable',
        'virtual_wan_link': 'enable'
    }
    task_input = {
        'username': 'admin',
        'state': 'present',
        'router_static6': route_cfg,
        'vdom': 'root'}

    is_error, changed, response = fortios_router_static6.fortios_router(task_input, fos_instance)

    # FortiOS API payload: underscored keys become dash-separated.
    translated_cfg = {
        'bfd': 'enable',
        'blackhole': 'enable',
        'comment': 'Optional comments.',
        'device': 'test_value_6',
        'devindex': '7',
        'distance': '8',
        'dst': 'test_value_9',
        'gateway': 'test_value_10',
        'priority': '11',
        'seq-num': '12',
        'status': 'enable',
        'virtual-wan-link': 'enable'
    }

    set_mock.assert_called_with('router', 'static6', data=translated_cfg, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_router_static6_creation_fails(mocker):
    # When the device rejects the POST (HTTP 500), the module must report an
    # error and no change.
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    # Module-style configuration (underscored keys, as a playbook would pass).
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_static6': {
            'bfd': 'enable',
            'blackhole': 'enable',
            'comment': 'Optional comments.',
            'device': 'test_value_6',
            'devindex': '7',
            'distance': '8',
            'dst': 'test_value_9',
            'gateway': 'test_value_10',
            'priority': '11',
            'seq_num': '12',
            'status': 'enable',
            'virtual_wan_link': 'enable'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_router_static6.fortios_router(input_data, fos_instance)

    # FortiOS API payload: underscored keys become dash-separated.
    expected_data = {
        'bfd': 'enable',
        'blackhole': 'enable',
        'comment': 'Optional comments.',
        'device': 'test_value_6',
        'devindex': '7',
        'distance': '8',
        'dst': 'test_value_9',
        'gateway': 'test_value_10',
        'priority': '11',
        'seq-num': '12',
        'status': 'enable',
        'virtual-wan-link': 'enable'
    }

    set_method_mock.assert_called_with('router', 'static6', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_router_static6_removal(mocker):
    """Removing an existing route must report success with changed=True."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})
    route_settings = {
        'bfd': 'enable',
        'blackhole': 'enable',
        'comment': 'Optional comments.',
        'device': 'test_value_6',
        'devindex': '7',
        'distance': '8',
        'dst': 'test_value_9',
        'gateway': 'test_value_10',
        'priority': '11',
        'seq_num': '12',
        'status': 'enable',
        'virtual_wan_link': 'enable'
    }
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'router_static6': route_settings,
        'vdom': 'root'}
    is_error, changed, response = fortios_router_static6.fortios_router(input_data, fos_instance)
    # Deletion is keyed by the route's mkey; its exact value is module-derived.
    delete_mock.assert_called_with('router', 'static6', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_router_static6_deletion_fails(mocker):
    """A failed FortiOS 'delete' reply must surface as an error with changed=False."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})
    route_settings = {
        'bfd': 'enable',
        'blackhole': 'enable',
        'comment': 'Optional comments.',
        'device': 'test_value_6',
        'devindex': '7',
        'distance': '8',
        'dst': 'test_value_9',
        'gateway': 'test_value_10',
        'priority': '11',
        'seq_num': '12',
        'status': 'enable',
        'virtual_wan_link': 'enable'
    }
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'router_static6': route_settings,
        'vdom': 'root'}
    is_error, changed, response = fortios_router_static6.fortios_router(input_data, fos_instance)
    # Deletion is keyed by the route's mkey; its exact value is module-derived.
    delete_mock.assert_called_with('router', 'static6', mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_router_static6_idempotent(mocker):
    """A 404 from 'set' means the device already matches: no error, no change."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'DELETE', 'http_status': 404})
    route_settings = {
        'bfd': 'enable',
        'blackhole': 'enable',
        'comment': 'Optional comments.',
        'device': 'test_value_6',
        'devindex': '7',
        'distance': '8',
        'dst': 'test_value_9',
        'gateway': 'test_value_10',
        'priority': '11',
        'seq_num': '12',
        'status': 'enable',
        'virtual_wan_link': 'enable'
    }
    # The module rewrites underscored option names into FortiOS dashed keys.
    expected_data = dict(route_settings)
    expected_data['seq-num'] = expected_data.pop('seq_num')
    expected_data['virtual-wan-link'] = expected_data.pop('virtual_wan_link')
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_static6': route_settings,
        'vdom': 'root'}
    is_error, changed, response = fortios_router_static6.fortios_router(input_data, fos_instance)
    set_mock.assert_called_with('router', 'static6', data=expected_data, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_router_static6_filter_foreign_attributes(mocker):
    """Attributes not in the module schema must be dropped before the API call."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})
    route_settings = {
        'random_attribute_not_valid': 'tag',
        'bfd': 'enable',
        'blackhole': 'enable',
        'comment': 'Optional comments.',
        'device': 'test_value_6',
        'devindex': '7',
        'distance': '8',
        'dst': 'test_value_9',
        'gateway': 'test_value_10',
        'priority': '11',
        'seq_num': '12',
        'status': 'enable',
        'virtual_wan_link': 'enable'
    }
    # The foreign attribute must be filtered out; underscored names become
    # FortiOS dashed keys.
    expected_data = {key: value for key, value in route_settings.items()
                     if key != 'random_attribute_not_valid'}
    expected_data['seq-num'] = expected_data.pop('seq_num')
    expected_data['virtual-wan-link'] = expected_data.pop('virtual_wan_link')
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_static6': route_settings,
        'vdom': 'root'}
    is_error, changed, response = fortios_router_static6.fortios_router(input_data, fos_instance)
    set_mock.assert_called_with('router', 'static6', data=expected_data, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
pdellaert/ansible | test/units/modules/remote_management/oneview/hpe_test_utils.py | 118 | 8245 | # -*- coding: utf-8 -*-
#
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import re
import yaml
from mock import Mock, patch
from oneview_module_loader import ONEVIEW_MODULE_UTILS_PATH
from hpOneView.oneview_client import OneViewClient
class OneViewBaseTest(object):
    """Base for OneView module tests: wires up client/module mocks via fixtures."""

    @pytest.fixture(autouse=True)
    def setUp(self, mock_ansible_module, mock_ov_client, request):
        """Resolve the resource client named by the test's 'resource' marker."""
        marker = request.node.get_marker('resource')
        # NOTE(review): marker.args is a tuple; '%s' % tuple formats its single
        # element only when exactly one marker argument was given — assumes
        # tests always pass one resource name. TODO confirm.
        self.resource = getattr(mock_ov_client, "%s" % (marker.args))
        self.mock_ov_client = mock_ov_client
        self.mock_ansible_module = mock_ansible_module

    @pytest.fixture
    def testing_module(self):
        """Derive and import the module under test from the test class name.

        Strips 'Test' and 'Module' from the class name, converts the CamelCase
        remainder to an 'oneview_snake_case' module path, and imports that
        module.  Also parses the module's EXAMPLES to catch invalid YAML.
        """
        resource_name = type(self).__name__.replace('Test', '')
        resource_module_path_name = resource_name.replace('Module', '')
        resource_module_path_name = re.findall('[A-Z][^A-Z]*', resource_module_path_name)
        resource_module_path_name = 'oneview_' + str.join('_', resource_module_path_name).lower()

        ansible = __import__('ansible')
        oneview_module = ansible.modules.remote_management.oneview
        resource_module = getattr(oneview_module, resource_module_path_name)
        self.testing_class = getattr(resource_module, resource_name)
        testing_module = self.testing_class.__module__.split('.')[-1]
        testing_module = getattr(oneview_module, testing_module)
        try:
            # Load scenarios from module examples (Also checks if it is a valid yaml)
            EXAMPLES = yaml.load(testing_module.EXAMPLES, yaml.SafeLoader)
        except yaml.scanner.ScannerError:
            message = "Something went wrong while parsing yaml from {0}.EXAMPLES".format(self.testing_class.__module__)
            raise Exception(message)
        return testing_module

    def test_main_function_should_call_run_method(self, testing_module, mock_ansible_module):
        """main() of the module under test must delegate to run()."""
        mock_ansible_module.params = {'config': 'config.json'}
        main_func = getattr(testing_module, 'main')
        with patch.object(self.testing_class, "run") as mock_run:
            main_func()
            mock_run.assert_called_once()
class FactsParamsTest(OneViewBaseTest):
    """Shared tests for facts modules that accept extra 'params' filters."""

    def test_should_get_all_using_filters(self, testing_module):
        """Every supported filter must be forwarded to get_all as kwargs."""
        self.resource.get_all.return_value = []
        module_params = {
            'config': 'config.json',
            'name': None,
            'params': {
                'start': 1,
                'count': 3,
                'sort': 'name:descending',
                'filter': 'purpose=General',
                'query': 'imported eq true'
            }
        }
        self.mock_ansible_module.params = module_params
        self.testing_class().run()
        self.resource.get_all.assert_called_once_with(
            start=1, count=3, sort='name:descending', filter='purpose=General',
            query='imported eq true')

    def test_should_get_all_without_params(self, testing_module):
        """Without 'params', get_all must be called with no kwargs at all."""
        self.resource.get_all.return_value = []
        self.mock_ansible_module.params = {'config': 'config.json', 'name': None}
        self.testing_class().run()
        self.resource.get_all.assert_called_once_with()
class OneViewBaseTestCase(object):
    """unittest-style counterpart of OneViewBaseTest.

    Subclasses call configure_mocks() from their setUp to patch the OneView
    client factory and AnsibleModule, then drive the module class under test.
    """
    mock_ov_client_from_json_file = None
    testing_class = None
    mock_ansible_module = None
    mock_ov_client = None
    testing_module = None
    EXAMPLES = None

    def configure_mocks(self, test_case, testing_class):
        """
        Preload mocked OneViewClient instance and AnsibleModule
        Args:
            test_case (object): class instance (self) that are inheriting from OneViewBaseTestCase
            testing_class (object): class being tested
        """
        self.testing_class = testing_class

        # Define OneView Client Mock (FILE)
        patcher_json_file = patch.object(OneViewClient, 'from_json_file')
        test_case.addCleanup(patcher_json_file.stop)
        self.mock_ov_client_from_json_file = patcher_json_file.start()

        # Define OneView Client Mock
        self.mock_ov_client = self.mock_ov_client_from_json_file.return_value

        # Define Ansible Module Mock
        patcher_ansible = patch(ONEVIEW_MODULE_UTILS_PATH + '.AnsibleModule')
        test_case.addCleanup(patcher_ansible.stop)
        mock_ansible_module = patcher_ansible.start()
        self.mock_ansible_module = Mock()
        # The patched AnsibleModule constructor hands back our Mock instance.
        mock_ansible_module.return_value = self.mock_ansible_module

        self.__set_module_examples()

    def test_main_function_should_call_run_method(self):
        """main() of the module under test must delegate to run()."""
        self.mock_ansible_module.params = {'config': 'config.json'}
        main_func = getattr(self.testing_module, 'main')
        with patch.object(self.testing_class, "run") as mock_run:
            main_func()
            mock_run.assert_called_once()

    def __set_module_examples(self):
        # Resolve the module object that defines the class under test, then
        # parse its EXAMPLES block (this also validates the YAML).
        ansible = __import__('ansible')
        testing_module = self.testing_class.__module__.split('.')[-1]
        self.testing_module = getattr(ansible.modules.remote_management.oneview, testing_module)
        try:
            # Load scenarios from module examples (Also checks if it is a valid yaml)
            self.EXAMPLES = yaml.load(self.testing_module.EXAMPLES, yaml.SafeLoader)
        except yaml.scanner.ScannerError:
            message = "Something went wrong while parsing yaml from {0}.EXAMPLES".format(self.testing_class.__module__)
            raise Exception(message)
class FactsParamsTestCase(OneViewBaseTestCase):
    """
    FactsParamsTestCase has common test for classes that support pass additional
    parameters when retrieving all resources.
    """

    def configure_client_mock(self, resorce_client):
        """
        Args:
            resorce_client: Resource client that is being called
        """
        self.resource_client = resorce_client

    def __validations(self):
        # Guard against tests forgetting the two required setup calls.
        if not self.testing_class:
            raise Exception("Mocks are not configured, you must call 'configure_mocks' before running this test.")
        if not self.resource_client:
            raise Exception(
                "Mock for the client not configured, you must call 'configure_client_mock' before running this test.")

    def test_should_get_all_using_filters(self):
        """Every supported filter must be forwarded to get_all as kwargs."""
        self.__validations()
        self.resource_client.get_all.return_value = []
        module_params = {
            'config': 'config.json',
            'name': None,
            'params': {
                'start': 1,
                'count': 3,
                'sort': 'name:descending',
                'filter': 'purpose=General',
                'query': 'imported eq true'
            }
        }
        self.mock_ansible_module.params = module_params
        self.testing_class().run()
        self.resource_client.get_all.assert_called_once_with(
            start=1, count=3, sort='name:descending', filter='purpose=General',
            query='imported eq true')

    def test_should_get_all_without_params(self):
        """Without 'params', get_all must be called with no kwargs at all."""
        self.__validations()
        self.resource_client.get_all.return_value = []
        self.mock_ansible_module.params = {'config': 'config.json', 'name': None}
        self.testing_class().run()
        self.resource_client.get_all.assert_called_once_with()
| gpl-3.0 |
remipassmoilesel/python_scripts | ncurses/tutorial.py | 1 | 2293 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-

# Ncurses tutorial — terminal display library.
# Source: https://docs.python.org/2/howto/curses.html

import time
import subprocess
import sys

# The whole script runs inside a try so we can intercept CTRL+C
# (KeyboardInterrupt) and restore the terminal before exiting.
try:
    # Initialize the library.
    import curses
    stdscr = curses.initscr()

    # Do not echo keypresses to the screen.
    curses.noecho()

    # React to keys immediately, without waiting for Enter.
    curses.cbreak()

    # Create a window.
    begin_x = 20; begin_y = 7
    height = 5; width = 40
    win = curses.newwin(height, width, begin_y, begin_x)

    # Create a pad, a special kind of window that can be larger than the screen.
    pad = curses.newpad(100, 100)
    # Fill the pad with a repeating pattern of letters.
    for y in range(0, 100):
        for x in range(0, 100):
            try:
                pad.addch(y,x, ord('a') + (x*x+y*y) % 26)
            except curses.error:
                pass

    # Displays a section of the pad in the middle of the screen
    pad.refresh(0,0, 5,5, 20,75)

    from random import randrange

    charsList = [" "]
    def getRandomChar():
        return charsList[randrange(0, len(charsList))]

    randomEffects = [
        curses.COLOR_WHITE,
        curses.A_STANDOUT,
        curses.A_REVERSE,
    ]
    def getRandomEffect():
        return randomEffects[randrange(0, len(randomEffects))]

    # Animation loop: repaint every cell with a random style until CTRL+C.
    while True:
        for y in range(0, 100):
            for x in range(0, 100):
                try:
                    stdscr.addstr(y, x,
                        getRandomChar(),
                        getRandomEffect())
                    #pad.addch(y,x, getRandomChar(), curses.A_BLINK)
                except curses.error:
                    pass
        #pad.refresh(0,0, 5,5, 20,75)
        stdscr.refresh()
        time.sleep(0.2)

except KeyboardInterrupt:
    # Tear down curses and restore normal terminal modes.
    curses.nocbreak(); stdscr.keypad(0); curses.echo()
    # The display is sometimes garbled after ncurses; reset the terminal.
    subprocess.call("reset")
    sys.exit(0)
| gpl-3.0 |
EvanK/ansible | lib/ansible/module_utils/network/ios/ios.py | 14 | 7072 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import Connection, ConnectionError
# Cache of running-config text keyed by the flag string used to fetch it, so
# repeated get_config() calls within one task avoid extra device round trips.
_DEVICE_CONFIGS = {}

# Connection options accepted under the (deprecated) 'provider' dict; values
# fall back to ANSIBLE_NET_* environment variables when unset.
ios_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
    'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
    'timeout': dict(type='int')
}
ios_argument_spec = {
    'provider': dict(type='dict', options=ios_provider_spec),
}

# The same options at the top level, kept for backwards compatibility and
# scheduled for removal in Ansible 2.9.
ios_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
    'auth_pass': dict(removed_in_version=2.9, no_log=True),
    'timeout': dict(removed_in_version=2.9, type='int')
}
ios_argument_spec.update(ios_top_spec)
def get_provider_argspec():
    """Return the argument spec for the deprecated 'provider' dict."""
    return ios_provider_spec
def get_connection(module):
    """Return a Connection to the device, memoized on the module object.

    Fails the module when the active connection plugin is not 'cliconf'.
    """
    if hasattr(module, '_ios_connection'):
        return module._ios_connection

    network_api = get_capabilities(module).get('network_api')
    if network_api == 'cliconf':
        module._ios_connection = Connection(module._socket_path)
    else:
        module.fail_json(msg='Invalid connection type %s' % network_api)
    return module._ios_connection
def get_capabilities(module):
    """Return the capability dict reported by the connection plugin.

    The parsed result is cached on the module object after the first fetch.
    """
    if not hasattr(module, '_ios_capabilities'):
        try:
            raw = Connection(module._socket_path).get_capabilities()
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        module._ios_capabilities = json.loads(raw)
    return module._ios_capabilities
def check_args(module, warnings):
    # Kept for API parity with other network platform modules; IOS currently
    # has no extra argument validation to perform.
    pass
def get_defaults_flag(module):
    """Ask the device which flag exposes defaults in 'show running-config'."""
    try:
        flag = get_connection(module).get_defaults_flag()
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
    return to_text(flag, errors='surrogate_then_replace').strip()
def get_config(module, flags=None):
    """Return the device running-config, cached per unique flag string.

    Args:
        module: the AnsibleModule instance (used for connection and errors).
        flags: optional str or list of str appended to 'show running-config'.

    Returns:
        The stripped configuration text.
    """
    flags = to_list(flags)

    # A trailing 'section ...' flag is not understood by all IOS devices;
    # remember whether one is present so we can retry without it below.
    section_filter = False
    if flags and 'section' in flags[-1]:
        section_filter = True

    flag_str = ' '.join(flags)

    try:
        # Serve from the per-process cache when this flag set was fetched before.
        return _DEVICE_CONFIGS[flag_str]
    except KeyError:
        connection = get_connection(module)
        try:
            out = connection.get_config(flags=flags)
        except ConnectionError as exc:
            if section_filter:
                # Some ios devices don't understand `| section foo`
                out = get_config(module, flags=flags[:-1])
            else:
                module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        # Cache under the original flag string, even after the retry above.
        _DEVICE_CONFIGS[flag_str] = cfg
        return cfg
def run_commands(module, commands, check_rc=True, return_timestamps=False):
    """Execute 'commands' on the device through the cliconf connection."""
    connection = get_connection(module)
    try:
        return connection.run_commands(
            commands=commands, check_rc=check_rc,
            return_timestamps=return_timestamps)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))
def load_config(module, commands):
    """Push configuration lines to the device and return its response."""
    connection = get_connection(module)
    try:
        return connection.edit_config(commands).get('response')
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))
def normalize_interface(name):
    """Return the canonical long-form name for an IOS interface.

    Abbreviated type prefixes (e.g. 'Gi', 'te') are expanded to the full
    interface type and joined with the interface number; names with an
    unrecognized prefix are returned unchanged.  A falsy name returns None.
    """
    if not name:
        return

    # Ordered (abbreviation, full type) pairs; first match wins, preserving
    # the precedence of the original if/elif chain.
    abbreviations = (
        ('gi', 'GigabitEthernet'),
        ('te', 'TenGigabitEthernet'),
        ('fa', 'FastEthernet'),
        ('fo', 'FortyGigabitEthernet'),
        ('et', 'Ethernet'),
        ('vl', 'Vlan'),
        ('lo', 'loopback'),
        ('po', 'port-channel'),
        ('nv', 'nve'),
        ('twe', 'TwentyFiveGigE'),
        ('hu', 'HundredGigE'),
    )

    lowered = name.lower()
    if_type = None
    for prefix, full_name in abbreviations:
        if lowered.startswith(prefix):
            if_type = full_name
            break

    tokens = name.split(' ')
    if len(tokens) == 2:
        # 'GigabitEthernet 0/1' style: number is the second token.
        if_number = tokens[-1].strip()
    else:
        # Otherwise keep only digits, '/' and '.' from the whole name.
        if_number = ''.join(
            char for char in name if char.isdigit() or char in '/.')

    if if_type is None:
        return name
    return if_type + if_number
| gpl-3.0 |
MartinHjelmare/home-assistant | homeassistant/components/synology_chat/notify.py | 7 | 1871 | """SynologyChat platform for notify component."""
import json
import logging
import requests
import voluptuous as vol
from homeassistant.const import CONF_RESOURCE, CONF_VERIFY_SSL
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (ATTR_DATA, PLATFORM_SCHEMA,
BaseNotificationService)
# Key under the notify 'data' payload holding a URL of a file for Synology
# Chat to attach to the message.
ATTR_FILE_URL = 'file_url'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    # Incoming-webhook URL of the Synology Chat integration.
    vol.Required(CONF_RESOURCE): cv.url,
    # Whether to verify the TLS certificate when posting (defaults to True).
    vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
})

_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
    """Get the Synology Chat notification service."""
    return SynologyChatNotificationService(
        config.get(CONF_RESOURCE), config.get(CONF_VERIFY_SSL))
class SynologyChatNotificationService(BaseNotificationService):
    """Implementation of a notification service for Synology Chat."""

    def __init__(self, resource, verify_ssl):
        """Initialize the service.

        resource: incoming-webhook URL of the Synology Chat integration.
        verify_ssl: whether to verify the TLS certificate when posting.
        """
        self._resource = resource
        self._verify_ssl = verify_ssl

    def send_message(self, message="", **kwargs):
        """Send a message to a user.

        An optional 'file_url' entry in kwargs[ATTR_DATA] is forwarded so
        Synology Chat attaches the linked file to the message.
        """
        data = {
            'text': message
        }

        extended_data = kwargs.get(ATTR_DATA)
        file_url = extended_data.get(ATTR_FILE_URL) if extended_data else None

        if file_url:
            data['file_url'] = file_url

        # Synology Chat expects a form field named 'payload' containing JSON.
        to_send = 'payload={}'.format(json.dumps(data))

        response = requests.post(self._resource, data=to_send, timeout=10,
                                 verify=self._verify_ssl)

        if response.status_code not in (200, 201):
            # Bug fix: this is not inside an except block, so
            # Logger.exception() would log a bogus "NoneType: None"
            # traceback; use error() instead.
            _LOGGER.error(
                "Error sending message. Response %d: %s:",
                response.status_code, response.reason)
| apache-2.0 |
Omegaphora/external_chromium_org | chrome/common/extensions/docs/server2/samples_data_source.py | 25 | 2735 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import traceback
from data_source import DataSource
from extensions_paths import EXAMPLES
from future import All, Future
from platform_util import GetPlatforms
def _GetSampleId(sample_name):
  """Derive a URL-friendly id: spaces become dashes, letters lower-cased."""
  return sample_name.replace(' ', '-').lower()
def GetAcceptedLanguages(request):
  """Parse the request's Accept-Language header into a list of language tags.

  Quality suffixes (';q=...') are stripped.  Returns [] when there is no
  request or no Accept-Language header.
  """
  if request is None:
    return []
  header = request.headers.get('Accept-Language')
  if header is None:
    return []
  return [entry.split(';')[0].strip() for entry in header.split(',')]
def CreateSamplesView(samples_list, request):
  """Localize sample names/descriptions and attach a URL-friendly 'id'.

  Samples whose name or description is an i18n placeholder of the form
  '__MSG_<key>__' are resolved against the sample's locale messages, using
  the first of the request's Accept-Language values present in the sample's
  locales (falling back to the sample's default_locale).
  """
  return_list = []
  for dict_ in samples_list:
    name = dict_['name']
    description = dict_['description']
    if description is None:
      description = ''
    if name.startswith('__MSG_') or description.startswith('__MSG_'):
      try:
        # Copy the sample dict so we don't change the dict in the cache.
        sample_data = dict_.copy()
        # Strip the '__MSG_' prefix and '__' suffix to get the message keys.
        name_key = name[len('__MSG_'):-len('__')]
        description_key = description[len('__MSG_'):-len('__')]
        locale = sample_data['default_locale']
        for lang in GetAcceptedLanguages(request):
          if lang in sample_data['locales']:
            locale = lang
            break
        locale_data = sample_data['locales'][locale]
        sample_data['name'] = locale_data[name_key]['message']
        sample_data['description'] = locale_data[description_key]['message']
        sample_data['id'] = _GetSampleId(sample_data['name'])
      except Exception:
        # Any malformed locale data is logged and the sample served as-is.
        logging.error(traceback.format_exc())
        # Revert the sample to the original dict.
        sample_data = dict_
      return_list.append(sample_data)
    else:
      dict_['id'] = _GetSampleId(name)
      return_list.append(dict_)
  return return_list
class SamplesDataSource(DataSource):
  '''Constructs a list of samples and their respective files and api calls.
  '''

  def __init__(self, server_instance, request):
    self._platform_bundle = server_instance.platform_bundle
    self._request = request

  def _GetImpl(self, platform):
    # Apps samples live at the root of the samples file system; everything
    # else is found under the examples directory.
    listing_path = '' if platform == 'apps' else EXAMPLES
    samples_cache = self._platform_bundle.GetSamplesModel(platform).GetCache()
    def create_view(sample_list):
      return CreateSamplesView(sample_list, self._request)
    return samples_cache.GetFromFileListing(listing_path).Then(create_view)

  def get(self, platform):
    return self._GetImpl(platform).Get()

  def GetRefreshPaths(self):
    return list(GetPlatforms())

  def Refresh(self, path):
    return self._GetImpl(path)
| bsd-3-clause |
catapult-project/catapult-csm | dashboard/dashboard/auto_bisect.py | 1 | 9913 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint for a cron job to automatically run bisects."""
import logging
from dashboard import can_bisect
from dashboard import pinpoint_request
from dashboard import start_try_job
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
from dashboard.models import try_job
from dashboard.services import pinpoint_service
# Bots whose bisects are routed to Pinpoint instead of the recipe-based
# bisect infrastructure; currently empty, so every bisect uses recipes.
_PINPOINT_BOTS = []


class NotBisectableError(Exception):
  """An error indicating that a bisect couldn't be automatically started."""
  pass
def StartNewBisectForBug(bug_id):
  """Tries to trigger a bisect job for the alerts associated with a bug.

  Args:
    bug_id: A bug ID number.

  Returns:
    If successful, a dict containing "issue_id" and "issue_url" for the
    bisect job. Otherwise, a dict containing "error", with some description
    of the reason why a job wasn't started.
  """
  try:
    return _StartBisectForBug(bug_id)
  except NotBisectableError as error:
    reason = error.message
    logging.info('New bisect errored out with message: %s', reason)
    return {'error': reason}
def _StartBisectForBug(bug_id):
  """Picks the best alert on the bug and kicks off the appropriate bisect.

  Raises:
    NotBisectableError: No alerts exist, or none has a bisectable test.
  """
  anomalies = anomaly.Anomaly.query(anomaly.Anomaly.bug_id == bug_id).fetch()
  if not anomalies:
    raise NotBisectableError('No Anomaly alerts found for this bug.')
  test_anomaly = _ChooseTest(anomalies)
  test = test_anomaly.GetTestMetadataKey().get() if test_anomaly else None
  if not test or not can_bisect.IsValidTestForBisect(test.test_path):
    raise NotBisectableError('Could not select a test.')
  # Pinpoint handles bisects for an allowlisted set of bots; everything else
  # goes through the recipe-based bisect flow.
  if test.bot_name in _PINPOINT_BOTS:
    return _StartPinpointBisect(bug_id, test_anomaly, test)
  return _StartRecipeBisect(bug_id, test_anomaly, test)
def _GetPinpointRevisionInfo(revision, test):
  """Looks up the git hash and Pinpoint repository name for a Row revision.

  Args:
    revision: The ID of a Row, not necessarily an actual revision number.
    test: The TestMetadata entity the Row belongs to.

  Returns:
    A (git_hash, repository_name) pair usable in a Pinpoint job request.

  Raises:
    NotBisectableError: The row is missing, the master is unsupported, or the
        row lacks the expected revision column.
  """
  repo_to_default_rev = {
      'ChromiumPerf': {'default_rev': 'r_chromium', 'pinpoint': 'chromium'}
  }

  row_parent_key = utils.GetTestContainerKey(test)
  row = graph_data.Row.get_by_id(revision, parent=row_parent_key)
  if not row:
    raise NotBisectableError('No row %s: %s' % (test.key.id(), str(revision)))

  if test.master_name not in repo_to_default_rev:
    raise NotBisectableError('Unsupported master: %s' % test.master_name)

  rev_info = repo_to_default_rev[test.master_name]
  if not hasattr(row, rev_info['default_rev']):
    raise NotBisectableError('Row has no %s' % rev_info['default_rev'])

  # Bug fix: read the attribute that was just validated above. The previous
  # code read getattr(row, row.a_default_rev), an attribute the guard never
  # checked, which could raise AttributeError or return the wrong column.
  return getattr(row, rev_info['default_rev']), rev_info['pinpoint']
def _StartPinpointBisect(bug_id, test_anomaly, test):
  """Requests a Pinpoint bisect job covering the anomaly's revision range."""
  # Convert params to Pinpoint compatible
  start_git_hash, start_repository = _GetPinpointRevisionInfo(
      test_anomaly.start_revision - 1, test)
  end_git_hash, end_repository = _GetPinpointRevisionInfo(
      test_anomaly.end_revision, test)

  params = {
      'test_path': test.test_path,
      'start_git_hash': start_git_hash,
      'end_git_hash': end_git_hash,
      'start_repository': start_repository,
      'end_repository': end_repository,
      'bug_id': bug_id,
  }

  results = pinpoint_service.NewJob(
      pinpoint_request.PinpointParamsFromBisectParams(params))

  # Rename Pinpoint's reply fields to the names the existing bisect UI reads.
  for pinpoint_key, legacy_key in (('jobId', 'issue_id'),
                                   ('jobUrl', 'issue_url')):
    if pinpoint_key in results:
      results[legacy_key] = results.pop(pinpoint_key)

  return results
def _StartRecipeBisect(bug_id, test_anomaly, test):
  """Creates a recipe bisect TryJob and starts it, cleaning up on failure."""
  job = _MakeBisectTryJob(bug_id, test_anomaly, test)
  job_key = job.put()

  try:
    result = start_try_job.PerformBisect(job)
  except request_handler.InvalidInputError as e:
    result = {'error': e.message}
  # Don't leave a stale TryJob entity behind when the bisect never started.
  if 'error' in result:
    job_key.delete()
  return result
def _MakeBisectTryJob(bug_id, test_anomaly, test):
  """Tries to automatically select parameters for a bisect job.

  Args:
    bug_id: A bug ID which some alerts are associated with.
    test_anomaly: The Anomaly entity whose revision range will be bisected.
    test: The TestMetadata entity for the chosen anomaly.

  Returns:
    A TryJob entity, which has not yet been put in the datastore.

  Raises:
    NotBisectableError: A valid bisect config could not be created.
  """
  # Bisect from just before the anomaly window through its end.
  good_revision = _GetRevisionForBisect(test_anomaly.start_revision - 1, test)
  bad_revision = _GetRevisionForBisect(test_anomaly.end_revision, test)
  if not can_bisect.IsValidRevisionForBisect(good_revision):
    raise NotBisectableError('Invalid "good" revision: %s.' % good_revision)
  if not can_bisect.IsValidRevisionForBisect(bad_revision):
    raise NotBisectableError('Invalid "bad" revision: %s.' % bad_revision)
  if test_anomaly.start_revision == test_anomaly.end_revision:
    raise NotBisectableError(
        'Same "good"/"bad" revisions, bisect skipped')
  metric = start_try_job.GuessMetric(test.test_path)
  story_filter = start_try_job.GuessStoryFilter(test.test_path)
  bisect_bot = start_try_job.GuessBisectBot(test.master_name, test.bot_name)
  if not bisect_bot:
    raise NotBisectableError(
        'Could not select a bisect bot: %s for (%s, %s)' % (
            bisect_bot, test.master_name, test.bot_name))
  new_bisect_config = start_try_job.GetBisectConfig(
      bisect_bot=bisect_bot,
      master_name=test.master_name,
      suite=test.suite_name,
      metric=metric,
      story_filter=story_filter,
      good_revision=good_revision,
      bad_revision=bad_revision,
      repeat_count=10,
      max_time_minutes=20,
      bug_id=bug_id)
  if 'error' in new_bisect_config:
    raise NotBisectableError('Could not make a valid config.')
  # The recipe consumes the config as a python-literal string.
  config_python_string = utils.BisectConfigPythonString(new_bisect_config)
  bisect_job = try_job.TryJob(
      bot=bisect_bot,
      config=config_python_string,
      bug_id=bug_id,
      master_name=test.master_name,
      internal_only=test.internal_only,
      job_type='bisect')
  return bisect_job
def _ChooseTest(anomalies):
  """Chooses a test to use for a bisect job.

  Candidates are ordered by preference (currently: largest regression first,
  via _CompareAnomalyBisectability) and the first one whose test is actually
  bisectable is chosen.  The chosen test determines the bisect command and
  metric name.  Queue length, platform and noise level would be reasonable
  additional inputs in the future.

  Args:
    anomalies: A list of Anomaly entities (may be empty).

  Returns:
    An Anomaly entity, or None if no valid entity could be chosen.
  """
  if not anomalies:
    return None
  # Best candidates first. Note: cmp= is a Python 2 idiom.
  anomalies.sort(cmp=_CompareAnomalyBisectability)
  return next(
      (entity for entity in anomalies
       if can_bisect.IsValidTestForBisect(
           utils.TestPath(entity.GetTestMetadataKey()))),
      None)
def _CompareAnomalyBisectability(a1, a2):
  """cmp-style comparator ranking Anomalies for bisect preference.

  A larger percent_changed sorts earlier, on the theory that the biggest
  regression is the most worthwhile to bisect.  Bisect-bot queue length,
  platform, test run time and noise level would be reasonable future inputs.

  Args:
    a1: The first Anomaly entity.
    a2: The second Anomaly entity.

  Returns:
    Negative integer if a1 is better than a2, positive integer if a2 is better
    than a1, or zero if they're equally good.
  """
  # bool arithmetic yields exactly -1, 0 or 1, matching the original chain.
  return ((a1.percent_changed < a2.percent_changed) -
          (a1.percent_changed > a2.percent_changed))
def _GetRevisionForBisect(revision, test_key):
  """Resolves a Row ID to the revision value usable for bisecting.

  Note: this logic is parallel to getRevisionForBisect in
  elements/chart-container.html; keep the two in sync.

  Args:
    revision: The ID of a Row, not necessarily an actual revision number.
    test_key: The ndb.Key for a TestMetadata.

  Returns:
    An int or string value which can be used when bisecting. Falls back to
    the given revision ID when no better value is stored on the Row.
  """
  row_parent_key = utils.GetTestContainerKey(test_key)
  row = graph_data.Row.get_by_id(revision, parent=row_parent_key)
  # Guard clauses: without a Row, or without a declared default-revision
  # attribute (and its referenced value), the raw ID is the best we have.
  if not row:
    return revision
  if not hasattr(row, 'a_default_rev'):
    return revision
  if not hasattr(row, row.a_default_rev):
    return revision
  # a_default_rev names the attribute on the Row that holds the revision.
  return getattr(row, row.a_default_rev)
def _PrintStartedAndFailedBisectJobs():
  """Builds a report of started and failed bisect jobs in the datastore.

  Returns:
    A dict with a headline and a list of result entries (one for failed
    jobs, one for started jobs) for display on result.html.
  """
  results = []
  # Order matters for display: failed jobs first, then started jobs.
  for title, status in (('Failed jobs', 'failed'), ('Started jobs', 'started')):
    jobs = try_job.TryJob.query(try_job.TryJob.status == status).fetch()
    results.append(_JobsListResult(title, jobs))
  return {
      'headline': 'Bisect Jobs',
      'results': results,
  }
def _JobsListResult(title, jobs):
  """Builds one entry of the results list displayed on result.html.

  Args:
    title: Heading for this group of jobs.
    jobs: A list of TryJob entities.

  Returns:
    A dict with the group name (including a count), one line of text per
    job, and the CSS class used by the results page.
  """
  job_lines = [_JobLine(job) for job in jobs]
  return {
      'name': '%s: %d' % (title, len(jobs)),
      'value': '\n'.join(job_lines),
      'class': 'results-pre',
  }
def _JobLine(job):
"""Returns a string with information about one TryJob entity."""
config = job.config.replace('\n', '') if job.config else 'No config.'
return 'Bug ID %d. %s' % (job.bug_id, config)
| bsd-3-clause |
gfonk/ansible | lib/ansible/plugins/strategies/linear.py | 10 | 14299 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
    """The 'linear' strategy: run each task on all hosts in lock step.

    Every host receives the same task (or a noop placeholder) at each step,
    and the strategy waits for the results queue to drain before moving on
    to the next task.
    """

    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        # Placeholder task handed to hosts that are not in the currently
        # selected run state, so every host advances one "slot" per step.
        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        # Peek (without advancing) at each host's next (state, task) pair.
        host_tasks = {}
        display.debug("building list of next tasks for hosts")
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
        display.debug("done building task lists")

        num_setups = 0
        num_tasks = 0
        num_rescue = 0
        num_always = 0

        lowest_cur_block = len(iterator._blocks)

        # Count how many hosts sit in each run state, and find the lowest
        # (earliest) block index among hosts that are not yet complete.
        display.debug("counting tasks in each state of execution")
        for (k, v) in host_tasks.iteritems():
            if v is None:
                continue
            (s, t) = v
            if t is None:
                continue
            if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                lowest_cur_block = s.cur_block
            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1
        display.debug("done counting tasks in each state of execution")

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            display.debug("starting to advance hosts")
            for host in hosts:
                host_state_task = host_tasks[host.name]
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if t is None:
                    continue
                if s.run_state == cur_state and s.cur_block == cur_block:
                    # Non-peek call: advances this host's iterator state as a
                    # side effect (the earlier lookup used peek=True). The
                    # return value is intentionally unused.
                    new_t = iterator.get_next_task_for_host(host)
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            display.debug("done advancing hosts to next task")
            return rvals

        # Priority order of run states: setup, then tasks, rescue, always.

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            display.debug("advancing hosts in ITERATING_SETUP")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            display.debug("advancing hosts in ITERATING_TASKS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            display.debug("advancing hosts in ITERATING_RESCUE")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            display.debug("advancing hosts in ITERATING_ALWAYS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        display.debug("all hosts are done, so returning None's for all hosts")
        return [(host, None) for host in hosts]

    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iteratate over each task, while there is one left to run
        result = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                self._display.debug("getting the remaining hosts for this loop")
                hosts_left = self._inventory.get_hosts(iterator._play.hosts)
                self._display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest = False
                choose_step = True

                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.
                    try:
                        action = action_loader.get(task.action, class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            self._display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator)
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        self._display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        self._display.debug("done getting variables")

                        # The task-start callback is only sent once per lock step;
                        # the name is templated temporarily and then restored.
                        if not callback_sent:
                            display.debug("sending task start callback, copying the task so we can template it temporarily")
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = unicode(templar.template(task.name, fail_on_undefined=False))
                                display.debug("done templating")
                            except:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)

                    # Drain any results that are already available while queuing.
                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                # go to next host/task group
                if skip_rest:
                    continue

                self._display.debug("done queuing things up, now waiting for results queue to drain")
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    self._display.debug("out of hosts to run on")
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
                except AnsibleError as e:
                    return False

                if len(included_files) > 0:
                    # Placeholder task/block used to pad the task lists of hosts
                    # that are NOT part of an include, keeping lock step intact.
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_blocks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)
                        except AnsibleError as e:
                            for host in included_file._hosts:
                                iterator.mark_host_failed(host)
                            self._display.warning(str(e))
                            continue

                        for new_block in new_blocks:
                            noop_block = Block(parent_block=task._block)
                            noop_block.block = [noop_task for t in new_block.block]
                            noop_block.always = [noop_task for t in new_block.always]
                            noop_block.rescue = [noop_task for t in new_block.rescue]
                            for host in hosts_left:
                                if host in included_file._hosts:
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                    final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                self._display.debug("results queue empty")
            except (IOError, EOFError) as e:
                self._display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return False

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered
        return super(StrategyModule, self).run(iterator, play_context, result)
| gpl-3.0 |
ukgovdatascience/classifyintentsapp | app/main/forms.py | 1 | 3159 | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, BooleanField, SelectField, SubmitField, RadioField
from wtforms.validators import DataRequired, Length, Email, Regexp, InputRequired
from wtforms import ValidationError
from flask_pagedown.fields import PageDownField
from ..models import Role, User, Codes, ProjectCodes, Classified
class NameForm(FlaskForm):
    """Simple single-field form asking for the user's name."""
    name = StringField('What is your name?', validators=[DataRequired()])
    submit = SubmitField('Submit')
class EditProfileForm(FlaskForm):
    """Form letting a user edit their own profile details.

    All fields are optional; name and location are capped at 64 characters.
    """
    name = StringField('Real name', validators=[Length(0, 64)])
    location = StringField('Location', validators=[Length(0, 64)])
    about_me = TextAreaField('About me')
    submit = SubmitField('Submit')
class EditProfileAdminForm(FlaskForm):
    """Admin-level profile editor: can change email, username, confirmation
    status and role of an arbitrary user.

    The inline validate_* methods are picked up by WTForms by naming
    convention and reject email addresses / usernames already taken by a
    DIFFERENT user (the target user's own current values are allowed).
    """
    email = StringField('Email', validators=[DataRequired(), Length(1, 64),
                                             Email()])
    username = StringField('Username', validators=[
        DataRequired(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                              'Usernames must have only letters, '
                                              'numbers, dots or underscores')])
    confirmed = BooleanField('Confirmed')
    role = SelectField('Role', coerce=int)
    submit = SubmitField('Submit')

    def __init__(self, user, *args, **kwargs):
        """Stores the user being edited and loads role choices from the DB."""
        super(EditProfileAdminForm, self).__init__(*args, **kwargs)
        # Populate the role dropdown from all roles, ordered by name.
        self.role.choices = [(role.id, role.name)
                             for role in Role.query.order_by(Role.name).all()]
        self.user = user

    def validate_email(self, field):
        """Rejects an email that belongs to another existing user."""
        if field.data != self.user.email and \
                User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')

    def validate_username(self, field):
        """Rejects a username that belongs to another existing user."""
        if field.data != self.user.username and \
                User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already in use.')
class ClassifyForm(FlaskForm):
    """Form used to classify a survey response with a code and project code."""
    code = RadioField('code_radio', coerce=int, validators=[InputRequired()])
    # project_code defaults to '0' (coerced to int by the field).
    # NOTE(review): the original comment claimed the default corresponds to
    # the 'none' project code (id 1) — that disagrees with default='0'.
    # Determining the id dynamically, e.g. via
    # ProjectCodes.query.filter_by(project_code='none').first(), would be
    # more robust than hard-coding; confirm which id the 'none' row has.
    project_code = RadioField(
        'project_code_radio',
        coerce=int, default='0', validators=[InputRequired()])
    PII_boolean = BooleanField('PII_boolean')
    submit = SubmitField('Submit')

    @classmethod
    def codes(cls):
        """Builds a form instance with choices loaded from the database.

        Only codes and project codes with no end_date (i.e. still active)
        are offered as choices.
        """
        codes_form = cls()
        # Extract active classification codes from Postgres
        # (rows where end_date is still NULL).
        codes = Codes.query.filter(Codes.end_date.is_(None)).all()
        codes_form.code.choices = [(g.code_id, g.code) for g in codes]
        # Extract active project codes from Postgres
        # (rows where end_date is still NULL).
        project_codes = ProjectCodes.query.filter(
            ProjectCodes.end_date.is_(None)).all()
        codes_form.project_code.choices = [(i.project_code_id, i.project_code) for i in project_codes]
        return(codes_form)
| mit |
fdvarela/odoo8 | openerp/service/report.py | 324 | 5148 | # -*- coding: utf-8 -*-
import base64
import logging
import sys
import threading
import openerp
import openerp.report
from openerp import tools
import security
_logger = logging.getLogger(__name__)
# TODO: set a maximum report number per user to avoid DOS attacks
#
# Report state kept in self_reports[id]['state']:
#     False -> rendering still in progress
#     True  -> finished (either 'result' or 'exception' is populated)

# In-flight and finished reports, keyed by the integer handle returned to
# the caller; entries are removed by _check_report on successful pickup.
self_reports = {}
# Monotonically increasing report handle; guarded by self_id_protect.
self_id = 0
self_id_protect = threading.Semaphore()
def dispatch(method, params):
    """RPC entry point for the report service.

    Checks credentials, forwards the call to the matching exp_* handler,
    and signals registry cache changes around the call.
    """
    db, uid, passwd = params[0], params[1], params[2]
    # Expose the caller's uid on the current thread for downstream code.
    threading.current_thread().uid = uid
    remaining = params[3:]
    if method not in ('report', 'report_get', 'render_report'):
        raise KeyError("Method not supported %s" % method)
    security.check(db, uid, passwd)
    openerp.modules.registry.RegistryManager.check_registry_signaling(db)
    handler = globals()['exp_' + method]
    res = handler(db, uid, *remaining)
    openerp.modules.registry.RegistryManager.signal_caches_change(db)
    return res
def exp_render_report(db, uid, object, ids, datas=None, context=None):
    """Renders a report synchronously and returns its payload.

    Allocates a report handle, renders the report in the calling thread,
    records the outcome in self_reports, and immediately collects it via
    _check_report (which raises the recorded exception, if any).
    """
    if not datas:
        datas={}
    if not context:
        context={}

    # Allocate a unique report handle under the semaphore.
    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    # state=False marks the report as in progress until rendering finishes.
    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    cr = openerp.registry(db).cursor()
    try:
        result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
        if not result:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
        self_reports[id]['result'] = result
        self_reports[id]['format'] = format
        self_reports[id]['state'] = True
    except Exception, exception:
        _logger.exception('Exception: %s\n', exception)
        if hasattr(exception, 'name') and hasattr(exception, 'value'):
            # OpenERP-style exceptions carry name/value attributes.
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
        else:
            tb = sys.exc_info()
            self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
        self_reports[id]['state'] = True

    cr.commit()
    cr.close()

    return _check_report(id)
def exp_report(db, uid, object, ids, datas=None, context=None):
    """Starts rendering a report in a background thread.

    Returns immediately with an integer report handle; the caller polls
    exp_report_get() with that handle to collect the finished result.
    """
    if not datas:
        datas={}
    if not context:
        context={}

    # Allocate a unique report handle under the semaphore.
    self_id_protect.acquire()
    global self_id
    self_id += 1
    id = self_id
    self_id_protect.release()

    # state=False marks the report as in progress until the worker finishes.
    self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}

    def go(id, uid, ids, datas, context):
        # Worker body: renders the report, records result or exception in
        # self_reports[id], and flips state to True when done.
        with openerp.api.Environment.manage():
            cr = openerp.registry(db).cursor()
            try:
                result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
                if not result:
                    tb = sys.exc_info()
                    self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
                self_reports[id]['result'] = result
                self_reports[id]['format'] = format
                self_reports[id]['state'] = True
            except Exception, exception:
                _logger.exception('Exception: %s\n', exception)
                if hasattr(exception, 'name') and hasattr(exception, 'value'):
                    # OpenERP-style exceptions carry name/value attributes.
                    self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
                else:
                    tb = sys.exc_info()
                    self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
                self_reports[id]['state'] = True
            cr.commit()
            cr.close()
        return True

    threading.Thread(target=go, args=(id, uid, ids, datas, context)).start()
    return id
def _check_report(report_id):
    """Collects the outcome recorded for a report handle.

    Raises the deferred exception if rendering failed. On success, returns
    a dict with the (optionally zlib-compressed) base64-encoded result and
    its format, and removes the entry from self_reports — each handle is
    therefore single-use once the report has finished.
    """
    result = self_reports[report_id]
    exc = result['exception']
    if exc:
        # Re-raise the failure captured by the rendering code.
        raise openerp.osv.orm.except_orm(exc.message, exc.traceback)
    res = {'state': result['state']}
    if res['state']:
        if tools.config['reportgz']:
            import zlib
            res2 = zlib.compress(result['result'])
            # 'zlib' tells the client the payload is compressed.
            res['code'] = 'zlib'
        else:
            #CHECKME: why is this needed???
            if isinstance(result['result'], unicode):
                res2 = result['result'].encode('latin1', 'replace')
            else:
                res2 = result['result']
        if res2:
            res['result'] = base64.encodestring(res2)
        res['format'] = result['format']
        # Finished reports are one-shot: drop the entry after pickup.
        del self_reports[report_id]
    return res
def exp_report_get(db, uid, report_id):
    """Polls for the result of a report started with exp_report().

    Only the uid that started the report may collect it; unknown handles
    (including already-collected ones) raise ReportNotFound.
    """
    if report_id in self_reports:
        if self_reports[report_id]['uid'] == uid:
            return _check_report(report_id)
        else:
            raise Exception, 'AccessDenied'
    else:
        raise Exception, 'ReportNotFound'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
itzzshirlayyy/Online_Ordering | venv/lib/python2.7/site-packages/setuptools/tests/contexts.py | 59 | 1819 | import tempfile
import os
import shutil
import sys
import contextlib
import site
from ..compat import StringIO
@contextlib.contextmanager
def tempdir(cd=lambda dir:None, **kwargs):
    """Yield a fresh temporary directory, removing it on exit.

    `cd` is invoked with the new directory on entry and with the original
    working directory on exit (default is a no-op); extra keyword args are
    forwarded to tempfile.mkdtemp.
    """
    created = tempfile.mkdtemp(**kwargs)
    previous = os.getcwd()
    try:
        cd(created)
        yield created
    finally:
        # Undo the cd first, then clean up the directory tree.
        cd(previous)
        shutil.rmtree(created)
@contextlib.contextmanager
def environment(**replacements):
    """
    In a context, patch the environment with replacements. Pass None values
    to clear the values. Yields a dict of the values that were overridden.
    """
    # Snapshot existing values for every key we are about to touch.
    saved = {
        name: os.environ[name]
        for name in replacements
        if name in os.environ
    }

    # Keys mapped to None mean "unset this variable".
    to_clear = [name for name, value in replacements.items() if value is None]
    for name in to_clear:
        os.environ.pop(name, None)
        del replacements[name]

    os.environ.update(replacements)

    try:
        yield saved
    finally:
        # Drop everything we set, then restore the snapshot.
        for name in replacements:
            os.environ.pop(name, None)
        os.environ.update(saved)
@contextlib.contextmanager
def quiet():
    """
    Redirect stdout/stderr to StringIO objects to prevent console output from
    distutils commands. Yields the two capture buffers, rewound on exit so
    they can be read directly.
    """
    saved_streams = sys.stdout, sys.stderr
    sys.stdout = captured_out = StringIO()
    sys.stderr = captured_err = StringIO()
    try:
        yield captured_out, captured_err
    finally:
        # Rewind the buffers so callers can .read() them, then restore.
        captured_out.seek(0)
        captured_err.seek(0)
        sys.stdout, sys.stderr = saved_streams
@contextlib.contextmanager
def save_user_site_setting():
    """Snapshot site.ENABLE_USER_SITE, yield it, and restore it on exit."""
    original = site.ENABLE_USER_SITE
    try:
        yield original
    finally:
        site.ENABLE_USER_SITE = original
@contextlib.contextmanager
def suppress_exceptions(*excs):
    """Silently swallow any of the given exception types raised in the body.

    With no arguments, nothing is suppressed.
    """
    with contextlib.suppress(*excs):
        yield
| mit |
rosswhitfield/mantid | Framework/PythonInterface/mantid/plots/__init__.py | 3 | 1385 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid package
#
#
"""
Functionality for unpacking mantid objects for plotting with matplotlib.
"""
# This file should be left free of PyQt imports to allow quick importing
# of the main package.
from collections.abc import Iterable # noqa: F401
from matplotlib.projections import register_projection
from matplotlib.scale import register_scale
from mantid.plots import datafunctions, axesfunctions, axesfunctions3D # noqa: F401
from mantid.plots.legend import convert_color_to_hex, LegendProperties # noqa: F401
from mantid.plots.datafunctions import get_normalize_by_bin_width # noqa: F401
from mantid.plots.scales import PowerScale, SquareScale # noqa: F401
from mantid.plots.mantidaxes import MantidAxes, MantidAxes3D, WATERFALL_XOFFSET_DEFAULT, WATERFALL_YOFFSET_DEFAULT # noqa: F401
from mantid.plots.utility import (artists_hidden, autoscale_on_update, legend_set_draggable, MantidAxType) # noqa: F401
# Register the Mantid-specific axes classes as matplotlib projections and
# the custom power/square scales, so they can be requested by name through
# the standard matplotlib projection/scale APIs.
register_projection(MantidAxes)
register_projection(MantidAxes3D)
register_scale(PowerScale)
register_scale(SquareScale)
| gpl-3.0 |
orlenko/plei | mezzanine/blog/migrations/0010_category_site_allow_comments.py | 12 | 11028 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.contrib.sites.models import Site
class Migration(SchemaMigration):
    """South schema migration for the blog app.

    Adds BlogCategory.site (FK to the current Site by default) and
    BlogPost.allow_comments (default True).
    """

    def forwards(self, orm):
        """Applies the migration: adds the two new columns."""
        site = Site.objects.get_current()
        # Adding field 'BlogCategory.site'
        db.add_column('blog_blogcategory', 'site', self.gf('django.db.models.fields.related.ForeignKey')(default=site.pk, to=orm['sites.Site']), keep_default=False)

        # Adding field 'BlogPost.allow_comments'
        db.add_column('blog_blogpost', 'allow_comments', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)

    def backwards(self, orm):
        """Reverts the migration: drops both columns again."""
        # Deleting field 'BlogCategory.site'
        db.delete_column('blog_blogcategory', 'site_id')

        # Deleting field 'BlogPost.allow_comments'
        db.delete_column('blog_blogpost', 'allow_comments')

    # Frozen ORM model definitions (auto-generated by South); South builds
    # the `orm` object passed to forwards()/backwards() from this mapping.
    # Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'blog.blogcategory': {
            'Meta': {'object_name': 'BlogCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'blog.blogpost': {
            'Meta': {'ordering': "('-publish_date',)", 'object_name': 'BlogPost'},
            'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'blogposts'", 'blank': 'True', 'to': "orm['blog.BlogCategory']"}),
            'comments': ('mezzanine.generic.fields.CommentsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.ThreadedComment']"}),
            'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.Rating']"}),
            'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blogposts'", 'to': "orm['auth.User']"})
        },
        'comments.comment': {
            'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'object_pk': ('django.db.models.fields.TextField', [], {}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
            'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.assignedkeyword': {
            'Meta': {'object_name': 'AssignedKeyword'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
            'object_pk': ('django.db.models.fields.IntegerField', [], {})
        },
        'generic.keyword': {
            'Meta': {'object_name': 'Keyword'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.rating': {
            'Meta': {'object_name': 'Rating'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_pk': ('django.db.models.fields.IntegerField', [], {}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'generic.threadedcomment': {
            'Meta': {'ordering': "('submit_date',)", 'object_name': 'ThreadedComment', '_ormbases': ['comments.Comment']},
            'by_author': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
            'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'replied_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'null': 'True', 'to': "orm['generic.ThreadedComment']"})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['blog']
| bsd-2-clause |
hanw/sonic-lite | hw/bin/bitstream.py | 1 | 8327 | #!/usr/bin/python
# Filename: bitstream.py
#
# Input: 64 bit per line raw bitstream, first bit at lsb.
# Output: 66 bit per line raw bitstream, first bit at lsb.
#
# Author: HanW
# Date: Nov/23/2011
#
from bitstring import *
#
# Helper function --
# Input: 64-bit per line, in hex, first bit as LSB
# Output: 66-bit per line, in binary, first bit at LSB
#
def conv_64_to_66_bin(file_in, file_out):
    """Re-segment a 64-bit-per-line hex stream into 66-bit-per-line binary.

    file_in:  one 64-bit hex value per line, first bit at LSB.
    file_out: one 66-bit binary string per line, first bit at LSB.

    A trailing partial segment (fewer than 66 bits) is discarded.
    """
    print ('Convert 64-bit input stream to 66-bit output stream\n')
    # convert each input line to a 64-character binary string, reversed so
    # the first transmitted bit comes first in the string
    fin = open(file_in, 'r')
    ftmp = open('/tmp/conv_64_to_66.bin', 'w')
    for line in fin:
        str_bin = BitString(uint=int(line, 16), length=64)
        str_bin.reverse()
        ftmp.write(str_bin.bin[2:])
        ftmp.write('\n')
    fin.close()
    ftmp.close()
    # get bitstream; drop the line breaks so the 66-bit re-segmentation
    # below slices pure '0'/'1' characters (a raw read() would leave '\n'
    # inside the slices and make int(..., 2) raise ValueError)
    fread = open('/tmp/conv_64_to_66.bin', 'r')
    data_bits = fread.read().replace('\n', '')
    # chop to 66 bit segments (integer division: // is required so range()
    # receives an int under Python 3)
    fbin = open(file_out, 'w')
    tot_l = len(data_bits) // 66
    for n in range(tot_l):
        str_bin = BitString(uint=int(data_bits[n * 66:(n + 1) * 66], 2), length=66)
        str_bin.reverse()
        fbin.write(str_bin.bin[2:])
        fbin.write('\n')
    fbin.close()
    fread.close()
#
# Input: 64-bit per line, in hex, first bit at LSB
# Output: 66-bit per line, in hex, first bit at LSB
#
def conv_64_to_66(file_in, file_out, spacing=1):
    """Convert a 64-bit-per-line hex stream into a 66-bit-per-line hex stream.

    Each output line holds the top 2 bits as one hex digit, optionally a
    space (when spacing is truthy), then the remaining 64 bits as 16 hex
    digits.
    """
    # produce the intermediate binary representation, 66 bits per line
    conv_64_to_66_bin(file_in, '/tmp/conv_64_to_66.66b')
    # re-encode every binary line as hex: 2 high bits, then 64 low bits
    binary_lines = open('/tmp/conv_64_to_66.66b', 'r')
    hex_out = open(file_out, 'w')
    for row in binary_lines:
        head = '%0x' % (int(row[0:2], 2))
        tail = '%016x' % (int(row[2:66], 2))
        separator = ' ' if spacing else ''
        hex_out.write(head + separator + tail + '\n')
    binary_lines.close()
    hex_out.close()
    print ('Done!\n')
#
# Input: 64-bit per line, in hex, first bit at LSB
# Output: 128-bit per line, in hex, first bit at LSB
#
def conv_64_to_128(file_in, file_out):
    """Merge consecutive pairs of 64-bit hex lines into 128-bit hex lines.

    The second line of each pair becomes the high half of the output line
    (first bit at LSB).  An odd trailing line is discarded.
    """
    # combine two 64-bit lines into one 128-bit line
    f64 = open(file_in, 'r')
    f128 = open(file_out, 'w')
    lines = sum(1 for line in f64)
    f64.seek(0)
    # // is required so range() receives an int under Python 3
    for n in range(lines // 2):
        data0 = f64.readline()
        data1 = f64.readline()
        # strip data1's trailing newline; data0's newline terminates the line
        f128.write(data1[0:len(data1) - 1])
        f128.write(data0)
    f64.close()
    f128.close()
#
# Input: 64-bit per line, in hex, first bit at LSB
# Output: 40-bit per line, in hex, first bit at LSB
#
def conv_64_to_40(file_in, file_out):
    """Convert a 64-bit-per-line hex stream into a 40-bit-per-line stream.

    Goes through an intermediate 128-bit-per-line file, concatenates five
    128-bit lines (640 bits) and re-slices them into 10-hex-digit (40-bit)
    chunks.
    """
    print ('Convert 64-bit input to 40-bit output stream\n')
    conv_64_to_128(file_in, '/tmp/conv_64_to_128.tmp')
    # convert to 40 bits
    f40 = open(file_out, 'w')
    f128 = open('/tmp/conv_64_to_128.tmp', 'r')
    lines = sum(1 for line in f128)
    f128.seek(0)
    # // is required so range() receives an int under Python 3.
    # NOTE(review): each iteration consumes 5 input lines but the loop runs
    # lines // 2 times, so later readline() calls return '' once the file is
    # exhausted; preserved as-is -- confirm the intended page geometry.
    for n in range(lines // 2):
        data0 = f128.readline()
        data1 = f128.readline()
        data2 = f128.readline()
        data3 = f128.readline()
        data4 = f128.readline()
        # concatenate high-to-low, keeping data0's trailing newline as anchor
        data_sum = data4[0:len(data4) - 1] + data3[0:len(data3) - 1] + data2[0:len(data2) - 1] + data1[0:len(data1) - 1] + data0
        for m in range(len(data_sum) // 10):
            f40.write(data_sum[len(data_sum) - 1 - (m + 1) * 10: len(data_sum) - 1 - m * 10])
            f40.write('\n')
    f128.close()
    f40.close()
    print('Done!\n')
#
# Input: 64-bit per line, in hex, first bit at LSB
# Output: 66 bit per line, conform to our page format.
#
def conv_64_to_page(file_in, file_out):
    """Convert a 64-bit-per-line hex stream into the page format.

    A page groups 496 66-bit words: the 2-bit sync headers are packed into
    64-bit words written first, followed by the 496 64-bit payload words.
    The result is finally merged to 128 bits per line.
    """
    print ('Convert 64 to page formatted input\n')
    tmp1 = '/tmp/conv_to_page_1.tmp'
    tmp2 = '/tmp/conv_to_page_2.tmp'
    # Convert 64 to 66 binary
    conv_64_to_66_bin(file_in, tmp1)
    ftmp = open(tmp1, 'r')
    lines = sum(1 for line in ftmp)
    ftmp.seek(0)
    fpage = open(tmp2, 'w')
    # // is required so range() receives an int under Python 3
    for n in range(lines // 496):
        # seed with 32 zero bits so the first flush happens mid-page
        sync = BitString(uint=0, length=32).bin[2:]
        data = ""
        for m in range(496):
            line = ftmp.readline()
            # accumulate the 2-bit sync header of each word
            sync = line[64:66] + sync
            if (len(sync) % 64 == 0):
                # flush a full 64-bit word of packed sync bits
                fpage.write(BitString(uint=int(sync, 2), length=64).hex[2:])
                fpage.write('\n')
                sync = ''
            data = data + '%016x' % int(line[0:64], 2) + '\n'
        fpage.write(data)
    fpage.close()
    ftmp.close()
    # convert to 128-bit per line
    conv_64_to_128(tmp2, file_out)
#
# Input: page formatted data
# NOTE: use input generated by conv_64_to_page.
# Output: 66-bit per line.
#
def conv_page_to_66(file_in, file_out):
    """Convert page-formatted data (from conv_64_to_page) back to 66 bits per line.

    Each 256-line page starts with 8 lines of packed 2-bit sync headers
    followed by 248 lines carrying two 64-bit payload words each; every
    payload word is re-joined with its sync bits and emitted as 17 hex digits.
    """
    print('Convert page formatted data to 66-bit per line\n')
    fpage = open(file_in, 'r')
    fout = open(file_out, 'w')
    lines = sum(1 for x in fpage)
    fpage.seek(0)
    # // is required so range() receives an int under Python 3
    for l in range(lines // 256):
        # first 8 lines of a page hold the packed sync headers
        sync = ''
        for n in range(8):
            line = fpage.readline()
            sync = line[0:len(line) - 1] + sync
        syncstr = BitString(uint=int(sync, 16), length=128 * 8).bin[2:]
        # remaining 248 lines carry two 64-bit payload words each
        for m in range(248):
            line = fpage.readline()
            datastr = BitStream('0x' + line).bin[2:]
            # integer half-length: float slice indices raise TypeError on Python 3
            half = len(datastr) // 2
            num2 = int(datastr[half:len(datastr)] + syncstr[992 - m * 2 - 2: 992 - m * 2], 2)
            fout.write('%017x' % num2)
            fout.write('\n')
            num1 = int(datastr[0:half] + syncstr[992 - m * 2 - 4: 992 - m * 2 - 2], 2)
            fout.write('%017x' % num1)
            fout.write('\n')
    fpage.close()
    fout.close()
#
# Input: x-bits per line, in hex, first bit at LSB
# Output: bitstream, in binary, one bit per line.
# x is length
# format is 16 for 0x, 2 for 0b, 10 for decimal
#
def conv_x_to_bits (file_in, file_out, x, fmt):
    """Explode an x-bit-per-line stream into one bit per line, LSB first.

    fmt is the numeric base of the input lines: 16 for hex, 2 for binary,
    10 for decimal.
    """
    src = open(file_in, 'r')
    dst = open(file_out, 'w')
    for raw in src:
        word = BitString(uint=int(raw, fmt), length=x).bin[2:]
        # emit LSB first, i.e. walk the binary string back to front
        for ch in reversed(word):
            dst.write(ch + '\n')
    src.close()
    dst.close()
    print('Done!\n')
def conv_bits_to_hex (file_in, file_out):
    """Convert one binary number per line into one unpadded hex value per line."""
    src = open(file_in, 'r')
    dst = open(file_out, 'w')
    for raw in src:
        value = int(raw, 2)
        dst.write(hex(value)[2:] + '\n')
    src.close()
    dst.close()
    print('Done!\n')
def conv_66_to_hex (file_in, file_out, length):
    """Convert one binary number per line into zero-padded 17-digit hex lines.

    NOTE(review): the length parameter is accepted but unused -- the output
    width is fixed at 17 hex digits; kept for interface compatibility.
    """
    src = open(file_in, 'r')
    dst = open(file_out, 'w')
    for raw in src:
        dst.write("%017x" % int(raw, 2) + '\n')
    src.close()
    dst.close()
    print('Done!\n')
#
# Convert 64bit per line to 1bit per line
#
def conv_64_to_bitstream(file_in, file_out):
    """Explode 64-bit hex lines into one bit per line (LSB first)."""
    print ('convert 64 to bit stream.\n')
    conv_x_to_bits(file_in, file_out, 64, 16)

def conv_32_to_bitstream(file_in, file_out):
    """Explode 32-bit hex lines into one bit per line (LSB first)."""
    print ('convert 32 to bit stream.\n')
    conv_x_to_bits(file_in, file_out, 32, 16)

#
# Convert 40bit per line to 1bit per line
#
def conv_40_to_bitstream(file_in, file_out):
    """Explode 40-bit hex lines into one bit per line (LSB first)."""
    print ('Convert 40 to bit stream.\n')
    conv_x_to_bits(file_in, file_out, 40, 16)

#
# Convert 66bit per line to 1bit per line
#
def conv_66_to_bitstream(file_in, file_out):
    """Explode 66-bit hex lines into one bit per line (LSB first)."""
    print ('convert 66 to bit stream.\n')
    conv_x_to_bits(file_in, file_out, 66, 16)

def conv_bin66_to_bitstream(file_in, file_out):
    """Explode 66-bit binary lines into one bit per line (LSB first)."""
    print ('convert bin 66 to bitstream. \n')
    conv_x_to_bits(file_in, file_out, 66, 2)

def conv_bin40_to_bitstream(file_in, file_out):
    """Explode 40-bit binary lines into one bit per line (LSB first)."""
    print ('convert bin 40 to bitstream. \n')
    conv_x_to_bits(file_in, file_out, 40, 2)

def conv_bin32_to_bitstream(file_in, file_out):
    """Explode 32-bit binary lines into one bit per line (LSB first)."""
    print ('convert bin 32 to bitstream. \n')
    conv_x_to_bits(file_in, file_out, 32, 2)
#
# Convert from 128-bit per line page to 32 bit per line
# Used in Modelsim sonic_app simulation
#
def conv_page_to_32 (file_in, file_out):
    """Split each 128-bit (32-hex-digit) line into four 32-bit lines.

    The lowest 32-bit word (rightmost 8 hex digits) is emitted first.
    Used in Modelsim sonic_app simulation.
    """
    print ('convert page to 32 bit.\n')
    src = open(file_in, 'r')
    dst = open(file_out, 'w')
    for row in src:
        # slice the 32 hex characters low-word-first
        for start in (24, 16, 8, 0):
            dst.write(row[start:start + 8] + '\n')
    dst.close()
    src.close()
| mit |
cloud-fan/spark | examples/src/main/python/mllib/random_forest_regression_example.py | 27 | 2527 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Random Forest Regression Example.
"""
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
    # Spark entry point; appName labels this job in the Spark UI.
    sc = SparkContext(appName="PythonRandomForestRegressionExample")
    # $example on$
    # Load and parse the data file into an RDD of LabeledPoint.
    data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])

    # Train a RandomForest model.
    #  Empty categoricalFeaturesInfo indicates all features are continuous.
    #  Note: Use larger numTrees in practice.
    #  Setting featureSubsetStrategy="auto" lets the algorithm choose.
    model = RandomForest.trainRegressor(trainingData, categoricalFeaturesInfo={},
                                        numTrees=3, featureSubsetStrategy="auto",
                                        impurity='variance', maxDepth=4, maxBins=32)

    # Evaluate model on test instances and compute test error
    # (mean squared error between held-out labels and predictions).
    predictions = model.predict(testData.map(lambda x: x.features))
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    testMSE = labelsAndPredictions.map(lambda lp: (lp[0] - lp[1]) * (lp[0] - lp[1])).sum() /\
        float(testData.count())
    print('Test Mean Squared Error = ' + str(testMSE))
    print('Learned regression forest model:')
    print(model.toDebugString())

    # Save and load model (round-trip demonstrates model persistence).
    model.save(sc, "target/tmp/myRandomForestRegressionModel")
    sameModel = RandomForestModel.load(sc, "target/tmp/myRandomForestRegressionModel")
    # $example off$
| apache-2.0 |
sncn-private/imx6-linux | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Usage line shown when the script is invoked with too many arguments.
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional filters: restrict the report to one command name or one pid.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    # A single numeric argument is treated as a pid, anything else as a comm.
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# Auto-vivifying nested dict: syscalls[comm][pid][syscall_id] -> count.
syscalls = autodict()
def trace_begin():
    # Called by perf before event processing starts.
    print "Press control+C to stop and show the summary"

def trace_end():
    # Called by perf after the last event; emit the aggregated table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Per-event handler for the raw_syscalls:sys_enter tracepoint.
    # Skip events that don't match the optional comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First occurrence for this (comm, pid, id): the autodict leaf
        # does not exist yet, so initialize the counter.
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Print the per-comm/per-pid syscall counts collected in `syscalls`,
    # each pid's syscalls sorted by descending count.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by (count, id) descending; py2-only tuple-parameter lambda.
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
jrmontag/Data-Science-45min-Intros | ml-basis-expansion-101/kernel.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
# Two-component 1D Gaussian mixture.  np.random.normal requires an integer
# sample count, so the fractional sizes are cast explicitly (a float size
# raises TypeError on modern NumPy).
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)

fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)

# histogram 1
# NOTE(review): `normed=True` was removed from Matplotlib 3.x in favour of
# `density=True`; kept as-is to preserve behavior with the pinned version.
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")

# histogram 2 -- same data, bin edges shifted by 0.75
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")

# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")

# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")

# rug marks for the samples, shared axis limits across the four panels
for axi in ax.ravel():
    axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
    axi.set_xlim(-4, 9)
    axi.set_ylim(-0.02, 0.34)

for axi in ax[:, 0]:
    axi.set_ylabel('Normalized Density')

for axi in ax[1, :]:
    axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
# Evaluation grid and a single source point at the origin: each panel below
# draws one kernel's shape centred on that point.
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))

fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
    """Tick formatter: label positions in units of the bandwidth h.

    0 -> '0', +/-1 -> 'h'/'-h', anything else -> '<int>h'.  loc is the tick
    location required by the FuncFormatter interface and is unused.
    """
    special = {0: '0', 1: 'h', -1: '-h'}
    if x in special:
        return special[x]
    return '%ih' % x
# Draw each available kernel's shape in its own panel (6 kernels, 6 axes).
kernel_names = ['gaussian', 'tophat', 'epanechnikov',
                'exponential', 'linear', 'cosine']
for axi, kernel in zip(ax.ravel(), kernel_names):
    log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
    axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
    axi.text(-2.6, 0.95, kernel)

    # x ticks labelled in multiples of the bandwidth; no y ticks
    axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
    axi.xaxis.set_major_locator(plt.MultipleLocator(1))
    axi.yaxis.set_major_locator(plt.NullLocator())

    axi.set_ylim(0, 1.05)
    axi.set_xlim(-2.9, 2.9)

ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
# np.random.normal requires an integer sample count: cast the fractional
# mixture sizes explicitly (a float size raises TypeError on modern NumPy).
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]

X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]

# analytic density of the generating mixture, for reference
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
             + 0.7 * norm(5, 1).pdf(X_plot[:, 0]))

fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
        label='input distribution')

# overlay KDE estimates for three kernels at a fixed bandwidth
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
    kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
    log_dens = kde.score_samples(X_plot)
    ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
            label="kernel = '{0}'".format(kernel))

ax.text(6, 0.38, "N={0} points".format(N))

ax.legend(loc='upper left')
# rug plot of the samples, jittered slightly below the axis
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')

ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| unlicense |
archf/ansible | lib/ansible/plugins/connection/buildah.py | 8 | 6797 | # Based on the docker connection plugin
#
# Connection plugin for building container images using buildah tool
# https://github.com/projectatomic/buildah
#
# Written by: Tomas Tomecek (https://github.com/TomasTomecek)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION:
connection: buildah
short_description: interact with an existing buildah container
description:
- Run commands or put/fetch files to an existing container using buildah tool.
author: Tomas Tomecek (ttomecek@redhat.com)
version_added: 2.4
options:
remote_addr:
description:
- The ID of the container you want to access.
default: inventory_hostname
config:
vars:
- name: ansible_host
remote_user:
description:
- User specified via name or ID which is used to execute commands inside the container.
config:
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
"""
from __future__ import (absolute_import, division, print_function)
import shlex
import shutil
import subprocess
import ansible.constants as C
from ansible.module_utils._text import to_bytes
from ansible.plugins.connection import ConnectionBase, ensure_connect
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__metaclass__ = type
# this _has to be_ named Connection
class Connection(ConnectionBase):
    """
    This is a connection plugin for buildah: it uses buildah binary to interact with the containers
    """

    # String used to identify this Connection class from other classes
    transport = 'buildah'
    has_pipelining = True
    become_methods = frozenset(C.BECOME_METHODS)

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        self._container_id = self._play_context.remote_addr
        self._connected = False
        # container filesystem will be mounted here on host
        self._mount_point = None
        # `buildah inspect` doesn't contain info about what the default user is -- if it's not
        # set, it's empty
        self.user = self._play_context.remote_user

    def _set_user(self):
        # Persist the configured remote user into the container's config.
        self._buildah(b"config", [b"--user=" + to_bytes(self.user, errors='surrogate_or_strict')])

    def _buildah(self, cmd, cmd_args=None, in_data=None):
        """
        run buildah executable

        :param cmd: buildah's command to execute (str)
        :param cmd_args: list of arguments to pass to the command (list of str/bytes)
        :param in_data: data passed to buildah's stdin
        :return: return code, stdout, stderr
        """
        local_cmd = ['buildah', cmd, '--', self._container_id]
        if cmd_args:
            local_cmd += cmd_args
        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

        display.vvv("RUN %s" % (local_cmd,), host=self._container_id)
        p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout, stderr = p.communicate(input=in_data)
        stdout = to_bytes(stdout, errors='surrogate_or_strict')
        stderr = to_bytes(stderr, errors='surrogate_or_strict')
        return p.returncode, stdout, stderr

    def _connect(self):
        """
        no persistent connection is being maintained, mount container's filesystem
        so we can easily access it
        """
        super(Connection, self)._connect()
        rc, self._mount_point, stderr = self._buildah("mount")
        self._mount_point = self._mount_point.strip()
        display.vvvvv("MOUNTPOINT %s RC %s STDERR %r" % (self._mount_point, rc, stderr))
        self._connected = True

    @ensure_connect
    def exec_command(self, cmd, in_data=None, sudoable=False):
        """ run specified command in a running OCI container using buildah """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        cmd_bytes = to_bytes(cmd, errors='surrogate_or_strict')
        cmd_args_list = shlex.split(cmd_bytes)

        rc, stdout, stderr = self._buildah("run", cmd_args_list)

        # BUGFIX: log the actual stdout; previously stderr was passed twice,
        # so stdout was never shown in the debug output.
        display.vvvvv("STDOUT %r STDERR %r" % (stdout, stderr))
        return rc, stdout, stderr

    def put_file(self, in_path, out_path):
        """ Place a local file located in 'in_path' inside container at 'out_path' """
        super(Connection, self).put_file(in_path, out_path)
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._container_id)

        # copy directly through the mounted container filesystem
        real_out_path = self._mount_point + to_bytes(out_path, errors='surrogate_or_strict')
        shutil.copyfile(
            to_bytes(in_path, errors='surrogate_or_strict'),
            to_bytes(real_out_path, errors='surrogate_or_strict')
        )
        # alternatively, this can be implemented using `buildah copy`:
        # rc, stdout, stderr = self._buildah(
        #     "copy",
        #     [to_bytes(in_path, errors='surrogate_or_strict'),
        #      to_bytes(out_path, errors='surrogate_or_strict')]
        # )

    def fetch_file(self, in_path, out_path):
        """ obtain file specified via 'in_path' from the container and place it at 'out_path' """
        super(Connection, self).fetch_file(in_path, out_path)
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._container_id)

        # copy directly through the mounted container filesystem
        real_in_path = self._mount_point + to_bytes(in_path, errors='surrogate_or_strict')
        shutil.copyfile(
            to_bytes(real_in_path, errors='surrogate_or_strict'),
            to_bytes(out_path, errors='surrogate_or_strict')
        )

    def close(self):
        """ unmount container's filesystem """
        super(Connection, self).close()
        rc, stdout, stderr = self._buildah("umount")
        display.vvvvv("RC %s STDOUT %r STDERR %r" % (rc, stdout, stderr))
        self._connected = False
| gpl-3.0 |
anastasia-tarasova/indy-sdk | docs/how-tos/rotate-key/python/step4.py | 4 | 1548 | print_log('\n13. Reading new verkey from wallet\n')
verkey_in_wallet = await did.key_for_local_did(wallet_handle, trust_anchor_did)
print_log('Trust Anchor Verkey in wallet: ', verkey_in_wallet)
print_log('\n14. Building GET_NYM request to get Trust Anchor verkey\n')
get_nym_request = await ledger.build_get_nym_request(trust_anchor_did, trust_anchor_did)
print_log('Get NYM request:')
pprint.pprint(json.loads(get_nym_request))
print_log('\n15. Sending GET_NYM request to ledger\n')
get_nym_response_json = await ledger.submit_request(pool_handle, get_nym_request)
get_nym_response = json.loads(get_nym_response_json)
print_log('GET NYM response:')
pprint.pprint(get_nym_response)
print_log('\n16. Comparing Trust Anchor verkeys: written by Steward (original), '
'current in wallet and current from ledger\n')
print_log('Written by Steward: ', trust_anchor_verkey)
print_log('Current in wallet: ', verkey_in_wallet)
verkey_from_ledger = json.loads(get_nym_response['result']['data'])['verkey']
print_log('Current from ledger: ', verkey_from_ledger)
print_log('Matching: ', verkey_from_ledger == verkey_in_wallet != trust_anchor_verkey)
# Do some cleanup.
print_log('\n17. Closing wallet and pool\n')
await wallet.close_wallet(wallet_handle)
await pool.close_pool_ledger(pool_handle)
print_log('\n18. Deleting created wallet\n')
await wallet.delete_wallet(wallet_config, wallet_credentials)
print_log('\n19. Deleting pool ledger config')
await pool.delete_pool_ledger_config(pool_name)
| apache-2.0 |
babelsberg/babelsberg-r | topaz/objects/constraintobject.py | 1 | 12412 | from rpython.rlib import jit
from topaz.celldict import CellDict, VersionTag
from topaz.constraintinterpreter import ConstrainedVariable
from topaz.module import ClassDef, ModuleDef
from topaz.objects.hashobject import W_HashObject
from topaz.objects.objectobject import W_Object, W_RootObject
from topaz.objects.procobject import W_ProcObject
from topaz.utils.cache import Cache
# Marker class for constraint solver objects
# Marker class for constraint solver objects
class W_ConstraintMarkerObject(W_Object):
    # Base class for everything a solver exposes into constraint expressions.
    # Subclasses (or Ruby-side solver classes) are expected to override the
    # stub methods declared via api() below.
    _attrs_ = []
    classdef = ClassDef("ConstraintObject", W_Object.classdef)

    # Class-body helper: registers a Ruby method `name` whose default
    # implementation raises NotImplementedError, so each solver must provide
    # its own version.
    def api(classdef, name):
        @classdef.method(name)
        def method(self, space, args_w):
            raise space.error(
                space.w_NotImplementedError,
                "%s should have implemented %s" % (
                    space.getclass(self).name,
                    name
                )
            )

    api(classdef, "begin_assign")  # assert equality constraint
    api(classdef, "assign")        # run solver
    api(classdef, "end_assign")    # remove equality constraint
    api(classdef, "value")         # current value
    api(classdef, "readonly!")     # make read-only
    api(classdef, "writable!")     # make writable
    api(classdef, "finalize")      # remove from solver internal structure

    @classdef.singleton_method("allocate")
    def singleton_method_allocate(self, space):
        return W_ConstraintMarkerObject(space, self)

    @classdef.method("and")
    def method_and(self, space, w_rhs):
        # Inside a constraint expression, `a and b` registers the left-hand
        # solver object with the current constraint and yields the right-hand
        # side; outside one, fall back to normal method dispatch.
        w_constraint = space.current_constraint()
        if w_constraint:
            w_constraint.add_constraint_object(self)
            return w_rhs
        else:
            return space.send(
                self,
                "method_missing",
                [space.newstr_fromstr("and"), w_rhs]
            )

    @classdef.method("or")
    def method_or(self, space, w_rhs):
        # Same as #and, but yields the left-hand side as the result.
        w_constraint = space.current_constraint()
        if w_constraint:
            w_constraint.add_constraint_object(self)
            return self
        else:
            return space.send(
                self,
                "method_missing",
                [space.newstr_fromstr("or"), w_rhs]
            )
class W_ConstraintObject(W_ConstraintMarkerObject):
    """A user-level constraint: a predicate block plus the solver objects and
    constrained variables collected while evaluating that block."""
    _attrs_ = ["w_strength", "block", "enabled",
               "constraint_objects_w", "constraint_variables_w",
               "assignments_w", "w_solver", "last_cvar", "is_enum_variable"]
    # NOTE(review): is_enum_variable is declared in _attrs_ but never
    # assigned in this class -- confirm whether it is used elsewhere.
    classdef = ClassDef("Constraint", W_ConstraintMarkerObject.classdef)

    def __init__(self, space):
        W_Object.__init__(self, space)
        self.w_strength = None
        self.block = None
        self.enabled = False
        self.constraint_objects_w = []
        self.constraint_variables_w = []
        self.assignments_w = []
        self.last_cvar = None

    def get_constraint_objects(self):
        return self.constraint_objects_w

    def add_constraint_object(self, w_value):
        self.constraint_objects_w.append(w_value)

    def remove_constraint_objects(self):
        del self.constraint_objects_w[:]

    def has_constraint_objects(self):
        return len(self.constraint_objects_w) > 0

    def add_constraint_variable(self, c_var):
        # Remember every distinct variable; track the most recently seen one.
        if c_var not in self.constraint_variables_w:
            self.constraint_variables_w.append(c_var)
        self.last_cvar = c_var

    def last_constraint_variable(self):
        return self.last_cvar

    def add_assignment(self, space, c_var, w_constraint):
        if c_var in self.assignments_w:
            raise space.error(
                space.w_RuntimeError,
                "multiply assigned variable in constraint execution"
            )
        # BUGFIX: record the assignment so a second assignment to the same
        # variable is actually detected by the check above.
        self.assignments_w.append(c_var)

    @jit.elidable
    def isidentity(self):
        # True iff every collected solver object is an identity constraint
        # (vacuously true for an empty list).
        for w_constraint_object in self.constraint_objects_w:
            if not isinstance(w_constraint_object, W_IdentityConstraintObject):
                return False
        return True

    @classdef.method("enable")
    def method_enable(self, space):
        if not self.enabled:
            for w_constraint_object in self.constraint_objects_w:
                self.enable_constraint_object(space, w_constraint_object)
            self.enabled = True
            return space.w_true
        else:
            return space.w_nil

    def enable_constraint_object(self, space, w_constraint_object):
        # Validate and activate one solver object produced by the predicate.
        if w_constraint_object is space.w_true:
            w_stderr = space.find_const(space.w_object, "STDERR")
            assert isinstance(w_stderr, W_RootObject)
            space.send(
                w_stderr,
                "puts",
                [space.newstr_fromstr("Warning: Constraint expression returned true, re-running it whenever the value changes")]
            )
        elif w_constraint_object is space.w_false or w_constraint_object is space.w_nil:
            raise space.error(
                space.w_ArgumentError,
                "constraint block returned false-y, cannot assert that (did you `require' your solver?)"
            )
        elif not space.respond_to(w_constraint_object, "enable"):
            raise space.error(
                space.w_TypeError,
                ("constraint block did an object (%s:%s) that doesn't respond to #enable " +
                 "(did you `require' your solver?)") % (
                    space.any_to_s(w_constraint_object),
                    space.getclass(w_constraint_object).name
                )
            )
        else:
            # pass the strength through only if one was configured
            arg_w = [] if self.w_strength is None else [self.w_strength]
            space.send(w_constraint_object, "enable", arg_w)

    @classdef.method("disable")
    def method_disable(self, space):
        if self.enabled:
            for w_constraint_object in self.constraint_objects_w:
                if space.respond_to(w_constraint_object, "disable"):
                    space.send(w_constraint_object, "disable")
            self.enabled = False
            return space.w_true
        else:
            return space.w_nil

    @classdef.method("recalculate")
    def method_recalculate(self, space, w_c_cause):
        # Re-evaluate the predicate after w_c_cause changed: tear down the
        # old solver objects and rebuild them from the block.
        if self.enabled:
            if self.isidentity():
                for w_constraint_object in self.constraint_objects_w:
                    assert isinstance(w_constraint_object, W_IdentityConstraintObject)
                    if (w_constraint_object.c_this is w_c_cause or
                            w_constraint_object.c_that is w_c_cause):
                        # do not recalculate if an assignment to one
                        # of the identity-constrained variables is
                        # happening
                        return
            space.send(self, "disable")
            del self.constraint_objects_w[:]
            self.run_predicate(space)
            space.send(self, "enable")

    def run_predicate(self, space):
        # Evaluate the predicate block inside a constraint-construction
        # context; its return value becomes the final constraint object.
        with space.constraint_construction(self):
            w_constraint_object = space.invoke_block(self.block, [])
            self.add_constraint_object(w_constraint_object)

    @classdef.method("primitive_constraints")
    def method_primitive_constraints(self, space):
        # FIX: renamed from method_solver_constraints -- the same Python name
        # was used for two different methods, so the second definition
        # shadowed this one in the class namespace.
        return space.newarray(self.constraint_objects_w[:])

    @classdef.method("constraint_variables")
    def method_constraint_variables(self, space):
        # FIX: renamed from method_solver_constraints (see above).
        vars_w = []
        for w_var in self.constraint_variables_w:
            if w_var._is_solveable(self.get_solver()):
                vars_w.append(w_var._get_external_variable(self.get_solver()))
        return space.newarray(vars_w)

    @classdef.method("predicate")
    def method_constraint_block(self, space):
        return self.block

    @classdef.method("strength")
    def method_strength(self, space):
        return self.w_strength

    @classdef.method("strength=")
    def method_set_strength(self, space, w_strength):
        # FIX: renamed from method_strength -- the setter shared its Python
        # name with the getter above and shadowed it in the class namespace.
        # Toggle the constraint off while swapping strengths so the solver
        # re-asserts it at the new priority.
        enabled = self.enabled
        if enabled:
            space.send(self, "disable")
        self.w_strength = w_strength
        if enabled:
            space.send(self, "enable")
        return self.w_strength

    @classdef.method("solver")
    def method_solver(self, space):
        return self.w_solver or space.w_nil

    @classdef.method("initialize")
    def method_initialize(self, space, w_strength=None, w_options=None, block=None):
        if not block:
            raise space.error(space.w_ArgumentError, "no constraint predicate given")
        if self.block:
            raise space.error(space.w_ArgumentError, "cannot re-initialize Constraint")
        # allow Constraint.new(priority: ..., solver: ...) with a single hash
        if w_strength and space.is_kind_of(w_strength, space.w_hash):
            w_options = w_strength
            w_strength = space.send(w_strength, "[]", [space.newsymbol("priority")])
            if w_strength is space.w_nil:
                w_strength = None
        self.block = block
        self.w_strength = w_strength
        self.w_solver = None
        if w_options:
            if space.is_true(space.send(w_options, "has_key?", [space.newsymbol("solver")])):
                self.set_solver(space, space.send(w_options, "[]", [space.newsymbol("solver")]))
        self.run_predicate(space)
        return self

    def set_solver(self, space, w_solver):
        # May only be set once; propagate the solver to variables collected
        # before a solver was known.
        assert self.w_solver is None
        self.w_solver = w_solver
        for c_var in self.constraint_variables_w:
            c_var._set_solver_for_unbound_constraint(self, w_solver)

    def get_solver(self):
        return self.w_solver

    @classdef.singleton_method("allocate")
    def singleton_method_allocate(self, space):
        return W_ConstraintObject(space)

    @classdef.method("value")
    def method_return_value(self, space):
        # last added constraint object is the return value
        return self.constraint_objects_w[-1]
class W_IdentityConstraintObject(W_ConstraintMarkerObject):
    # Constrains two constrained variables to refer to the identical object.
    # May only appear alone inside a constraint expression.
    _attrs_ = ["enabled", "c_this", "c_that"]
    classdef = ClassDef("IdentityConstraint", W_ConstraintMarkerObject.classdef)

    @classdef.method("initialize")
    def method_initialize(self, space, w_this, w_that, w_c_this, w_c_that):
        # Validate that both sides are plain constrained variables (not
        # derived/calculated values) before recording the identity pair.
        w_current_constraint = space.current_constraint()
        if not w_current_constraint:
            raise space.error(
                space.w_RuntimeError,
                "Identity constraints must be created in constraint expressions"
            )
        if w_c_this is space.w_nil or w_c_that is space.w_nil:
            raise space.error(
                space.w_ArgumentError,
                "could not find two variables with an identity to constraint. Maybe one or both sides are not calculated from variables?"
            )
        assert isinstance(w_c_this, ConstrainedVariable)
        assert isinstance(w_c_that, ConstrainedVariable)
        # each side's runtime value must be exactly the variable itself
        if ((w_c_this.is_solveable(space) and w_this is not w_c_this.get_external_variable(space)) or
                (not w_c_this.is_solveable(space) and w_this is not space.get_value(w_c_this))):
            raise space.error(
                space.w_ArgumentError,
                "the value returned from the left hand side of the identity expression cannot be identity constrained (it may be a calculated value)"
            )
        if ((w_c_that.is_solveable(space) and w_that is not w_c_that.get_external_variable(space)) or
                (not w_c_that.is_solveable(space) and w_that is not space.get_value(w_c_that))):
            raise space.error(
                space.w_ArgumentError,
                "the value returned from the right hand side of the identity expression cannot be identity constrained (it may be a calculated value)"
            )
        if not w_current_constraint.isidentity():
            raise space.error(
                space.w_RuntimeError,
                "identity constraints are not valid in combination with other constraints in the same constraint expression"
            )
        self.enabled = False
        self.c_this = w_c_this
        self.c_that = w_c_that

    @classdef.method("enable")
    def method_enable(self, space):
        # idempotent: activating twice has no additional effect
        if not self.enabled:
            self.c_that.constrain_identity(space, self.c_this)
            self.enabled = True

    @classdef.method("disable")
    def method_disable(self, space):
        if self.enabled:
            self.c_that.unconstrain_identity(space, self.c_this)
            self.enabled = False

    @classdef.singleton_method("allocate")
    def singleton_method_allocate(self, space):
        return W_IdentityConstraintObject(space)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.