| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
ghchinoy/tensorflow | tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py | 35 | 9112 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The RelaxedBernoulli distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import logistic
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid
# Bijectors must be directly imported because `remove_undocumented` prevents
# individual file imports.
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class RelaxedBernoulli(transformed_distribution.TransformedDistribution):
  """RelaxedBernoulli distribution with temperature and logits parameters.

  The RelaxedBernoulli is a distribution over the unit interval (0,1), which
  continuously approximates a Bernoulli. The degree of approximation is
  controlled by a temperature: as the temperature goes to 0 the
  RelaxedBernoulli becomes discrete with a distribution described by the
  `logits` or `probs` parameters, as the temperature goes to infinity the
  RelaxedBernoulli becomes the constant distribution that is identically 0.5.

  The RelaxedBernoulli distribution is a reparameterized continuous
  distribution that is the binary special case of the RelaxedOneHotCategorical
  distribution (Maddison et al., 2016; Jang et al., 2016). For details on the
  binary special case see the appendix of Maddison et al. (2016) where it is
  referred to as BinConcrete. If you use this distribution, please cite both
  papers.

  Some care needs to be taken for loss functions that depend on the
  log-probability of RelaxedBernoullis, because computing log-probabilities of
  the RelaxedBernoulli can suffer from underflow issues. In many cases loss
  functions such as these are invariant under invertible transformations of
  the random variables. The KL divergence, found in the variational autoencoder
  loss, is an example. Because RelaxedBernoullis are sampled by a Logistic
  random variable followed by a `tf.sigmoid` op, one solution is to treat
  the Logistic as the random variable and `tf.sigmoid` as downstream. The
  KL divergences of two Logistics, which are always followed by a `tf.sigmoid`
  op, is equivalent to evaluating KL divergences of RelaxedBernoulli samples.
  See Maddison et al., 2016 for more details where this distribution is called
  the BinConcrete.

  An alternative approach is to evaluate Bernoulli log probability or KL
  directly on relaxed samples, as done in Jang et al., 2016. In this case,
  guarantees on the loss are usually violated. For instance, using a Bernoulli
  KL in a relaxed ELBO is no longer a lower bound on the log marginal
  probability of the observation. Thus care and early stopping are important.

  #### Examples

  Creates three continuous distributions, which approximate 3 Bernoullis with
  probabilities (0.1, 0.5, 0.4). Samples from these distributions will be in
  the unit interval (0,1).

  ```python
  temperature = 0.5
  p = [0.1, 0.5, 0.4]
  dist = RelaxedBernoulli(temperature, probs=p)
  ```

  Creates three continuous distributions, which approximate 3 Bernoullis with
  logits (-2, 2, 0). Samples from these distributions will be in
  the unit interval (0,1).

  ```python
  temperature = 0.5
  logits = [-2, 2, 0]
  dist = RelaxedBernoulli(temperature, logits=logits)
  ```

  Creates three continuous distributions, whose sigmoid approximate 3
  Bernoullis with logits (-2, 2, 0).

  ```python
  temperature = 0.5
  logits = [-2, 2, 0]
  dist = Logistic(logits/temperature, 1./temperature)
  samples = dist.sample()
  sigmoid_samples = tf.sigmoid(samples)
  # sigmoid_samples has the same distribution as samples from
  # RelaxedBernoulli(temperature, logits=logits)
  ```

  Creates three continuous distributions, which approximate 3 Bernoullis with
  logits (-2, 2, 0). Samples from these distributions will be in
  the unit interval (0,1). Because the temperature is very low, samples from
  these distributions are almost discrete, usually taking values very close to
  0 or 1.

  ```python
  temperature = 1e-5
  logits = [-2, 2, 0]
  dist = RelaxedBernoulli(temperature, logits=logits)
  ```

  Creates three continuous distributions, which approximate 3 Bernoullis with
  logits (-2, 2, 0). Samples from these distributions will be in
  the unit interval (0,1). Because the temperature is very high, samples from
  these distributions are usually close to the (0.5, 0.5, 0.5) vector.

  ```python
  temperature = 100
  logits = [-2, 2, 0]
  dist = RelaxedBernoulli(temperature, logits=logits)
  ```

  Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
  A Continuous Relaxation of Discrete Random Variables. 2016.

  Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with
  Gumbel-Softmax. 2016.
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               temperature,
               logits=None,
               probs=None,
               validate_args=False,
               allow_nan_stats=True,
               name="RelaxedBernoulli"):
    """Construct RelaxedBernoulli distributions.

    Args:
      temperature: An 0-D `Tensor`, representing the temperature
        of a set of RelaxedBernoulli distributions. The temperature should be
        positive.
      logits: An N-D `Tensor` representing the log-odds
        of a positive event. Each entry in the `Tensor` parametrizes
        an independent RelaxedBernoulli distribution where the probability of
        an event is sigmoid(logits). Only one of `logits` or `probs` should be
        passed in.
      probs: An N-D `Tensor` representing the probability of a positive event.
        Each entry in the `Tensor` parameterizes an independent Bernoulli
        distribution. Only one of `logits` or `probs` should be passed in.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: If both `probs` and `logits` are passed, or if neither.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[logits, probs, temperature]) as name:
      # Only pay for the positivity check on `temperature` when the caller
      # asked for argument validation.
      with ops.control_dependencies([check_ops.assert_positive(temperature)]
                                    if validate_args else []):
        self._temperature = array_ops.identity(temperature, name="temperature")
      # Exactly one of `logits`/`probs` is accepted; the helper derives the
      # other and raises ValueError otherwise.
      self._logits, self._probs = distribution_util.get_logits_and_probs(
          logits=logits, probs=probs, validate_args=validate_args)
      # A RelaxedBernoulli sample is sigmoid(Logistic(logits/T, 1/T)), so the
      # distribution is expressed as a Sigmoid-transformed Logistic.
      super(RelaxedBernoulli, self).__init__(
          distribution=logistic.Logistic(
              self._logits / self._temperature,
              1. / self._temperature,
              validate_args=validate_args,
              allow_nan_stats=allow_nan_stats,
              name=name + "/Logistic"),
          bijector=Sigmoid(validate_args=validate_args),
          validate_args=validate_args,
          name=name)
    self._parameters = parameters

  @staticmethod
  def _param_shapes(sample_shape):
    return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}

  @property
  def temperature(self):
    """Distribution parameter for the relaxation temperature."""
    return self._temperature

  @property
  def logits(self):
    """Log-odds of `1`."""
    return self._logits

  @property
  def probs(self):
    """Probability of `1`."""
    return self._probs
| apache-2.0 |
denisff/python-for-android | python3-alpha/python3-src/Lib/lib2to3/tests/support.py | 146 | 1713 | """Support code for test_*.py files"""
# Author: Collin Winter
# Python imports
import unittest
import sys
import os
import os.path
import re
from textwrap import dedent
# Local imports
from lib2to3 import pytree, refactor
from lib2to3.pgen2 import driver
# Paths used to locate the lib2to3 project and its grammar definition.
test_dir = os.path.dirname(__file__)
proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
grammar = driver.load_grammar(grammar_path)
# NOTE: this deliberately rebinds the imported `driver` module to a Driver
# instance; from here on `driver` refers to the parser object.
driver = driver.Driver(grammar, convert=pytree.convert)
def parse_string(string):
    """Parse `string` into a pytree after normalizing it with reformat()."""
    source = reformat(string)
    return driver.parse_string(source, debug=True)
def run_all_tests(test_mod=None, tests=None):
    """Run `tests` (or every test found in `test_mod`) with a verbose runner."""
    if tests is None:
        loader = unittest.TestLoader()
        tests = loader.loadTestsFromModule(test_mod)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(tests)
def reformat(string):
    """Dedent `string` and guarantee it ends with a blank line."""
    return "%s\n\n" % dedent(string)
def get_refactorer(fixer_pkg="lib2to3", fixers=None, options=None):
    """
    A convenience function for creating a RefactoringTool for tests.

    `fixers` is a list of fixer names for the RefactoringTool to use; when
    omitted, every fixer in "<fixer_pkg>.fixes" is used. `options` is an
    optional dictionary of options passed through to the RefactoringTool.
    """
    if fixers is None:
        fixer_names = refactor.get_fixers_from_package(fixer_pkg + ".fixes")
    else:
        fixer_names = ["%s.fixes.fix_%s" % (fixer_pkg, fix) for fix in fixers]
    return refactor.RefactoringTool(fixer_names, options or {}, explicit=True)
def all_project_files():
    """Yield the path of every .py file under the project directory."""
    for dirpath, _dirnames, filenames in os.walk(proj_dir):
        for name in filenames:
            if not name.endswith(".py"):
                continue
            yield os.path.join(dirpath, name)
# Re-exported so test modules can subclass support.TestCase directly.
TestCase = unittest.TestCase
| apache-2.0 |
timm/timmnix | pypy3-v5.5.0-linux64/lib-python/3/test/test_devpoll.py | 3 | 3573 | # Test case for the select.devpoll() function
# Initial tests are copied as is from "test_poll.py"
import os, select, random, unittest, sys
from test.support import TESTFN, run_unittest, cpython_only
# select.devpoll exists only on Solaris/illumos (/dev/poll); skip the whole
# module elsewhere rather than fail at class-definition time.
try:
    select.devpoll
except AttributeError:
    raise unittest.SkipTest("select.devpoll not defined -- skipping test_devpoll")
def find_ready_matching(ready, flag):
    """Return the fds from (fd, mode) pairs whose mode has `flag` set."""
    return [fd for fd, mode in ready if mode & flag]
class DevPollTests(unittest.TestCase):
    """Exercises the Solaris select.devpoll() polling object."""

    def test_devpoll1(self):
        # Basic functional test of poll object
        # Create a bunch of pipes and test that poll works with them.
        p = select.devpoll()
        NUM_PIPES = 12
        MSG = b" This is a test."
        MSG_LEN = len(MSG)
        readers = []
        writers = []
        r2w = {}  # read fd -> paired write fd
        w2r = {}  # write fd -> paired read fd
        for i in range(NUM_PIPES):
            rd, wr = os.pipe()
            # register() then modify() also exercises the modify() path.
            p.register(rd)
            p.modify(rd, select.POLLIN)
            p.register(wr, select.POLLOUT)
            readers.append(rd)
            writers.append(wr)
            r2w[rd] = wr
            w2r[wr] = rd
        bufs = []
        # Drain one random writable pipe per iteration until all are closed.
        while writers:
            ready = p.poll()
            ready_writers = find_ready_matching(ready, select.POLLOUT)
            if not ready_writers:
                self.fail("no pipes ready for writing")
            wr = random.choice(ready_writers)
            os.write(wr, MSG)
            ready = p.poll()
            ready_readers = find_ready_matching(ready, select.POLLIN)
            if not ready_readers:
                self.fail("no pipes ready for reading")
            # Only the pipe just written to should be readable.
            self.assertEqual([w2r[wr]], ready_readers)
            rd = ready_readers[0]
            buf = os.read(rd, MSG_LEN)
            self.assertEqual(len(buf), MSG_LEN)
            bufs.append(buf)
            os.close(r2w[rd]) ; os.close(rd)
            p.unregister(r2w[rd])
            p.unregister(rd)
            writers.remove(r2w[rd])
        self.assertEqual(bufs, [MSG] * NUM_PIPES)

    def test_timeout_overflow(self):
        # Timeouts must be rejected once they exceed the C-level range.
        pollster = select.devpoll()
        w, r = os.pipe()
        pollster.register(w)
        pollster.poll(-1)
        self.assertRaises(OverflowError, pollster.poll, -2)
        self.assertRaises(OverflowError, pollster.poll, -1 << 31)
        self.assertRaises(OverflowError, pollster.poll, -1 << 64)
        pollster.poll(0)
        pollster.poll(1)
        pollster.poll(1 << 30)
        self.assertRaises(OverflowError, pollster.poll, 1 << 31)
        self.assertRaises(OverflowError, pollster.poll, 1 << 63)
        self.assertRaises(OverflowError, pollster.poll, 1 << 64)

    def test_events_mask_overflow(self):
        pollster = select.devpoll()
        w, r = os.pipe()
        pollster.register(w)
        # Issue #17919: event masks outside the unsigned-short range overflow.
        self.assertRaises(OverflowError, pollster.register, 0, -1)
        self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
        self.assertRaises(OverflowError, pollster.modify, 1, -1)
        self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)

    @cpython_only
    def test_events_mask_overflow_c_limits(self):
        from _testcapi import USHRT_MAX
        pollster = select.devpoll()
        w, r = os.pipe()
        pollster.register(w)
        # Issue #17919: check the exact C USHRT_MAX boundary as well.
        self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
        self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
def test_main():
    # Entry point used by regrtest and by direct execution below.
    run_unittest(DevPollTests)
# Allow running this test file directly.
if __name__ == '__main__':
    test_main()
| mit |
cloudcomputinghust/IoT | platform_manager/api/views.py | 1 | 16318 | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from rest_framework.views import APIView
from . import models
# from .utils import restrict_search
from django.http import HttpResponse
import json
import http.client
import os.path
from wsgiref.util import FileWrapper
from .models import *
from influxdb import InfluxDBClient
# Kubernetes API server endpoint (host:port) used for all cluster calls.
KUBE_API_DOMAIN = '128.199.242.5:8080'
# Sensor co-ordinator service queried for sensor config files.
SENSOR_ORDINATOR_SERVICE_HOST = 'localhost:9090'
# MIME types keyed by the short names below.
CONTENT_TYPE = {'JSON': "application/json", 'TEXT': 'text/plain'}
RESPONSE_JSON_TYPE_DEFINE = 'JSON'
RESPONSE_TEXT_TYPE_DEFINE = 'TEXT'
# ------------------------------------------------------------------------------------
# Exposed service
# ------------------------------------------------------------------------------------
# CRUD with platform
# CRUD with platform
class PlatformView(APIView):
    """CRUD endpoint for IoT platform instances managed on Kubernetes."""

    @csrf_exempt
    def get(self, request):
        """Return one platform (``platform_id`` query param) or all platforms."""
        message = self._get_platform(request)
        return HttpResponse(json.dumps({"status": "ok", "message": message}),
                            content_type="application/json")

    @csrf_exempt
    def post(self, request):
        """Deploy a new platform replication controller."""
        message = self._deploy_platform()
        return HttpResponse(json.dumps({"status": "ok", "message": message}),
                            content_type="application/json")

    # TODO: implement platform update.
    @csrf_exempt
    def put(self, request):
        pass

    @csrf_exempt
    def delete(self, request):
        """Delete the platform named by the ``platform_id`` query param.

        BUG FIX: Django/DRF requests have no ``DELETE`` attribute and the
        original code read the wrong key ('sensor_id'), then referenced
        ``message`` unbound when the key was missing. Read the query string
        and always define ``message``.
        """
        message = "platform_id is required"
        platform_id = request.GET.get('platform_id')
        if platform_id:
            message = self._delete_sensor(platform_id=platform_id)
        return HttpResponse(json.dumps({"status": "ok", "message": message}),
                            content_type="application/json")

    def _get_platform(self, request):
        """Return JSON-serializable platform data (never an HttpResponse).

        BUG FIX: the original returned HttpResponse objects here, which the
        public ``get`` then passed to json.dumps (TypeError), and it called
        the nonexistent ``request.GET.getid`` (AttributeError).
        """
        platform_id = request.GET.get('platform_id')
        if platform_id:
            platform_detail = PlatformDeploymentModel.objects.get_platform_detail_by_id(
                platform_id=platform_id)
            if platform_detail:
                return platform_detail
            return "This platform is not existed"
        return PlatformDeploymentModel.objects.get_all_platform_detail()

    def _deploy_platform(self):
        """Create a platform replication controller via the Kubernetes API."""
        return deploy_platform()

    def _delete_sensor(self, platform_id):
        """DELETE the platform's replication controller from Kubernetes.

        Hits /api/v1/namespaces/kube-system/replicationcontrollers/<id> and
        returns the raw API response body.
        """
        namespace = 'kube-system'
        uri_api = '/api/v1/namespaces/{namespace}/replicationcontrollers/{platform_id}'.format(
            namespace=namespace, platform_id=platform_id)
        con = http.client.HTTPConnection(KUBE_API_DOMAIN)
        header = {"Content-type": "application/json"}
        con.request('DELETE', uri_api, '', header)
        response = con.getresponse()
        return response.read().decode()

    def _scale_platform(self):
        """Add one replica to the running platform controller."""
        return scale_platform()
class PlatformRegistration(APIView):
    """Endpoint called by platform instances to register themselves."""

    def get(self, request):
        '''
        Register a platform (temporarily used in place of the POST method),
        or list all assignments when no ``platform`` parameter is given.

        :param request: expects ``platform`` as "cluster_ip,platform_id"
        :return: JSON envelope with the assignment result or all assignments
        '''
        data = dict(request.GET)
        if data.get('platform'):
            ### register to PR (platform registry)
            # `platform` arrives as "cluster_ip,platform_id".
            platform_detail = data['platform'][0].split(',')
            cluster_ip = platform_detail[0]
            platform_id = platform_detail[1]
            # add to db
            item = PlatformDeploymentModel(platform_id=platform_id, platform_ip=cluster_ip)
            item.save()
            ### register to PA (platform assignment)
            message = assign_sensor_for_platform(platform_id=platform_id)
            return success_response(message=message)
        # get all registration
        message = PlatformAssignmentModel.objects.get_all_assignment()
        return success_response(message)

    def post(self, request):
        '''
        Register a platform.

        NOTE(review): not implemented yet — returns None, which DRF will
        treat as an error; the GET handler above is used instead for now.
        '''
        return
# TODO
class PlatformAssignment(APIView):
    """Manages sensor-to-platform assignment records."""

    def put(self, request):
        # Callback after a platform confirms a sensor registration.
        # NOTE(review): Django/DRF requests expose no ``PUT`` attribute;
        # this handler presumably relies on custom middleware — confirm.
        assign_status = request.PUT.get('assign_status')[0]
        platform_detail = request.PUT.get('platform')[0].split(',')
        sensor_id = request.PUT.get('sensor_id')[0]
        platform_id = platform_detail[1]
        # post_receive_platform_register
        # update db, change temp -> active
        message = PlatformAssignmentModel.objects.update_assigment_status(sensor_id=sensor_id, platform_id=platform_id,
                                                                          assign_status=1)
        return success_response(message)

    def post(self, request):
        # post_receive_sensor_register: a new sensor announces itself and gets
        # an assignment row with no platform yet.
        data = dict(request.POST)
        sensor_id = data['sensor_id'][0]
        # create assignment
        item = PlatformAssignmentModel(sensor_id=sensor_id, platform_id='')
        item.save()
        return success_response()

    def get(self, request):
        # get_get_sensor_assign_with_platform
        '''
        Return sensor data assigned to a platform.

        :param: ``platform`` ("ip,id"), ``resource`` ('config_file' or
            'sensor_id'), optionally ``sensor_id`` and ``action``
        :return: config file contents, sensor ids, or an update confirmation
        '''
        platform_detail = request.GET.get('platform').split(',')
        platform_id = platform_detail[1]
        resource_type = request.GET.get('resource')
        # Look up sensors temporarily assigned to this platform, then return
        # the sensor definition (config file).
        if resource_type and resource_type == 'config_file':
            sensor_id = request.GET.get('sensor_id')
            # Call the co-ordinator service to fetch the sensor config, e.g.
            # localhost:9090/sensor/register?sensor_id=sensor_1&resource=defined_file
            uri = '/sensor/register?sensor_id={sensor_id}&resource=defined_file'.format(sensor_id=sensor_id)
            con = http.client.HTTPConnection(SENSOR_ORDINATOR_SERVICE_HOST)
            header = {"Content-type": "text/plain"}
            con.request('GET', uri, '', header)
            response = con.getresponse()
            raw = response.read()
            response = HttpResponse(raw, content_type='text/plain')
            response['Content-Length'] = len(raw)
            return response
        # Look up sensors temporarily assigned to this platform, then return
        # their sensor ids.
        if resource_type and resource_type == 'sensor_id':
            message = PlatformAssignmentModel.objects.get_sensor_id_assigned_temp(status=0, platform_id=platform_id)
            response = HttpResponse(message, content_type='text/plain')
            response['Content-Length'] = len(message)
            return response
        action = request.GET.get('action')
        if action and action == 'register_succeed':
            sensor_id = request.GET.get('sensor_id')
            if platform_id and sensor_id:
                # update db, change temp -> active
                message = PlatformAssignmentModel.objects.update_assigment_status(sensor_id=sensor_id,
                                                                                  platform_id=platform_id,
                                                                                  assign_status=1)
                return success_response(message=message)
# ------------------------------------------------------------------------------------
# Daemon service
# ------------------------------------------------------------------------------------
# TODO
class Scheduler:
    """Daemon-side scheduler (mostly unimplemented stubs)."""

    def engine_scheduler(self):
        # TODO: - collect platform monitored metric - scale and assign node
        # for platform - re-assign platform for sensor
        return

    def _collect_metric_data(self):
        # Queries the cluster's InfluxDB (heapster 'k8s' database) for daily
        # per-node memory usage. The result is currently discarded.
        client = InfluxDBClient('188.166.238.158', 32485, 'root', 'root', 'k8s')
        result = client.query('SELECT sum("value") FROM "memory/usage" WHERE "type" = \'node\' AND time > now() - 1d GROUP BY time(1d), "nodename" fill(null);')
        # Sample series: ('memory/usage', {'nodename': '128.199.242.5'}) -
        # [{'sum': 1275429384192, 'time': '2017-02-25T00:00:00Z'},
        #  {'sum': 1038484692992, 'time': '2017-02-26T00:00:00Z'}]
        return

    def _get_platform(self):
        pass

    def _deploy_platform(self):
        return deploy_platform()

    def _deploy_scale_platform(self):
        return scale_platform()

    def _delete_platform(self):
        pass

    def _scale_platform(self):
        pass
# ------------------------------------------------------------------------------------
# Utils function
# ------------------------------------------------------------------------------------
def deploy_platform():
    """Create the openHAB platform ReplicationController via the K8s API.

    POSTs a one-replica ReplicationController spec to the kube-system
    namespace and returns the raw (decoded) API response body.
    """
    namespace = 'kube-system'
    platform_name = 'openhab-platform'
    platform_config = 'openhab-cfg'          # ConfigMap holding openhab.cfg
    co_ordinator_config_name = 'CO_ORDINATOR_DOMAIN'
    node_selector = {"fog_node": "worker_1"}  # pin pods to the fog worker node
    deploy_api = '/api/v1/namespaces/{namespace}/replicationcontrollers'.format(namespace=namespace)
    con = http.client.HTTPConnection(KUBE_API_DOMAIN)
    header = {"Content-type": "application/json"}
    body = {
        "kind": "ReplicationController",
        "apiVersion": "v1",
        "metadata": {
            "name": platform_name,
            "namespace": "kube-system"
        },
        "spec": {
            "replicas": 1,
            "selector": {"app": platform_name},
            "template": {
                "metadata": {
                    "name": platform_name,
                    "labels": {"app": platform_name}
                },
                "spec": {
                    "containers": [{
                        "name": platform_name,
                        "image": "huanphan/openhab:0.3",
                        # "ports": [
                        #     {
                        #         "hostPort": 8080,
                        #         "containerPort": 8080
                        #     }
                        # ],
                        # Mount only openhab.cfg from the ConfigMap.
                        "volumeMounts": [
                            {
                                "name": platform_config,
                                "mountPath": "/openhab/configurations/openhab.cfg",
                                "subPath": "openhab.cfg"
                            }
                        ],
                        # Inject the co-ordinator address from a ConfigMap.
                        "env": [
                            {
                                "name": co_ordinator_config_name,
                                "valueFrom": {
                                    "configMapKeyRef": {
                                        "name": "co-ordinator-config",
                                        "key": "co-ordinator.domain"
                                    }
                                }
                            }
                        ]
                    }],
                    "volumes": [{
                        "name": platform_config,
                        "configMap": {
                            "name": platform_config
                        }
                    }],
                    "restartPolicy": "Always",
                    "nodeSelector": node_selector
                }
            }
        }
    }
    con.request('POST', deploy_api, json.dumps(body).encode('utf-8'), header)
    response = con.getresponse()
    raw = response.read().decode()
    return raw
def scale_platform():
    """Increase the platform ReplicationController's replica count by one.

    Returns the new replica count on success, 1 when a fresh controller had
    to be created, or False when the API call fails.
    """
    # get current replicas
    platform_name = 'openhab-platform'
    namespace = 'kube-system'
    platform_config = 'openhab-cfg'
    co_ordinator_config_name = 'CO_ORDINATOR_DOMAIN'
    node_selector = {"fog_node": "worker_1"}
    con = http.client.HTTPConnection(KUBE_API_DOMAIN)
    header = {"Content-type": "application/json"}
    uri = '/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/scale'.format(namespace=namespace,
                                                                                      name=platform_name)
    con.request('GET', uri, '', header)
    response = con.getresponse()
    data = json.loads(response.read().decode())
    # if there aren't any platform instance, then we create a new one
    if not data.get('spec', ''):
        print(deploy_platform())
        return 1
    current_replicas = data['spec']['replicas']
    # scale
    # uri = '/api/v1/proxy/namespaces/{namespace}/services/kubernetes-dashboard' \
    #       '/api/v1/replicationcontroller/{namespace}/{name}/update/pod'.format(namespace=namespace,
    #                                                                            name=platform_name)
    # body = {
    #     'replicas': current_replicas + 1
    # }
    uri = '/api/v1/namespaces/{namespace}/replicationcontrollers/{platform_name}'.format(namespace=namespace,
                                                                                         platform_name=platform_name)
    # Resubmit the full RC spec with replicas incremented by one.
    # NOTE(review): the Kubernetes API expects PUT (replace) on a named
    # replicationcontroller; POST below likely returns an error — confirm.
    body = {
        "kind": "ReplicationController",
        "apiVersion": "v1",
        "metadata": {
            "name": platform_name,
            "namespace": "kube-system"
        },
        "spec": {
            "replicas": int(current_replicas+1),
            "selector": {"app": platform_name},
            "template": {
                "metadata": {
                    "name": platform_name,
                    "labels": {"app": platform_name}
                },
                "spec": {
                    "containers": [{
                        "name": platform_name,
                        "image": "huanphan/openhab:0.3",
                        # "ports": [
                        #     {
                        #         "hostPort": 8080,
                        #         "containerPort": 8080
                        #     }
                        # ],
                        "volumeMounts": [
                            {
                                "name": platform_config,
                                "mountPath": "/openhab/configurations/openhab.cfg",
                                "subPath": "openhab.cfg"
                            }
                        ],
                        "env": [
                            {
                                "name": co_ordinator_config_name,
                                "valueFrom": {
                                    "configMapKeyRef": {
                                        "name": "co-ordinator-config",
                                        "key": "co-ordinator.domain"
                                    }
                                }
                            }
                        ]
                    }],
                    "volumes": [{
                        "name": platform_config,
                        "configMap": {
                            "name": platform_config
                        }
                    }],
                    "restartPolicy": "Always",
                    "nodeSelector": node_selector
                }
            }
        }
    }
    con.request('POST', uri, json.dumps(body).encode('utf-8'), header)
    response = con.getresponse()
    # An empty body is treated as success here.
    if response.read().decode() == '':
        return current_replicas + 1
    return False
def assign_sensor_for_platform(platform_id):
    """Tentatively assign one unassigned sensor to `platform_id`.

    Returns the model layer's status message.
    """
    # TODO: - assign platform for sensor by two algorithm: + round-robin
    #       + first-in first-serve
    # Get sensor id
    sensor_id = PlatformAssignmentModel.objects.get_sensor_not_assign()
    # Temporary update (assign_status=0 marks a provisional assignment).
    message = PlatformAssignmentModel.objects.update_assignment_by_sensor_id(sensor_id=sensor_id,
                                                                             platform_id=platform_id, assign_status=0)
    return message
# TODO
def assign_platform_on_node():
    # TODO: unimplemented stub.
    # collect monitor metric from node
    # schedule
    return
def success_response(message='No message'):
    """Wrap `message` in the standard {"status": "ok"} JSON envelope."""
    payload = {"status": "ok", "message": message}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def error_response(message='No message'):
    """Wrap `message` in the standard {"status": "error"} JSON envelope."""
    payload = {"status": "error", "message": message}
    return HttpResponse(json.dumps(payload), content_type="application/json")
# client = InfluxDBClient('188.166.238.158', 32485, 'root', 'root', 'k8s')
# client.query('SELECT sum("value") FROM "memory/usage" WHERE "type" = \'node\' AND "nodename" = \'128.199.242.5\' AND time > now() - 1d GROUP BY time(10m), "nodename" fill(null);') | mit |
lucashmorais/x-Bench | mozmill-env/python/Lib/doctest.py | 44 | 105210 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
from collections import namedtuple
# (failed, attempted) counts returned by the doctest runner entry points.
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
# Registry mapping option-flag names to their power-of-two bit values.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    # Create a new flag unless `name` is already known.
    return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))

# Flags that affect how expected and actual output are compared.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    SKIP |
                    IGNORE_EXCEPTION_DETAIL)

# Flags that control how failures are reported.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')

REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
    """
    Return the compiler-flags associated with the future features that
    have been imported into the given namespace (globs).
    """
    flags = 0
    for feature_name in __future__.all_feature_names:
        candidate = globs.get(feature_name)
        # Only count it when the name is bound to the genuine feature object.
        if candidate is getattr(__future__, feature_name):
            flags |= candidate.compiler_flag
    return flags
def _normalize_module(module, depth=2):
    """
    Return the module specified by `module`.  In particular:
      - If `module` is a module, then return module.
      - If `module` is a string, then import and return the
        module with that name.
      - If `module` is None, then return the calling module.
        The calling module is assumed to be the module of
        the stack frame at the given depth in the call stack.
    """
    if inspect.ismodule(module):
        return module
    elif isinstance(module, (str, unicode)):
        # NOTE: `unicode` exists only on Python 2; this file predates Python 3.
        return __import__(module, globals(), locals(), ["*"])
    elif module is None:
        # Walk `depth` frames up the stack to find the caller's module.
        return sys.modules[sys._getframe(depth).f_globals['__name__']]
    else:
        raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
    # Resolve `filename` (optionally relative to `package`) and return a
    # (file_contents, filename) pair with universal-newline translation.
    if module_relative:
        package = _normalize_module(package, 3)
        filename = _module_relative_path(package, filename)
    if hasattr(package, '__loader__'):
        # Prefer the package loader (works inside zip imports).
        if hasattr(package.__loader__, 'get_data'):
            file_contents = package.__loader__.get_data(filename)
            # get_data() opens files as 'rb', so one must do the equivalent
            # conversion as universal newlines would do.
            return file_contents.replace(os.linesep, '\n'), filename
    # 'U' universal-newlines mode is the Python 2 spelling.
    with open(filename, 'U') as f:
        return f.read(), filename
# Use sys.stdout's encoding for output, falling back to UTF-8 when stdout
# has no encoding attribute (e.g. replaced or detached streams).
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
def _indent(s, indent=4):
    """
    Add the given number of space characters to the beginning of
    every non-blank line in `s`, and return the result.
    If the string `s` is Unicode, it is encoded using the stdout
    encoding and the `backslashreplace` error handler.
    """
    # Python 2 only: normalize unicode to a byte string first.
    if isinstance(s, unicode):
        s = s.encode(_encoding, 'backslashreplace')
    # This regexp matches the start of non-blank lines:
    return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
if not self.buf:
# Reset it to an empty string, to make sure it's not unicode.
self.buf = ''
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Return True if `got` matches `want`, where each occurrence of
    ELLIPSIS_MARKER in `want` acts as a wildcard matching any
    (possibly empty) substring of `got`.  Runs in worst-case linear
    time.

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got
    # Find "the real" strings.
    ws = want.split(ELLIPSIS_MARKER)
    assert len(ws) >= 2
    # Deal with exact matches possibly needed at one or both ends.
    startpos, endpos = 0, len(got)
    w = ws[0]
    if w:   # starts with exact match
        if got.startswith(w):
            startpos = len(w)
            del ws[0]
        else:
            return False
    w = ws[-1]
    if w:   # ends with exact match
        if got.endswith(w):
            endpos -= len(w)
            del ws[-1]
        else:
            return False
    if startpos > endpos:
        # Exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False
    # For the rest, we only need to find the leftmost non-overlapping
    # match for each piece.  If there's no overall match that way alone,
    # there's no overall match period.
    for w in ws:
        # w may be '' at times, if there are consecutive ellipses, or
        # due to an ellipsis at the start or end of `want`.  That's OK.
        # Search for an empty string succeeds, and doesn't change startpos.
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)
    return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
def _strip_exception_details(msg):
# Support for IGNORE_EXCEPTION_DETAIL.
# Get rid of everything except the exception name; in particular, drop
# the possibly dotted module path (if any) and the exception message (if
# any). We assume that a colon is never part of a dotted name, or of an
# exception name.
# E.g., given
# "foo.bar.MyError: la di da"
# return "MyError"
# Or for "abc.def" or "abc.def:\n" return "def".
start, end = 0, len(msg)
# The exception name must appear on the first line.
i = msg.find("\n")
if i >= 0:
end = i
# retain up to the first colon (if any)
i = msg.find(':', 0, end)
if i >= 0:
end = i
# retain just the exception name
i = msg.rfind('.', 0, end)
if i >= 0:
start = i+1
return msg[start: end]
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # `out` is the stream that receives the debugger's own output.
        self.__out = out
        # Set once set_trace() is called; see set_continue() below.
        self.__debugger_used = False
        pdb.Pdb.__init__(self, stdout=out)
        # still use input() to get user input
        self.use_rawinput = 1
    def set_trace(self, frame=None):
        # Enter the debugger, defaulting to the caller's frame.
        self.__debugger_used = True
        if frame is None:
            frame = sys._getframe().f_back
        pdb.Pdb.set_trace(self, frame)
    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)
    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream only while the debugger
        # itself runs; the traced code sees the real stdout.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example, consisting of source code and expected
    output.  `Example` defines the following attributes:
      - source: A single Python statement, always ending with a newline.
        The constructor adds a newline if needed.
      - want: The expected output from running the source code (either
        from stdout, or a traceback in case of exception).  `want` ends
        with a newline unless it's empty, in which case it's an empty
        string.  The constructor adds a newline if needed.
      - exc_msg: The exception message generated by the example, if
        the example is expected to generate an exception; or `None` if
        it is not expected to generate an exception.  This exception
        message is compared against the return value of
        `traceback.format_exception_only()`.  `exc_msg` ends with a
        newline unless it's `None`.  The constructor adds a newline
        if needed.
      - lineno: The line number within the DocTest string containing
        this Example where the Example begins.  This line number is
        zero-based, with respect to the beginning of the DocTest.
      - indent: The example's indentation in the DocTest string.
        I.e., the number of space characters that precede the
        example's first prompt.
      - options: A dictionary mapping from option flags to True or
        False, which is used to override default options for this
        example.  Any option flags not contained in this dictionary
        are left at their default value (as specified by the
        DocTestRunner's optionflags).  By default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: source, want and exc_msg must end in a newline
        # (want may be empty; exc_msg may be None).
        if not source.endswith('\n'):
            source = source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options
        self.exc_msg = exc_msg
    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return (self.source == other.source and
                self.want == other.want and
                self.lineno == other.lineno and
                self.indent == other.indent and
                self.options == other.options and
                self.exc_msg == other.exc_msg)
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # `options` is a dict and therefore unhashable; hash on the
        # remaining attributes only.
        return hash((self.source, self.want, self.lineno, self.indent,
                     self.exc_msg))
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:
      - examples: the list of examples.
      - globs: The namespace (aka globals) that the examples should
        be run in.
      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).
      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.
      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.
      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # Copy `globs` so running the examples cannot mutate the
        # caller's namespace.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno
    def __repr__(self):
        # Summarize the example count for a compact display.
        if len(self.examples) == 0:
            examples = 'no examples'
        elif len(self.examples) == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % len(self.examples)
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))
    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return self.examples == other.examples and \
               self.docstring == other.docstring and \
               self.globs == other.globs and \
               self.name == other.name and \
               self.filename == other.filename and \
               self.lineno == other.lineno
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # `examples` (a list) and `globs` (a dict) are unhashable, so
        # hash only the identifying metadata.
        return hash((self.docstring, self.name, self.filename, self.lineno))
    # This lets us sort tests by name:
    def __cmp__(self, other):
        # NOTE: __cmp__ is honored by Python 2 only.
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
            (?:\n [ ]* \.\.\. .*)*) # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$) # Not a blank line
                    (?![ ]*>>>) # Not a line starting with PS1
                    .+$\n? # But any other line
                 )*)
        ''', re.MULTILINE | re.VERBOSE)
    # A regular expression for handling `want` strings that contain
    # expected exceptions.  It divides `want` into three pieces:
    # - the traceback header line (`hdr`)
    # - the traceback stack (`stack`)
    # - the exception message (`msg`), as generated by
    #   traceback.format_exception_only()
    # `msg` may have multiple lines.  We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header. Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            | innermost\ last
            ) \) :
        )
        \s* $ # toss trailing whitespace on the header.
        (?P<stack> .*?) # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
        output = []
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                    lineno=lineno,
                                    indent=min_indent+len(m.group('indent')),
                                    options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output
    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.
        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)
    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects.  Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.
        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a tuple `(source, options, want, exc_msg)`: the matched
        example's source code (with prompts and indentation stripped);
        its option-directive overrides; its expected output (with
        indentation stripped); and the expected exception message, or
        `None` if no traceback is expected.
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))
        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-character
        # '>>> ' / '... ' prompt on each line.
        source = '\n'.join([sl[indent+4:] for sl in source_lines])
        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1] # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])
        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None
        # Extract options from the source.
        options = self._find_options(source, name, lineno)
        return source, options, want, exc_msg
    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)
    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                # Each directive must be '+' or '-' followed by a
                # registered option-flag name.
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options
    # This regular expression finds the indentation of every non-blank
    # line in a string.
    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0
    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character.  If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            # line[indent+3] is the character right after the 3-char
            # '>>>' / '...' prompt; it must be a blank.
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))
    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """
    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.
        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.
        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.
        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.
        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:
            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.
        Contained objects whose module does not match `module` are ignored.
        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests: if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.
        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))
        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)
        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            if module is not None:
                # Supply the module globals in case the module was
                # originally loaded via a PEP 302 loader and
                # file is not a valid filesystem path
                source_lines = linecache.getlines(file, module.__dict__)
            else:
                # No access to a loader, so assume it's a normal
                # filesystem path
                source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            source_lines = None
        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        if '__name__' not in globs:
            globs['__name__'] = '__main__' # provide a default module name
        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output.  This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
        # 2.4.4 and 2.5.
        tests.sort()
        return tests
    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif inspect.isfunction(object):
            # py2: func_globals is the function's defining namespace.
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print 'Finding tests in %s' % name
        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    # py2: im_func unwraps the bound method.
                    val = getattr(obj, valname).im_func
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, basestring):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, basestring):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None
        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Strip the trailing 'c'/'o' so the name points at the
            # .py source rather than the compiled file.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        """
        lineno = None
        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0
        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break
        # Find the line number for functions & methods.
        # (py2 attribute spellings: im_func, func_code.)
        if inspect.ismethod(obj): obj = obj.im_func
        if inspect.isfunction(obj): obj = obj.func_code
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            # NOTE(review): if co_firstlineno were ever missing this
            # would compute None-1 and raise -- confirm it cannot be.
            lineno = getattr(obj, 'co_firstlineno', None)-1
        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno
        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.
        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.
        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.
        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            # Default verbosity mirrors the command line: any '-v' in
            # sys.argv turns it on.
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        # Remember the starting flags so per-example option directives
        # can be undone after each test.
        self.original_optionflags = optionflags
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Per-test bookkeeping; presumably maps test name -> (failures,
        # tries), as the name suggests -- confirm against summarize().
        self._name2ft = {}
        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`. Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`. `compileflags` is the set of compiler
        flags that should be used to execute examples. Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed. The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0
        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
        # Symbolic outcomes for the per-example state machine below.
        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
        check = self._checker.check_output
        # Process each example.
        for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue
            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)
            # Run the example in the given context (globs), and record
            # any exception that gets raised. (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink! This is where the user's code gets run.
                # (Python 2 `exec ... in` statement: execute the compiled
                # example in the test.globs namespace.)
                exec compile(example.source, filename, "single",
                             compileflags, 1) in test.globs
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            # Deliberate bare except: *any* other exception from the
            # example is captured and later matched against exc_msg.
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            # Collect whatever the example wrote to the fake stdout.
            got = self._fakeout.getvalue() # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            # The example raised an exception: check if it was expected.
            else:
                exc_info = sys.exc_info()
                # Last line of the formatted exception, e.g. "KeyError: 'x'\n".
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)
                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM
                # We expected an exception: see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    if check(_strip_exception_details(example.exc_msg),
                             _strip_exception_details(exc_msg),
                             self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags
        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>.+)'
r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Replacement for linecache.getlines (installed by `run`): for
        # the synthetic '<doctest name[num]>' filenames of the test
        # currently running, serve the example's own source so the
        # debugger can display it; anything else falls through to the
        # real getlines saved in self.save_linecache_getlines.
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            source = example.source
            # linecache expects byte strings (Python 2 `unicode` check).
            if isinstance(source, unicode):
                source = source.encode('ascii', 'backslashreplace')
            return source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename, module_globals)
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.
        The examples are run in the namespace `test.globs`. If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection. If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.
        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples. If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.
        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test
        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        # Redirect stdout so example output is captured by _fakeout.
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        # Make sure sys.displayhook just prints the value to stdout
        save_displayhook = sys.displayhook
        sys.displayhook = sys.__displayhook__
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Undo every piece of global patching, even if __run raised.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            sys.displayhook = save_displayhook
            if clear_globs:
                test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is. If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        # Partition the recorded tests into: no examples at all, all
        # examples passed, and at least one failure.
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print " ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        # Failures are reported regardless of verbosity.
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return TestResults(totalf, totalt)
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
# Don't print here by default, since doing
# so breaks some of the buildbots
#print "*** DocTestRunner.merge: '" + name + "' in both" \
# " testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
    """
    A class used to check whether the actual output from a doctest
    example matches the expected output. `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`). These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible. See the
        documentation for `DocTestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True
        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings. Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False
    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        """
        Return True if the difference between `want` and `got` should
        be rendered as a difflib diff (per the REPORT_UDIFF/CDIFF/NDIFF
        flags) rather than as plain Expected/Got sections.
        """
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not. In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ## return False
        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2
    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`). `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True) # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised (in debugging mode) when a doctest example's actual
    output fails to match its expected output.
    Attributes:
      test: the DocTest object being run
      example: the Example object that failed
      got: the actual output
    """
    def __init__(self, test, example, got):
        self.test = test
        self.example = example
        self.got = got
    def __str__(self):
        # Identify the failure by its owning DocTest.
        return str(self.test)
class UnexpectedException(Exception):
    """Raised (in debugging mode) when a doctest example raises an
    exception that the test did not expect.
    Attributes:
      test: the DocTest object being run
      example: the Example object that failed
      exc_info: the exception info, a (type, value, traceback) triple
    """
    def __init__(self, test, example, exc_info):
        self.test = test
        self.example = example
        self.exc_info = exc_info
    def __str__(self):
        # Identify the failure by its owning DocTest.
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
    >>> runner = DebugRunner(verbose=False)
    >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    ... {}, 'foo', 'foo.py', 0)
    >>> try:
    ... runner.run(test)
    ... except UnexpectedException, failure:
    ... pass
    >>> failure.test is test
    True
    >>> failure.example.want
    '42\n'
    >>> exc_info = failure.exc_info
    >>> raise exc_info[0], exc_info[1], exc_info[2]
    Traceback (most recent call last):
    ...
    KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 1
    ... >>> x
    ... 2
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> try:
    ... runner.run(test)
    ... except DocTestFailure, failure:
    ... pass
    DocTestFailure objects provide access to the test:
    >>> failure.test is test
    True
    As well as to the example:
    >>> failure.example.want
    '2\n'
    and the actual output:
    >>> failure.got
    '1\n'
    If a failure or error occurs, the globals are left intact:
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 1}
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 2
    ... >>> raise KeyError
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    Traceback (most recent call last):
    ...
    UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 2}
    But the globals are cleared if there is no error:
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 2
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    TestResults(failed=0, attempted=1)
    >>> test.globs
    {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Run the base class with clear_globs=False so the globals
        # survive when a report_* method raises; they are cleared here
        # only when the run completed without raising.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately instead of recording the failure.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Abort the run immediately instead of recording the failure.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.  (testmod/testfile install their runner
# here on first use and merge later runs into it; see those functions.)
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, report=True,
    optionflags=0, extraglobs=None, raise_on_error=False,
    exclude_empty=False
    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.
    Also test examples reachable from dict m.__test__ if it exists and is
    not None. m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.
    Return (#failures, #tests).
    See help(doctest) for an overview.
    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__. A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used. This is new in 2.4.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. This is new in 2.3. Possible values (see the
    docs for details):
    DONT_ACCEPT_TRUE_FOR_1
    DONT_ACCEPT_BLANKLINE
    NORMALIZE_WHITESPACE
    ELLIPSIS
    SKIP
    IGNORE_EXCEPTION_DETAIL
    REPORT_UDIFF
    REPORT_CDIFF
    REPORT_NDIFF
    REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master
    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')
    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))
    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__
    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(exclude_empty=exclude_empty)
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)
    if report:
        runner.summarize()
    # Maintain the legacy module-global `master` runner: the first call
    # installs this runner, later calls merge their results into it.
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return TestResults(runner.failures, runner.tries)
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Test examples in the given file. Return (#failures, #tests).
    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:
    - If "module_relative" is True (the default), then "filename"
    specifies a module-relative path. By default, this path is
    relative to the calling module's directory; but if the
    "package" argument is specified, then it is relative to that
    package. To ensure os-independence, "filename" should use
    "/" characters to separate path segments, and should not
    be an absolute path (i.e., it may not begin with "/").
    - If "module_relative" is False, then "filename" specifies an
    os-specific path. The path may be absolute or relative (to
    the current working directory).
    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.
    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename. If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames. It is an error to
    specify "package" if "module_relative" is False.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}. A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. Possible values (see the docs for details):
    DONT_ACCEPT_TRUE_FOR_1
    DONT_ACCEPT_BLANKLINE
    NORMALIZE_WHITESPACE
    ELLIPSIS
    SKIP
    IGNORE_EXCEPTION_DETAIL
    REPORT_UDIFF
    REPORT_CDIFF
    REPORT_NDIFF
    REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.
    Optional keyword arg "encoding" specifies an encoding that should
    be used to convert the file to unicode.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path
    text, filename = _load_testfile(filename, package, module_relative)
    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)
    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    if encoding is not None:
        text = text.decode(encoding)
    # Read the file, convert it to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)
    if report:
        runner.summarize()
    # Maintain the legacy module-global `master` runner (see testmod).
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return TestResults(runner.failures, runner.tries)
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Test examples in the given object's docstring (`f`), using `globs`
    as globals.  Optional argument `name` is used in failure messages.
    If the optional argument `verbose` is true, then generate output
    even if there are no failures.
    `compileflags` gives the set of flags that should be used by the
    Python compiler when running the examples.  If not specified, then
    it will default to the set of future-import flags that apply to
    `globs`.
    Optional keyword arg `optionflags` specifies options for the
    testing and output.  See the documentation for `testmod` for more
    information.
    """
    # Collect the doctests attached to `f` itself (no recursion into
    # contained objects) and run each one.
    test_finder = DocTestFinder(verbose=verbose, recurse=False)
    test_runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for a_test in test_finder.find(f, name, globs=globs):
        test_runner.run(a_test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
    """Deprecated pre-2.4 testing interface; a thin wrapper around
    DocTestFinder/DocTestRunner kept only for backward compatibility.
    """
    def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs
        self.verbose = verbose
        self.optionflags = optionflags
        self.testfinder = DocTestFinder()
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)
    def runstring(self, s, name):
        # Parse the string `s` as a doctest and run it in self.globs.
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return TestResults(f,t)
    def rundoc(self, object, name=None, module=None):
        # Find and run every doctest reachable from `object`, summing
        # the (failures, tries) counts across all of them.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return TestResults(f,t)
    def rundict(self, d, name, module=None):
        import types
        # Wrap the dict in a synthetic module so rundoc can walk it.
        m = types.ModuleType(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)
    def run__test__(self, d, name):
        import types
        # Expose `d` as the synthetic module's __test__ mapping.
        m = types.ModuleType(name)
        m.__test__ = d
        return self.rundoc(m, name)
    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)
    def merge(self, other):
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
# Default reporting flags applied by DocTestCase.runTest when a case's
# own option flags contain no REPORT_* bits; set via
# set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> import doctest
    >>> old = doctest._unittest_reportflags
    >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
    ... REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ... REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> doctest.set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
    ... REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags
    # Reject any bit outside the REPORT_* mask before touching state.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    previous = _unittest_reportflags
    _unittest_reportflags = flags
    return previous
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
    def id(self):
        # Identify this case by the full dotted name of its doctest.
        return self._dt_test.name
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._dt_test == other._dt_test and \
self._dt_optionflags == other._dt_optionflags and \
self._dt_setUp == other._dt_setUp and \
self._dt_tearDown == other._dt_tearDown and \
self._dt_checker == other._dt_checker
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__, so spell it out.
        return not self == other
    def __hash__(self):
        # NOTE(review): _dt_test is deliberately excluded from the hash
        # (DocTest objects are presumably not hashable); hashing on a
        # subset of the fields compared by __eq__ is still consistent.
        return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
                     self._dt_checker))
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
    def shortDescription(self):
        # One-line description used by unittest's verbose output.
        return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
    # Placeholder test case used when docstrings were stripped by the
    # interpreter (-O2/-OO); it reports a skip instead of the confusing
    # "module has no docstrings" error.
    def __init__(self, module):
        # `module` is retained only for the skip message.
        self.module = module
        DocTestCase.__init__(self, None)
    def setUp(self):
        # Skip unconditionally; there is nothing that can be run.
        self.skipTest("DocTestSuite will not work with -O2 and above")
    def test_skip(self):
        # Dummy test method so unittest has something to collect and skip.
        pass
    def shortDescription(self):
        return "Skipping tests from %s" % self.module.__name__
    __str__ = shortDescription
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if not tests and sys.flags.optimize >=2:
# Skip doctests when running with -O2
suite = unittest.TestSuite()
suite.addTest(SkipDocTestCase(module))
return suite
elif not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
# It is probably a bug that this exception is not also raised if the
# number of doctest examples in tests is zero (i.e. if no doctest
# examples were found). However, we should probably not be raising
# an exception at all here, though it is too late to make this change
# for a maintenance release. See also issue #14649.
raise ValueError(module, "has no docstrings")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    """Create a DocFileCase for the doctest file at `path`.

    With `module_relative` the path is resolved relative to the calling
    module (or to `package` when given); otherwise it is taken as an
    os-specific path. `globs` seeds the test globals, `parser` extracts
    the examples, and `encoding` (if given) decodes the file content.
    """
    globs = {} if globs is None else globs.copy()
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path, then locate and read the file.
    doc, path = _load_testfile(path, package, module_relative)
    globs.setdefault('__file__', path)
    name = os.path.basename(path)
    if encoding is not None:
        # Caller supplied an encoding: convert the raw content to unicode.
        doc = doc.decode(encoding)
    # Convert the text to a doctest and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.
    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".
    A number of options may be provided as keyword arguments:
    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths. By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package. To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").
      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths. These paths may be absolute
      or relative (to the current working directory).
    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames. It is an error to specify "package" if
      "module_relative" is False.
    setUp
      A set-up function. This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object. The setUp function can access the test globals as the
      globs attribute of the test passed.
    tearDown
      A tear-down function. This is called after running the
      tests in each file. The tearDown function will be passed a DocTest
      object. The tearDown function can access the test globals as the
      globs attribute of the test passed.
    globs
      A dictionary containing initial global variables for the tests.
    optionflags
      A set of doctest option flags expressed as an integer.
    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.
    encoding
      An encoding that will be used to convert the files to unicode.
    """
    suite = unittest.TestSuite()
    # We do this here so that _normalize_module is called at the right
    # level. If it were called in DocFileTest, then this function
    # would be the caller and we might guess the package incorrectly.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))
    # One DocFileCase per path; remaining keywords are forwarded as-is.
    for path in paths:
        suite.addTest(DocFileTest(path, **kw))
    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script. Example input is
    converted to regular code. Example output and all other words
    are converted to comments:
    >>> text = '''
    ... Here are examples of simple math.
    ...
    ... Python has super accurate integer addition
    ...
    ... >>> 2 + 2
    ... 5
    ...
    ... And very friendly error messages:
    ...
    ... >>> 1/0
    ... To Infinity
    ... And
    ... Beyond
    ...
    ... You can use logic if you want:
    ...
    ... >>> if 0:
    ... ...     blah
    ... ...     blah
    ... ...
    ...
    ... Ho hum
    ... '''
    >>> print script_from_examples(text)
    # Here are examples of simple math.
    #
    # Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    # And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    # You can use logic if you want:
    #
    if 0:
        blah
        blah
    #
    # Ho hum
    <BLANKLINE>
    """
    script_lines = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Example source becomes live code (strip the trailing NL).
            script_lines.append(piece.source[:-1])
            if piece.want:
                # Expected output becomes double-commented lines.
                script_lines.append('# Expected:')
                script_lines.extend('## ' + w
                                    for w in piece.want.split('\n')[:-1])
        else:
            # Narrative text between examples becomes comments.
            script_lines.extend(_comment_line(t)
                                for t in piece.split('\n')[:-1])
    # Strip runs of empty comment lines from both ends.
    while script_lines and script_lines[-1] == '#':
        del script_lines[-1]
    while script_lines and script_lines[0] == '#':
        del script_lines[0]
    # Courtesy trailing newline prevents exec from choking (bug #1172785).
    return '\n'.join(script_lines) + '\n'
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring, given as the string `src`.

    pm: start a post-mortem pdb session on failure instead of stepping.
    globs: optional dictionary of globals for the extracted script.
    """
    testsrc = script_from_examples(src)
    debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
    "Debug a test script. `src` is the script, as a string."
    import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
    # NOTE(review): tempfile.mktemp() is race-prone/insecure; tolerated
    # here only because this is a local interactive debugging aid.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    f = open(srcfilename, 'w')
    f.write(src)
    f.close()
    try:
        if globs:
            # Copy so the debugged script cannot mutate the caller's dict.
            globs = globs.copy()
        else:
            globs = {}
        if pm:
            # Post-mortem mode: run to failure, then drop into pdb at the
            # point of the exception.
            try:
                execfile(srcfilename, globs, globs)
            except:
                print sys.exc_info()[1]
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here. '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)
    finally:
        os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the docstring with tests to be debugged. `pm` selects
    post-mortem debugging.
    """
    module = _normalize_module(module)
    testsrc = testsource(module, name)
    # Run with the module's own namespace so the examples resolve names.
    debug_script(testsrc, pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
# Sample class whose docstrings exercise the doctest machinery itself.
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.
    Methods:
        square()
        get()
    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """
    def __init__(self, val):
        """val -> _TestClass object with associated value val.
        >>> t = _TestClass(123)
        >>> print t.get()
        123
        """
        self.val = val
    def square(self):
        """square() -> square TestClass's associated value
        >>> _TestClass(13).square().get()
        169
        """
        self.val = self.val ** 2
        # Returning self allows chained calls, as in the class doctest.
        return self
    def get(self):
        """get() -> return TestClass's associated value.
        >>> x = _TestClass(-42)
        >>> print x.get()
        -42
        """
        return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
if not testfiles:
name = os.path.basename(sys.argv[0])
if '__loader__' in globals(): # python -m
name, _ = os.path.splitext(name)
print("usage: {0} [-v] file ...".format(name))
return 2
for filename in testfiles:
if filename.endswith(".py"):
# It is a module -- insert its dir into sys.path and try to
# import it. If it is part of a package, that possibly
# won't work because of package imports.
dirname, filename = os.path.split(filename)
sys.path.insert(0, dirname)
m = __import__(filename[:-3])
del sys.path[0]
failures, _ = testmod(m)
else:
failures, _ = testfile(filename, module_relative=False)
if failures:
return 1
return 0
# Script entry point: exit status 2 for usage error, 1 if any doctest
# failures were reported, else 0.
if __name__ == "__main__":
    sys.exit(_test())
| mit |
silentfuzzle/calibre | src/calibre/gui2/book_details.py | 11 | 27090 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import cPickle
from binascii import unhexlify
from functools import partial
from PyQt5.Qt import (QPixmap, QSize, QWidget, Qt, pyqtSignal, QUrl, QIcon,
QPropertyAnimation, QEasingCurve, QApplication, QFontInfo, QAction,
QSizePolicy, QPainter, QRect, pyqtProperty, QLayout, QPalette, QMenu,
QPen, QColor)
from PyQt5.QtWebKitWidgets import QWebView
from calibre import fit_image
from calibre.gui2.dnd import (dnd_has_image, dnd_get_image, dnd_get_files,
IMAGE_EXTENSIONS, dnd_has_extension)
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.ebooks.metadata.book.base import (field_metadata, Metadata)
from calibre.ebooks.metadata.book.render import mi_to_html
from calibre.gui2 import (config, open_url, pixmap_to_data, gprefs, rating_font)
from calibre.utils.config import tweaks
def render_html(mi, css, vertical, widget, all_fields=False, render_data_func=None):  # {{{
    """Render the metadata object `mi` as a complete HTML document.

    The field table and comment sections come from `render_data_func`
    (default: render_data). `css` is inlined into the page; body font
    size/family and text colour are taken from `widget`'s application
    font and palette so the page matches the UI theme. When `vertical`
    the comments are placed below the field table, otherwise beside it.
    """
    table, comment_fields = (render_data_func or render_data)(mi, all_fields=all_fields,
            use_roman_numbers=config['use_roman_numerals_for_series_number'])
    def color_to_string(col):
        # Convert a QColor to '#rrggbb', falling back to black.
        ans = '#000000'
        if col.isValid():
            col = col.toRgb()
            if col.isValid():
                ans = unicode(col.name())
        return ans
    fi = QFontInfo(QApplication.font(widget))
    # Base font size, adjustable via the tweak.
    f = fi.pixelSize() + 1 + int(tweaks['change_book_details_font_size_by'])
    fam = unicode(fi.family()).strip().replace('"', '')
    if not fam:
        fam = 'sans-serif'
    c = color_to_string(QApplication.palette().color(QPalette.Normal,
                    QPalette.WindowText))
    # Page skeleton; %%s survives the first interpolation as the slot for
    # the actual content.
    # NOTE(review): the closing tag reads '<html>' (sic) in the original
    # template; left untouched as WebKit tolerates it.
    templ = u'''\
    <html>
    <head>
    <style type="text/css">
        body, td {
            background-color: transparent;
            font-size: %dpx;
            font-family: "%s",sans-serif;
            color: %s
        }
    </style>
    <style type="text/css">
        %s
    </style>
    </head>
    <body>
    %%s
    </body>
    <html>
    '''%(f, fam, c, css)
    comments = u''
    if comment_fields:
        comments = '\n'.join(u'<div>%s</div>' % x for x in comment_fields)
    right_pane = u'<div id="comments" class="comments">%s</div>'%comments
    if vertical:
        ans = templ%(table+right_pane)
    else:
        # Horizontal: two-column table, fields on the left at 40% width.
        ans = templ%(u'<table><tr><td valign="top" '
            'style="padding-right:2em; width:40%%">%s</td><td valign="top">%s</td></tr></table>'
                % (table, right_pane))
    return ans
def get_field_list(fm, use_defaults=False):
    """Return the ordered [(field_name, displayed)] list for book details.

    Reads the saved ordering from the db prefs (or from gprefs when the
    setting has not yet been migrated into the db), appends any
    displayable fields missing from the saved list, and drops entries
    that are no longer displayable.
    """
    from calibre.gui2.ui import get_gui
    db = get_gui().current_db
    if use_defaults:
        src = db.prefs.defaults
    else:
        legacy = gprefs.get('book_display_fields', None)
        if legacy is not None and not db.prefs.has_setting(
                'book_display_fields'):
            # Setting still lives only in gprefs; use it until migrated.
            src = gprefs
        else:
            src = db.prefs
    fieldlist = list(src['book_display_fields'])
    seen = frozenset(entry[0] for entry in fieldlist)
    for field in fm.displayable_field_keys():
        if field not in seen:
            # New field unknown to the saved ordering: show it by default.
            fieldlist.append((field, True))
    available = frozenset(fm.displayable_field_keys())
    return [(f, d) for f, d in fieldlist if f in available]
def render_data(mi, use_roman_numbers=True, all_fields=False):
    """Render the metadata object `mi` via mi_to_html, honouring the
    user's field ordering (all fields forced visible if `all_fields`)."""
    fields = get_field_list(getattr(mi, 'field_metadata', field_metadata))
    fields = [(name, True) for name, _ in fields] if all_fields else [
        (name, shown) for name, shown in fields]
    return mi_to_html(
        mi, field_list=fields, use_roman_numbers=use_roman_numbers,
        rating_font=rating_font(),
        default_author_link=gprefs.get('default_author_link'))
# }}}
def details_context_menu_event(view, ev, book_info):  # {{{
    """Build and show the context menu for the book-details HTML view.

    `view` is the QWebView showing the details, `ev` the context-menu
    event, and `book_info` the BookInfo instance whose pre-built
    QActions are reused for format/link/author/item operations.
    """
    p = view.page()
    mf = p.mainFrame()
    r = mf.hitTestContent(ev.pos())
    url = unicode(r.linkUrl().toString(QUrl.None)).strip()
    menu = p.createStandardContextMenu()
    ca = view.pageAction(p.Copy)
    # Strip the standard WebKit menu down to just the Copy action.
    for action in list(menu.actions()):
        if action is not ca:
            menu.removeAction(action)
    if not r.isNull():
        if url.startswith('format:'):
            # Link encodes a book format as format:<book_id>:<fmt>.
            parts = url.split(':')
            try:
                book_id, fmt = int(parts[1]), parts[2].upper()
            except:
                import traceback
                traceback.print_exc()
            else:
                from calibre.gui2.ui import get_gui
                from calibre.ebooks.oeb.polish.main import SUPPORTED
                db = get_gui().current_db.new_api
                # ofmt is the ORIGINAL_* twin of fmt (or fmt itself if it
                # already is one); nfmt is the plain format name.
                ofmt = fmt.upper() if fmt.startswith('ORIGINAL_') else 'ORIGINAL_' + fmt
                nfmt = ofmt[len('ORIGINAL_'):]
                fmts = {x.upper() for x in db.formats(book_id)}
                for a, t in [('remove', _('Delete the %s format')),
                             ('save', _('Save the %s format to disk')),
                             ('restore', _('Restore the %s format')),
                             ('compare', ''),
                             ]:
                    if a == 'restore' and not fmt.startswith('ORIGINAL_'):
                        continue
                    if a == 'compare':
                        # Compare only when both versions exist and the
                        # format is editable by the polish tools.
                        if ofmt not in fmts or nfmt not in SUPPORTED:
                            continue
                        t = _('Compare to the %s format') % (fmt[9:] if fmt.startswith('ORIGINAL_') else ofmt)
                    else:
                        t = t % fmt
                    ac = getattr(book_info, '%s_format_action'%a)
                    ac.current_fmt = (book_id, fmt)
                    ac.setText(t)
                    menu.addAction(ac)
                if not fmt.upper().startswith('ORIGINAL_'):
                    from calibre.gui2.open_with import populate_menu, edit_programs
                    m = QMenu(_('Open %s with...') % fmt.upper())
                    populate_menu(m, partial(book_info.open_with, book_id, fmt), fmt)
                    if len(m.actions()) == 0:
                        menu.addAction(_('Open %s with...') % fmt.upper(), partial(book_info.choose_open_with, book_id, fmt))
                    else:
                        m.addSeparator()
                        m.addAction(_('Add other application for %s files...') % fmt.upper(), partial(book_info.choose_open_with, book_id, fmt))
                        m.addAction(_('Edit Open With applications...'), partial(edit_programs, fmt, book_info))
                        menu.addMenu(m)
            # Offer copying the on-disk path when the element carries one.
            ac = book_info.copy_link_action
            ac.current_url = r.linkElement().attribute('data-full-path')
            if ac.current_url:
                ac.setText(_('&Copy path to file'))
                menu.addAction(ac)
        else:
            el = r.linkElement()
            data = el.attribute('data-item')
            author = el.toPlainText() if unicode(el.attribute('calibre-data')) == u'authors' else None
            if not url.startswith('search:'):
                for a, t in [('copy', _('&Copy Link')),
                             ]:
                    ac = getattr(book_info, '%s_link_action'%a)
                    ac.current_url = url
                    if url.startswith('path:'):
                        # path: links store the real path in the title.
                        ac.current_url = el.attribute('title')
                    ac.setText(t)
                    menu.addAction(ac)
            if author is not None:
                ac = book_info.manage_author_action
                ac.current_fmt = author
                ac.setText(_('Manage %s') % author)
                menu.addAction(ac)
            if data:
                # data-item is a hex-encoded pickled (field, value, id).
                try:
                    field, value, book_id = cPickle.loads(unhexlify(data))
                except Exception:
                    field = value = book_id = None
                if field:
                    ac = book_info.remove_item_action
                    ac.data = (field, value, book_id)
                    ac.setText(_('Remove %s from this book') % value)
                    menu.addAction(ac)
    if len(menu.actions()) > 0:
        menu.exec_(ev.globalPos())
# }}}
class CoverView(QWidget):  # {{{

    # (book_id, cover_data) when the user sets a new cover image.
    cover_changed = pyqtSignal(object, object)
    # (book_id,) when the user removes the cover.
    cover_removed = pyqtSignal(object)
    # (book_id, open-with entry) to open the cover in an external program.
    open_cover_with = pyqtSignal(object, object)

    def __init__(self, vertical, parent=None):
        QWidget.__init__(self, parent)
        self._current_pixmap_size = QSize(120, 120)
        self.vertical = vertical
        # Animates current_pixmap_size when a different book is shown.
        self.animation = QPropertyAnimation(self, b'current_pixmap_size', self)
        self.animation.setEasingCurve(QEasingCurve(QEasingCurve.OutExpo))
        self.animation.setDuration(1000)
        self.animation.setStartValue(QSize(0, 0))
        self.animation.valueChanged.connect(self.value_changed)
        self.setSizePolicy(
                QSizePolicy.Expanding if vertical else QSizePolicy.Minimum,
                QSizePolicy.Expanding)
        self.default_pixmap = QPixmap(I('book.png'))
        self.pixmap = self.default_pixmap
        self.pwidth = self.pheight = None
        # Holds only the currently shown book's id under the 'id' key.
        self.data = {}
        self.do_layout()

    def value_changed(self, val):
        # Repaint on every animation tick.
        self.update()

    def setCurrentPixmapSize(self, val):
        # Setter backing the animated current_pixmap_size property.
        self._current_pixmap_size = val

    def do_layout(self):
        # Recompute the drawn pixmap size so it fits the widget rect.
        if self.rect().width() == 0 or self.rect().height() == 0:
            return
        pixmap = self.pixmap
        pwidth, pheight = pixmap.width(), pixmap.height()
        try:
            self.pwidth, self.pheight = fit_image(pwidth, pheight,
                            self.rect().width(), self.rect().height())[1:]
        except:
            self.pwidth, self.pheight = self.rect().width()-1, \
                    self.rect().height()-1
        self.current_pixmap_size = QSize(self.pwidth, self.pheight)
        self.animation.setEndValue(self.current_pixmap_size)

    def show_data(self, data):
        # Show the cover from a Metadata object; animate only when a
        # different book is displayed and animations are enabled.
        self.animation.stop()
        same_item = getattr(data, 'id', True) == self.data.get('id', False)
        self.data = {'id':data.get('id', None)}
        if data.cover_data[1]:
            self.pixmap = QPixmap.fromImage(data.cover_data[1])
            # Reject degenerate covers and fall back to the default image.
            if self.pixmap.isNull() or self.pixmap.width() < 5 or \
                    self.pixmap.height() < 5:
                self.pixmap = self.default_pixmap
        else:
            self.pixmap = self.default_pixmap
        self.do_layout()
        self.update()
        if (not same_item and not config['disable_animations'] and
                self.isVisible()):
            self.animation.start()

    def paintEvent(self, event):
        # Draw the (possibly mid-animation) scaled cover centred in the
        # widget, optionally overlaying its pixel dimensions.
        canvas_size = self.rect()
        width = self.current_pixmap_size.width()
        extrax = canvas_size.width() - width
        if extrax < 0:
            extrax = 0
        x = int(extrax/2.)
        height = self.current_pixmap_size.height()
        extray = canvas_size.height() - height
        if extray < 0:
            extray = 0
        y = int(extray/2.)
        target = QRect(x, y, width, height)
        p = QPainter(self)
        p.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform)
        p.drawPixmap(target, self.pixmap.scaled(target.size(),
            Qt.KeepAspectRatio, Qt.SmoothTransformation))
        if gprefs['bd_overlay_cover_size']:
            # Overlay "W x H" (nbsp-padded) in the bottom-right corner.
            sztgt = target.adjusted(0, 0, 0, -4)
            f = p.font()
            f.setBold(True)
            p.setFont(f)
            sz = u'\u00a0%d x %d\u00a0'%(self.pixmap.width(), self.pixmap.height())
            flags = Qt.AlignBottom|Qt.AlignRight|Qt.TextSingleLine
            szrect = p.boundingRect(sztgt, flags, sz)
            p.fillRect(szrect.adjusted(0, 0, 0, 4), QColor(0, 0, 0, 200))
            p.setPen(QPen(QColor(255,255,255)))
            p.drawText(sztgt, flags, sz)
        p.end()

    # Qt property animated by self.animation; repaints are triggered via
    # the animation's valueChanged -> value_changed().
    current_pixmap_size = pyqtProperty('QSize',
            fget=lambda self: self._current_pixmap_size,
            fset=setCurrentPixmapSize
            )

    def contextMenuEvent(self, ev):
        # Right-click menu: paste/copy/remove/generate cover, plus the
        # configurable "open with" applications.
        from calibre.gui2.open_with import populate_menu, edit_programs
        cm = QMenu(self)
        paste = cm.addAction(_('Paste Cover'))
        copy = cm.addAction(_('Copy Cover'))
        remove = cm.addAction(_('Remove Cover'))
        gc = cm.addAction(_('Generate Cover from metadata'))
        if not QApplication.instance().clipboard().mimeData().hasImage():
            paste.setEnabled(False)
        copy.triggered.connect(self.copy_to_clipboard)
        paste.triggered.connect(self.paste_from_clipboard)
        remove.triggered.connect(self.remove_cover)
        gc.triggered.connect(self.generate_cover)
        m = QMenu(_('Open cover with...'))
        populate_menu(m, self.open_with, 'cover_image')
        if len(m.actions()) == 0:
            cm.addAction(_('Open cover with...'), self.choose_open_with)
        else:
            m.addSeparator()
            m.addAction(_('Add another application to open cover...'), self.choose_open_with)
            m.addAction(_('Edit Open With applications...'), partial(edit_programs, 'cover_image', self))
            cm.addMenu(m)
        cm.exec_(ev.globalPos())

    def open_with(self, entry):
        # Open the current book's cover with the given open-with entry.
        id_ = self.data.get('id', None)
        if id_ is not None:
            self.open_cover_with.emit(id_, entry)

    def choose_open_with(self):
        from calibre.gui2.open_with import choose_program
        entry = choose_program('cover_image', self)
        if entry is not None:
            self.open_with(entry)

    def copy_to_clipboard(self):
        QApplication.instance().clipboard().setPixmap(self.pixmap)

    def paste_from_clipboard(self, pmap=None):
        # Accept an explicit pixmap or fall back to clipboard/selection.
        if not isinstance(pmap, QPixmap):
            cb = QApplication.instance().clipboard()
            pmap = cb.pixmap()
            if pmap.isNull() and cb.supportsSelection():
                pmap = cb.pixmap(cb.Selection)
        if not pmap.isNull():
            self.update_cover(pmap)

    def update_cover(self, pmap=None, cdata=None):
        # Set a new cover from a pixmap or raw image bytes and notify the
        # GUI via cover_changed.
        if pmap is None:
            pmap = QPixmap()
            pmap.loadFromData(cdata)
        if pmap.isNull():
            return
        self.pixmap = pmap
        self.do_layout()
        self.update()
        self.update_tooltip(getattr(self.parent(), 'current_path', ''))
        if not config['disable_animations']:
            self.animation.start()
        id_ = self.data.get('id', None)
        if id_ is not None:
            self.cover_changed.emit(id_, cdata or pixmap_to_data(pmap))

    def generate_cover(self, *args):
        # Auto-generate a cover image from the book's metadata.
        book_id = self.data.get('id')
        if book_id is not None:
            from calibre.ebooks.covers import generate_cover
            from calibre.gui2.ui import get_gui
            mi = get_gui().current_db.new_api.get_metadata(book_id)
            cdata = generate_cover(mi)
            self.update_cover(cdata=cdata)

    def remove_cover(self):
        # Revert to the default image and notify the GUI.
        id_ = self.data.get('id', None)
        self.pixmap = self.default_pixmap
        self.do_layout()
        self.update()
        if id_ is not None:
            self.cover_removed.emit(id_)

    def update_tooltip(self, current_path):
        try:
            sz = self.pixmap.size()
        except:
            sz = QSize(0, 0)
        self.setToolTip(
            '<p>'+_('Double-click to open Book Details window') +
            '<br><br>' + _('Path') + ': ' + current_path +
            '<br><br>' + _('Cover size: %(width)d x %(height)d')%dict(
                width=sz.width(), height=sz.height())
        )
# }}}
# Book Info {{{
class BookInfo(QWebView):

    """HTML view showing the current book's metadata table and comments,
    with context-menu actions for formats, links, authors and items."""

    link_clicked = pyqtSignal(object)
    remove_format = pyqtSignal(int, object)
    remove_item = pyqtSignal(int, object, object)
    save_format = pyqtSignal(int, object)
    restore_format = pyqtSignal(int, object)
    compare_format = pyqtSignal(int, object)
    copy_link = pyqtSignal(object)
    manage_author = pyqtSignal(object)
    open_fmt_with = pyqtSignal(int, object, object)

    def __init__(self, vertical, parent=None):
        QWebView.__init__(self, parent)
        s = self.settings()
        s.setAttribute(s.JavascriptEnabled, False)
        self.vertical = vertical
        # Intercept every link click instead of letting WebKit navigate.
        self.page().setLinkDelegationPolicy(self.page().DelegateAllLinks)
        self.linkClicked.connect(self.link_activated)
        self._link_clicked = False
        self.setAttribute(Qt.WA_OpaquePaintEvent, False)
        palette = self.palette()
        self.setAcceptDrops(False)
        # Transparent background so the page blends with the UI theme.
        palette.setBrush(QPalette.Base, Qt.transparent)
        self.page().setPalette(palette)
        self.css = P('templates/book_details.css', data=True).decode('utf-8')
        # One reusable QAction per context-menu operation. The handler
        # names intentionally match the '<x>_triggerred' methods below
        # (note the historical double-r spelling; it is load-bearing
        # because the handler is looked up via getattr).
        for x, icon in [
                ('remove_format', 'trash.png'), ('save_format', 'save.png'),
                ('restore_format', 'edit-undo.png'), ('copy_link','edit-copy.png'),
                ('manage_author', 'user_profile.png'), ('compare_format', 'diff.png')]:
            ac = QAction(QIcon(I(icon)), '', self)
            ac.current_fmt = None
            ac.current_url = None
            ac.triggered.connect(getattr(self, '%s_triggerred'%x))
            setattr(self, '%s_action'%x, ac)
        self.remove_item_action = ac = QAction(QIcon(I('minus.png')), '...', self)
        ac.data = (None, None, None)
        ac.triggered.connect(self.remove_item_triggered)
        self.setFocusPolicy(Qt.NoFocus)

    def remove_item_triggered(self):
        field, value, book_id = self.remove_item_action.data
        if field:
            self.remove_item.emit(book_id, field, value)

    def context_action_triggered(self, which):
        # Dispatch a context-menu QAction: format actions carry a
        # (book_id, fmt) tuple in current_fmt, link actions a URL in
        # current_url; the matching signal shares the action's name.
        f = getattr(self, '%s_action'%which).current_fmt
        url = getattr(self, '%s_action'%which).current_url
        if f and 'format' in which:
            book_id, fmt = f
            getattr(self, which).emit(book_id, fmt)
        if url and 'link' in which:
            getattr(self, which).emit(url)

    def remove_format_triggerred(self):
        self.context_action_triggered('remove_format')

    def save_format_triggerred(self):
        self.context_action_triggered('save_format')

    def restore_format_triggerred(self):
        self.context_action_triggered('restore_format')

    def compare_format_triggerred(self):
        self.context_action_triggered('compare_format')

    def copy_link_triggerred(self):
        self.context_action_triggered('copy_link')

    def manage_author_triggerred(self):
        self.manage_author.emit(self.manage_author_action.current_fmt)

    def link_activated(self, link):
        # http(s) links open in the external browser; everything else
        # (path:, format:, search:, ...) is forwarded to the GUI.
        self._link_clicked = True
        if unicode(link.scheme()) in ('http', 'https'):
            return open_url(link)
        link = unicode(link.toString(QUrl.None))
        self.link_clicked.emit(link)

    def turnoff_scrollbar(self, *args):
        self.page().mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)

    def show_data(self, mi):
        # Re-render the page for the Metadata object `mi`.
        html = render_html(mi, self.css, self.vertical, self.parent())
        self.setHtml(html)

    def mouseDoubleClickEvent(self, ev):
        swidth = self.page().mainFrame().scrollBarGeometry(Qt.Vertical).width()
        sheight = self.page().mainFrame().scrollBarGeometry(Qt.Horizontal).height()
        if self.width() - ev.x() < swidth or \
            self.height() - ev.y() < sheight:
            # Filter out double clicks on the scroll bar
            ev.accept()
        else:
            # Let the parent BookDetails handle it (opens the details
            # dialog).
            ev.ignore()

    def contextMenuEvent(self, ev):
        details_context_menu_event(self, ev, self)

    def open_with(self, book_id, fmt, entry):
        self.open_fmt_with.emit(book_id, fmt, entry)

    def choose_open_with(self, book_id, fmt):
        from calibre.gui2.open_with import choose_program
        entry = choose_program(fmt, self)
        if entry is not None:
            self.open_with(book_id, fmt, entry)
# }}}
class DetailsLayout(QLayout):  # {{{

    """Custom layout managing exactly two children -- the cover view
    followed by the book-info view -- stacked vertically or placed side
    by side depending on `vertical`."""

    def __init__(self, vertical, parent):
        QLayout.__init__(self, parent)
        self.vertical = vertical
        self._children = []
        # Minimum footprint differs per orientation.
        self.min_size = QSize(190, 200) if vertical else QSize(120, 120)
        self.setContentsMargins(0, 0, 0, 0)

    def minimumSize(self):
        # Return a copy so callers cannot mutate our stored minimum.
        return QSize(self.min_size)

    def addItem(self, child):
        # Fixed off-by-one: the original checked `> 2`, which silently
        # accepted a third child before raising on the fourth.
        if len(self._children) >= 2:
            raise ValueError('This layout can only manage two children')
        self._children.append(child)

    def itemAt(self, i):
        try:
            return self._children[i]
        except IndexError:
            return None

    def takeAt(self, i):
        # Fixed: QLayout.takeAt() must return the removed QLayoutItem
        # (or None when the index is out of range) so Qt can take
        # ownership and dispose of it; the original always returned None.
        try:
            return self._children.pop(i)
        except IndexError:
            return None

    def count(self):
        return len(self._children)

    def sizeHint(self):
        return QSize(self.min_size)

    def setGeometry(self, r):
        QLayout.setGeometry(self, r)
        self.do_layout(r)

    def cover_height(self, r):
        """Height allotted to the cover in vertical mode: at most half
        the rect height and a 4:3 fit of the width, clamped to the
        pixmap height when the cover widget has one loaded."""
        if not self._children[0].widget().isVisible():
            return 0
        mh = min(int(r.height()/2.), int(4/3. * r.width())+1)
        try:
            ph = self._children[0].widget().pixmap.height()
        except AttributeError:
            # Widget has no pixmap (yet); do not clamp.
            ph = 0
        if ph > 0:
            mh = min(mh, ph)
        return mh

    def cover_width(self, r):
        """Width allotted to the cover in horizontal mode (3:4 of the
        height), clamped to the pixmap width when one is loaded."""
        if not self._children[0].widget().isVisible():
            return 0
        mw = 1 + int(3/4. * r.height())
        try:
            pw = self._children[0].widget().pixmap.width()
        except AttributeError:
            pw = 0
        if pw > 0:
            mw = min(mw, pw)
        return mw

    def do_layout(self, rect):
        """Place cover and details inside `rect` with a 5px gutter."""
        if len(self._children) != 2:
            return
        left, top, right, bottom = self.getContentsMargins()
        r = rect.adjusted(+left, +top, -right, -bottom)
        x = r.x()
        y = r.y()
        cover, details = self._children
        if self.vertical:
            ch = self.cover_height(r)
            cover.setGeometry(QRect(x, y, r.width(), ch))
            cover.widget().do_layout()
            y += ch + 5
            details.setGeometry(QRect(x, y, r.width(), r.height()-ch-5))
        else:
            cw = self.cover_width(r)
            cover.setGeometry(QRect(x, y, cw, r.height()))
            cover.widget().do_layout()
            x += cw + 5
            details.setGeometry(QRect(x, y, r.width() - cw - 5, r.height()))
# }}}
class BookDetails(QWidget):  # {{{

    """Panel combining the cover view and metadata view for the current
    book; forwards user actions to the main GUI via signals."""

    # Signals re-emitted to the GUI for actions initiated here (link
    # clicks, format context-menu operations, cover changes, drops...).
    show_book_info = pyqtSignal()
    open_containing_folder = pyqtSignal(int)
    view_specific_format = pyqtSignal(int, object)
    search_requested = pyqtSignal(object)
    remove_specific_format = pyqtSignal(int, object)
    remove_metadata_item = pyqtSignal(int, object, object)
    save_specific_format = pyqtSignal(int, object)
    restore_specific_format = pyqtSignal(int, object)
    compare_specific_format = pyqtSignal(int, object)
    copy_link = pyqtSignal(object)
    remote_file_dropped = pyqtSignal(object, object)
    files_dropped = pyqtSignal(object, object)
    cover_changed = pyqtSignal(object, object)
    open_cover_with = pyqtSignal(object, object)
    cover_removed = pyqtSignal(object)
    view_device_book = pyqtSignal(object)
    manage_author = pyqtSignal(object)
    open_fmt_with = pyqtSignal(int, object, object)

    # Drag 'n drop {{{
    # Extensions accepted by drag and drop: cover images and ebooks.
    # NOTE(review): "DROPABBLE" is a long-standing misspelling; renaming
    # it would be an interface change, so it is documented instead.
    DROPABBLE_EXTENSIONS = IMAGE_EXTENSIONS+BOOK_EXTENSIONS
    def dragEnterEvent(self, event):
        # Accept drags that carry a supported file extension or an image.
        md = event.mimeData()
        if dnd_has_extension(md, self.DROPABBLE_EXTENSIONS) or \
                dnd_has_image(md):
            event.acceptProposedAction()
    def dropEvent(self, event):
        # Dropped image data sets the cover; dropped ebook files are
        # added as formats. Remote (URL) drops are forwarded to the GUI
        # for download via remote_file_dropped.
        event.setDropAction(Qt.CopyAction)
        md = event.mimeData()
        x, y = dnd_get_image(md)
        if x is not None:
            # We have an image, set cover
            event.accept()
            if y is None:
                # Local image
                self.cover_view.paste_from_clipboard(x)
                self.update_layout()
            else:
                self.remote_file_dropped.emit(x, y)
                # We do not support setting cover *and* adding formats for
                # a remote drop, anyway, so return
            return
        # Now look for ebook files
        urls, filenames = dnd_get_files(md, BOOK_EXTENSIONS)
        if not urls:
            # Nothing found
            return
        if not filenames:
            # Local files
            self.files_dropped.emit(event, urls)
        else:
            # Remote files, use the first file
            self.remote_file_dropped.emit(urls[0], filenames[0])
        event.accept()
def dragMoveEvent(self, event):
event.acceptProposedAction()
# }}}
def __init__(self, vertical, parent=None):
QWidget.__init__(self, parent)
self.setAcceptDrops(True)
self._layout = DetailsLayout(vertical, self)
self.setLayout(self._layout)
self.current_path = ''
self.cover_view = CoverView(vertical, self)
self.cover_view.cover_changed.connect(self.cover_changed.emit)
self.cover_view.open_cover_with.connect(self.open_cover_with.emit)
self.cover_view.cover_removed.connect(self.cover_removed.emit)
self._layout.addWidget(self.cover_view)
self.book_info = BookInfo(vertical, self)
self._layout.addWidget(self.book_info)
self.book_info.link_clicked.connect(self.handle_click)
self.book_info.remove_format.connect(self.remove_specific_format)
self.book_info.remove_item.connect(self.remove_metadata_item)
self.book_info.open_fmt_with.connect(self.open_fmt_with)
self.book_info.save_format.connect(self.save_specific_format)
self.book_info.restore_format.connect(self.restore_specific_format)
self.book_info.compare_format.connect(self.compare_specific_format)
self.book_info.copy_link.connect(self.copy_link)
self.book_info.manage_author.connect(self.manage_author)
self.setCursor(Qt.PointingHandCursor)
def handle_click(self, link):
typ, val = link.partition(':')[0::2]
if typ == 'path':
self.open_containing_folder.emit(int(val))
elif typ == 'format':
id_, fmt = val.split(':')
self.view_specific_format.emit(int(id_), fmt)
elif typ == 'devpath':
self.view_device_book.emit(val)
elif typ == 'search':
self.search_requested.emit(unhexlify(val).decode('utf-8'))
else:
try:
open_url(QUrl(link, QUrl.TolerantMode))
except:
import traceback
traceback.print_exc()
def mouseDoubleClickEvent(self, ev):
ev.accept()
self.show_book_info.emit()
def show_data(self, data):
self.book_info.show_data(data)
self.cover_view.show_data(data)
self.current_path = getattr(data, u'path', u'')
self.update_layout()
def update_layout(self):
self.cover_view.setVisible(gprefs['bd_show_cover'])
self._layout.do_layout(self.rect())
self.cover_view.update_tooltip(self.current_path)
def reset_info(self):
self.show_data(Metadata(_('Unknown')))
# }}}
| gpl-3.0 |
KohlsTechnology/ansible | lib/ansible/modules/network/slxos/slxos_facts.py | 15 | 13814 | #!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: slxos_facts
version_added: "2.6"
author: "Lindsay Hill (@LindsayHill)"
short_description: Collect facts from devices running Extreme SLX-OS
description:
- Collects a base set of device facts from a remote device that
is running SLX-OS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
notes:
- Tested against SLX-OS 17s.1.02
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
        with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: ['!config']
"""
EXAMPLES = """
# Collect all facts from the device
- slxos_facts:
gather_subset: all
# Collect only the config and default facts
- slxos_facts:
gather_subset:
- config
# Do not collect hardware facts
- slxos_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
  type: str
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All Primary IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.network.slxos.slxos import run_commands
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
    """Common plumbing for the fact collector classes.

    Subclasses declare the CLI commands to issue in ``COMMANDS``; calling
    :meth:`populate` runs them and caches the raw output in ``responses``
    for the subclass parsers to consume.
    """

    # Commands each subclass sends to the device; empty in the base class.
    COMMANDS = []

    def __init__(self, module):
        self.module = module
        self.facts = {}
        self.responses = None

    def populate(self):
        # Cache the raw output of every declared command for later parsing.
        self.responses = run_commands(self.module, self.COMMANDS)

    def run(self, cmd):
        """Run a single extra command on the device and return its output."""
        return run_commands(self.module, cmd)
class Default(FactsBase):
    """Collect the always-gathered facts: version, model, serial, hostname."""

    COMMANDS = [
        'show version',
        'show inventory chassis',
        r'show running-config | include host\-name'
    ]

    def populate(self):
        """Run COMMANDS and fill the version/model/serialnum/hostname facts."""
        super(Default, self).populate()

        version_out = self.responses[0]
        if version_out:
            self.facts['version'] = self.parse_version(version_out)

        inventory_out = self.responses[1]
        if inventory_out:
            self.facts['model'] = self.parse_model(inventory_out)
            self.facts['serialnum'] = self.parse_serialnum(inventory_out)

        hostname_out = self.responses[2]
        if hostname_out:
            self.facts['hostname'] = self.parse_hostname(hostname_out)

    def parse_version(self, data):
        """Return the SLX-OS version string, or None when absent."""
        found = re.search(r'SLX-OS Operating System Version: (\S+)', data)
        return found.group(1) if found else None

    def parse_model(self, data):
        """Return the chassis SID (model identifier), or None when absent."""
        found = re.search(r'SID:(\S+)', data, re.M)
        return found.group(1) if found else None

    def parse_hostname(self, data):
        """Return the configured host-name, or None when absent."""
        found = re.search(r'switch-attributes host-name (\S+)', data, re.M)
        return found.group(1) if found else None

    def parse_serialnum(self, data):
        """Return the chassis serial number, or None when absent."""
        found = re.search(r'SN:(\S+)', data, re.M)
        return found.group(1) if found else None
class Hardware(FactsBase):
    """Collect memory statistics from ``show process memory summary``."""

    COMMANDS = [
        'show process memory summary'
    ]

    def populate(self):
        """Fill memtotal_mb/memfree_mb (values reported in KB, stored in MB)."""
        super(Hardware, self).populate()
        data = self.responses[0]
        if data:
            # The parse_* helpers return None when the expected line is
            # missing. Guard against that so an unexpected output format
            # merely skips the fact instead of raising TypeError on
            # ``int(None)``.
            memtotal = self.parse_memtotal(data)
            memfree = self.parse_memfree(data)
            if memtotal is not None:
                self.facts['memtotal_mb'] = int(round(int(memtotal) / 1024, 0))
            if memfree is not None:
                self.facts['memfree_mb'] = int(round(int(memfree) / 1024, 0))

    def parse_memtotal(self, data):
        """Return the total memory figure (KB) as a string, or None."""
        match = re.search(r'TotalMemory: (\d+)\s', data, re.M)
        if match:
            return match.group(1)

    def parse_memfree(self, data):
        """Return the free memory figure (KB) as a string, or None."""
        match = re.search(r'Total Free: (\d+)\s', data, re.M)
        if match:
            return match.group(1)
class Config(FactsBase):
    """Collect the device's full running configuration."""

    COMMANDS = ['show running-config']

    def populate(self):
        super(Config, self).populate()
        config_text = self.responses[0]
        if config_text:
            # Store the running-config verbatim under the 'config' fact.
            self.facts['config'] = config_text
class Interfaces(FactsBase):
    """Collect per-interface facts, IP addresses and LLDP neighbors."""

    COMMANDS = [
        'show interface',
        'show ipv6 interface brief',
        r'show lldp nei detail | inc ^Local\ Interface|^Remote\ Interface|^System\ Name'
    ]

    def populate(self):
        """Parse the three command outputs into the facts dict."""
        super(Interfaces, self).populate()

        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()

        data = self.responses[0]
        if data:
            interfaces = self.parse_interfaces(data)
            self.facts['interfaces'] = self.populate_interfaces(interfaces)
            self.populate_ipv4_interfaces(interfaces)

        data = self.responses[1]
        if data:
            self.populate_ipv6_interfaces(data)

        data = self.responses[2]
        if data:
            self.facts['neighbors'] = self.parse_neighbors(data)

    def populate_interfaces(self, interfaces):
        """Build {interface name: attribute dict} from raw per-interface text."""
        facts = dict()
        for key, value in iteritems(interfaces):
            intf = dict()
            intf['description'] = self.parse_description(value)
            intf['macaddress'] = self.parse_macaddress(value)
            intf['mtu'] = self.parse_mtu(value)
            intf['bandwidth'] = self.parse_bandwidth(value)
            intf['duplex'] = self.parse_duplex(value)
            intf['lineprotocol'] = self.parse_lineprotocol(value)
            intf['operstatus'] = self.parse_operstatus(value)
            intf['type'] = self.parse_type(value)
            facts[key] = intf
        return facts

    def populate_ipv4_interfaces(self, data):
        """Record primary and secondary IPv4 addresses for each interface."""
        for key, value in data.items():
            self.facts['interfaces'][key]['ipv4'] = list()
            # NOTE(review): both names are immediately rebound by the
            # findall() calls below, so the shared empty list is harmless.
            primary_address = addresses = []
            primary_address = re.findall(r'Primary Internet Address is (\S+)', value, re.M)
            addresses = re.findall(r'Secondary Internet Address is (\S+)', value, re.M)
            # An interface without a primary address contributes nothing.
            if len(primary_address) == 0:
                continue
            addresses.append(primary_address[0])
            for address in addresses:
                addr, subnet = address.split("/")
                ipv4 = dict(address=addr.strip(), subnet=subnet.strip())
                self.add_ip_address(addr.strip(), 'ipv4')
                self.facts['interfaces'][key]['ipv4'].append(ipv4)

    # Only gets primary IPv6 addresses
    def populate_ipv6_interfaces(self, data):
        """Parse ``show ipv6 interface brief`` output (primary addresses only)."""
        # Drop everything up to the '====' header separator line.
        interfaces = re.split('=+', data)[1].strip()
        matches = re.findall(r'(\S+ \S+) +[\w-]+.+\s+([\d:/]+)', interfaces, re.M)
        for match in matches:
            interface = match[0]
            self.facts['interfaces'][interface]['ipv6'] = list()
            address, masklen = match[1].split('/')
            ipv6 = dict(address=address, masklen=int(masklen))
            self.add_ip_address(ipv6['address'], 'ipv6')
            self.facts['interfaces'][interface]['ipv6'].append(ipv6)

    def add_ip_address(self, address, family):
        """Append an address to the flat all_ipv4/all_ipv6 fact lists."""
        if family == 'ipv4':
            self.facts['all_ipv4_addresses'].append(address)
        else:
            self.facts['all_ipv6_addresses'].append(address)

    def parse_neighbors(self, neighbors):
        """Build {local interface: [{'host', 'port'}, ...]} from LLDP output."""
        facts = dict()
        lines = neighbors.split('Local Interface: ')
        if len(lines) == 0:
            return facts
        for line in lines:
            match = re.search(r'(\w+ \S+)\s+\(Local Int.+?\)[\s\S]+Remote Interface: (\S+.+?) \(Remote Int.+?\)[\s\S]+System Name: (\S+)', line, re.M)
            if match:
                intf = match.group(1)
                if intf not in facts:
                    facts[intf] = list()
                fact = dict()
                fact['host'] = match.group(3)
                fact['port'] = match.group(2)
                facts[intf].append(fact)
        return facts

    def parse_interfaces(self, data):
        """Split ``show interface`` output into {name: raw text} chunks."""
        parsed = dict()
        # Interface sections are separated by blank lines.
        for interface in data.split('\n\n'):
            match = re.match(r'^(\S+ \S+)', interface, re.M)
            if not match:
                continue
            else:
                parsed[match.group(1)] = interface
        return parsed

    # Each parse_* helper below returns the captured group, or None when
    # the pattern is not present in the interface text.
    def parse_description(self, data):
        match = re.search(r'Description: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_macaddress(self, data):
        match = re.search(r'Hardware is Ethernet, address is (\S+)', data)
        if match:
            return match.group(1)

    def parse_ipv4(self, data):
        # NOTE(review): appears unused here; populate_ipv4_interfaces uses
        # findall() directly instead. Confirm before removing.
        match = re.search(r'Primary Internet Address is ([^\s,]+)', data)
        if match:
            addr, masklen = match.group(1).split('/')
            return dict(address=addr, masklen=int(masklen))

    def parse_mtu(self, data):
        match = re.search(r'MTU (\d+) bytes', data)
        if match:
            return int(match.group(1))

    def parse_bandwidth(self, data):
        match = re.search(r'LineSpeed Actual\s+:\s(.+)', data)
        if match:
            return match.group(1)

    def parse_duplex(self, data):
        match = re.search(r'Duplex: (\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_type(self, data):
        match = re.search(r'Hardware is (.+),', data, re.M)
        if match:
            return match.group(1)

    def parse_lineprotocol(self, data):
        match = re.search(r'line protocol is (\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_operstatus(self, data):
        match = re.match(r'^(?:.+) is (.+),', data, re.M)
        if match:
            return match.group(1)
# Map gather_subset option values to their collector classes.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config)

# The set of subset names accepted on the command line.
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """main entry point for module execution

    Validates ``gather_subset``, runs the selected fact collectors and
    exits with all facts prefixed ``ansible_net_``.
    """
    argument_spec = dict(
        gather_subset=dict(default=["!config"], type='list')
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    gather_subset = module.params['gather_subset']

    runable_subsets = set()
    exclude_subsets = set()

    # Split the requested subsets into includes and '!'-prefixed excludes,
    # rejecting anything that is not a known subset name.
    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    # No explicit includes means "collect everything"; excludes are applied
    # afterwards and the 'default' facts are always collected.
    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    # Instantiate and run each selected collector, merging its facts.
    instances = [FACT_SUBSETS[key](module) for key in runable_subsets]
    for inst in instances:
        inst.populate()
        facts.update(inst.facts)

    # Prefix every fact key per the network facts module convention.
    # (The unused ``warnings = list()`` local has been removed.)
    ansible_facts = dict()
    for key, value in iteritems(facts):
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value

    module.exit_json(ansible_facts=ansible_facts)


if __name__ == '__main__':
    main()
| gpl-3.0 |
pedrobaeza/odoo | addons/l10n_fr/l10n_fr.py | 39 | 2117 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class l10n_fr_report(osv.osv):
    """A named, uniquely-coded French localization report made of lines."""
    _name = 'l10n.fr.report'
    _description = 'Report for l10n_fr'
    _columns = {
        'code': fields.char('Code', size=64),
        'name': fields.char('Name', size=128),
        # One report groups many computation lines (l10n.fr.line.report_id).
        'line_ids': fields.one2many('l10n.fr.line', 'report_id', 'Lines'),
    }
    # Report codes are enforced unique at the database level.
    _sql_constraints = [
        ('code_uniq', 'unique (code)','The code report must be unique !')
    ]
class l10n_fr_line(osv.osv):
    """A single line of a French localization report.

    Each line is identified by a unique variable name (``code``);
    ``definition`` presumably holds the expression computing it — confirm
    against the report engine.
    """
    _name = 'l10n.fr.line'
    _description = 'Report Lines for l10n_fr'
    _columns = {
        'code': fields.char('Variable Name', size=64),
        'definition': fields.char('Definition', size=512),
        'name': fields.char('Name', size=256),
        'report_id': fields.many2one('l10n.fr.report', 'Report'),
    }
    # Variable names are enforced unique at the database level.
    _sql_constraints = [
        ('code_uniq', 'unique (code)', 'The variable name must be unique !')
    ]
class res_company(osv.osv):
    """Extend companies with French legal identifiers (SIRET / APE)."""
    _inherit = 'res.company'
    _columns = {
        'siret': fields.char('SIRET', size=64),  # establishment identifier
        'ape': fields.char('APE', size=64),      # activity code
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Tinche/ubuntu-make | tests/__init__.py | 14 | 1532 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Ensure we keep a sane formatting syntax"""
import os
import pep8
from .tools import get_root_dir
import umake
from unittest import TestCase
class CodeCheck(TestCase):
def test_pep8(self):
"""Proceed a pep8 checking
Note that we have a .pep8 config file for maximum line length tweak
and excluding the virtualenv dir."""
pep8style = pep8.StyleGuide(config_file=os.path.join(get_root_dir(), '.pep8'))
# we want to use either local or system umake, but always local tests files
umake_dir = os.path.dirname(umake.__file__)
results = pep8style.check_files([umake_dir, os.path.join(get_root_dir(), "tests"),
os.path.join(get_root_dir(), "bin")])
self.assertEqual(results.get_statistics(), [])
| gpl-3.0 |
ryansb/boto | boto/ec2/placementgroup.py | 150 | 2002 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Placement Group
"""
from boto.ec2.ec2object import EC2Object
from boto.exception import BotoClientError
class PlacementGroup(EC2Object):
    """An EC2 placement group parsed from the describe-groups response."""

    def __init__(self, connection=None, name=None, strategy=None, state=None):
        super(PlacementGroup, self).__init__(connection)
        self.name = name
        self.strategy = strategy
        self.state = state

    def __repr__(self):
        return 'PlacementGroup:%s' % self.name

    def endElement(self, name, value, connection):
        # The 'groupName' XML element feeds the ``name`` attribute; every
        # other element ('strategy', 'state', ...) is stored under its own
        # element name, exactly as before.
        attr = 'name' if name == 'groupName' else name
        setattr(self, attr, value)

    def delete(self, dry_run=False):
        """Delete this placement group via the attached connection."""
        return self.connection.delete_placement_group(self.name,
                                                      dry_run=dry_run)
| mit |
coderbone/SickRage | lib/unidecode/x017.py | 252 | 4190 | data = (
'[?]', # 0x00
'[?]', # 0x01
'[?]', # 0x02
'[?]', # 0x03
'[?]', # 0x04
'[?]', # 0x05
'[?]', # 0x06
'[?]', # 0x07
'[?]', # 0x08
'[?]', # 0x09
'[?]', # 0x0a
'[?]', # 0x0b
'[?]', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'[?]', # 0x0f
'[?]', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'[?]', # 0x13
'[?]', # 0x14
'[?]', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'[?]', # 0x18
'[?]', # 0x19
'[?]', # 0x1a
'[?]', # 0x1b
'[?]', # 0x1c
'[?]', # 0x1d
'[?]', # 0x1e
'[?]', # 0x1f
'[?]', # 0x20
'[?]', # 0x21
'[?]', # 0x22
'[?]', # 0x23
'[?]', # 0x24
'[?]', # 0x25
'[?]', # 0x26
'[?]', # 0x27
'[?]', # 0x28
'[?]', # 0x29
'[?]', # 0x2a
'[?]', # 0x2b
'[?]', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'[?]', # 0x31
'[?]', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'[?]', # 0x36
'[?]', # 0x37
'[?]', # 0x38
'[?]', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'k', # 0x80
'kh', # 0x81
'g', # 0x82
'gh', # 0x83
'ng', # 0x84
'c', # 0x85
'ch', # 0x86
'j', # 0x87
'jh', # 0x88
'ny', # 0x89
't', # 0x8a
'tth', # 0x8b
'd', # 0x8c
'ddh', # 0x8d
'nn', # 0x8e
't', # 0x8f
'th', # 0x90
'd', # 0x91
'dh', # 0x92
'n', # 0x93
'p', # 0x94
'ph', # 0x95
'b', # 0x96
'bh', # 0x97
'm', # 0x98
'y', # 0x99
'r', # 0x9a
'l', # 0x9b
'v', # 0x9c
'sh', # 0x9d
'ss', # 0x9e
's', # 0x9f
'h', # 0xa0
'l', # 0xa1
'q', # 0xa2
'a', # 0xa3
'aa', # 0xa4
'i', # 0xa5
'ii', # 0xa6
'u', # 0xa7
'uk', # 0xa8
'uu', # 0xa9
'uuv', # 0xaa
'ry', # 0xab
'ryy', # 0xac
'ly', # 0xad
'lyy', # 0xae
'e', # 0xaf
'ai', # 0xb0
'oo', # 0xb1
'oo', # 0xb2
'au', # 0xb3
'a', # 0xb4
'aa', # 0xb5
'aa', # 0xb6
'i', # 0xb7
'ii', # 0xb8
'y', # 0xb9
'yy', # 0xba
'u', # 0xbb
'uu', # 0xbc
'ua', # 0xbd
'oe', # 0xbe
'ya', # 0xbf
'ie', # 0xc0
'e', # 0xc1
'ae', # 0xc2
'ai', # 0xc3
'oo', # 0xc4
'au', # 0xc5
'M', # 0xc6
'H', # 0xc7
'a`', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'r', # 0xcc
'', # 0xcd
'!', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'.', # 0xd4
' // ', # 0xd5
':', # 0xd6
'+', # 0xd7
'++', # 0xd8
' * ', # 0xd9
' /// ', # 0xda
'KR', # 0xdb
'\'', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'0', # 0xe0
'1', # 0xe1
'2', # 0xe2
'3', # 0xe3
'4', # 0xe4
'5', # 0xe5
'6', # 0xe6
'7', # 0xe7
'8', # 0xe8
'9', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-3.0 |
lnielsen/invenio | invenio/base/scripts/demosite.py | 1 | 4961 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Perform demosite operations."""
from __future__ import print_function
import warnings
warnings.warn("Use of `inveniomanage demosite populate` is being deprecated. "
"Please use `uploader` module to insert demo records.",
PendingDeprecationWarning)
import os
import pkg_resources
import sys
from invenio.ext.script import Manager
manager = Manager(usage=__doc__)
# Shortcuts for manager options to keep code DRY.
option_yes_i_know = manager.option('--yes-i-know', action='store_true',
dest='yes_i_know', help='use with care!')
option_default_data = manager.option('--no-data', action='store_false',
dest='default_data',
help='do not populate tables with '
'default data')
option_file = manager.option('-f', '--file', dest='files',
action='append', help='data file to use')
option_jobid = manager.option('-j', '--job-id', dest='job_id', type=int,
default=0, help='bibsched starting job id')
option_extrainfo = manager.option('-e', '--extra-info', dest='extra_info',
action='append',
help='extraneous parameters')
option_packages = manager.option('-p', '--packages', dest='packages',
action='append',
default=[],
help='package import name (repeteable)')
@option_packages
@option_default_data
@option_file
@option_jobid
@option_extrainfo
def populate(packages=None, default_data=True, files=None,
             job_id=0, extra_info=None):
    """Load demo records. Useful for testing purposes.

    :param packages: import names of packages providing demo data; falls
        back to ``invenio_demosite.base``. (Default changed from a shared
        mutable ``[]`` to ``None``; behavior is unchanged because any
        falsy value already triggered the fallback.)
    :param default_data: when False, skip loading anything.
    :param files: MARCXML files to upload; defaults to the bundled demo
        record file.
    :param job_id: starting bibsched task id.
    :param extra_info: extra flags; ``force-recids`` forces record ids on
        upload.
    """
    if not default_data:
        print('>>> Default data has been skiped (--no-data).')
        return
    if not packages:
        packages = ['invenio_demosite.base']

    from werkzeug.utils import import_string
    from invenio.config import CFG_PREFIX
    # Import each package for its side effects. A plain loop (instead of
    # ``map``) guarantees execution on Python 3, where map() is lazy.
    for package in packages:
        import_string(package)

    from invenio.ext.sqlalchemy import db
    print(">>> Going to load demo records...")
    db.session.execute("TRUNCATE schTASK")
    db.session.commit()

    if files is None:
        files = [pkg_resources.resource_filename(
            'invenio',
            os.path.join('testsuite', 'data', 'demo_record_marc_data.xml'))]

    # upload demo site files:
    bibupload_flags = '-i'
    if extra_info is not None and 'force-recids' in extra_info:
        bibupload_flags = '-i -r --force'
    for f in files:
        job_id += 1
        # Queue the upload task, then run it immediately.
        for cmd in ["%s/bin/bibupload -u admin %s %s" % (CFG_PREFIX, bibupload_flags, f),
                    "%s/bin/bibupload %d" % (CFG_PREFIX, job_id)]:
            if os.system(cmd):
                print("ERROR: failed execution of", cmd)
                sys.exit(1)

    # Post-processing pipeline: text extraction, indexing, formatting,
    # collection/rank/sort updates and the OAI repository refresh.
    for cmd in ["bin/bibdocfile --textify --with-ocr --recid 97",
                "bin/bibdocfile --textify --all",
                "bin/bibindex -u admin",
                "bin/bibindex %d" % (job_id + 1,),
                "bin/bibindex -u admin -w global",
                "bin/bibindex %d" % (job_id + 2,),
                "bin/bibreformat -u admin -o HB",
                "bin/bibreformat %d" % (job_id + 3,),
                "bin/webcoll -u admin",
                "bin/webcoll %d" % (job_id + 4,),
                "bin/bibrank -u admin",
                "bin/bibrank %d" % (job_id + 5,),
                "bin/bibsort -u admin -R",
                "bin/bibsort %d" % (job_id + 6,),
                "bin/oairepositoryupdater -u admin",
                "bin/oairepositoryupdater %d" % (job_id + 7,),
                "bin/bibupload %d" % (job_id + 8,)]:
        cmd = os.path.join(CFG_PREFIX, cmd)
        if os.system(cmd):
            print("ERROR: failed execution of", cmd)
            sys.exit(1)
    print(">>> Demo records loaded successfully.")
def main():
    """Start the commandline manager."""
    # Bind a freshly created Invenio application to the manager before
    # dispatching the command line.
    from invenio.base.factory import create_app
    app = create_app()
    manager.app = app
    manager.run()


if __name__ == '__main__':
    main()
| gpl-2.0 |
Revanth47/addons-server | src/olympia/amo/tests/test_middleware.py | 4 | 3972 | # -*- coding: utf-8 -*-
from django import test
from django.test.client import RequestFactory
import pytest
from commonware.middleware import ScrubRequestOnException
from mock import patch
from pyquery import PyQuery as pq
from olympia.amo.tests import TestCase
from olympia.amo.middleware import AuthenticationMiddlewareWithoutAPI
from olympia.amo.urlresolvers import reverse
from olympia.zadmin.models import Config
pytestmark = pytest.mark.django_db
class TestMiddleware(TestCase):
    """Behaviour of the custom middleware stack (Vary headers, API auth)."""

    def test_no_vary_cookie(self):
        # We don't break good usage of Vary.
        response = test.Client().get('/')
        assert response['Vary'] == 'Accept-Language, User-Agent, X-Mobile'

        # But we do prevent Vary: Cookie.
        response = test.Client().get('/', follow=True)
        assert response['Vary'] == 'X-Mobile, User-Agent'

    @patch('django.contrib.auth.middleware.'
           'AuthenticationMiddleware.process_request')
    def test_authentication_used_outside_the_api(self, process_request):
        # Regular (non-API) requests go through Django's auth middleware.
        req = RequestFactory().get('/')
        AuthenticationMiddlewareWithoutAPI().process_request(req)
        assert process_request.called

    @patch('django.contrib.sessions.middleware.'
           'SessionMiddleware.process_request')
    def test_authentication_not_used_with_the_api(self, process_request):
        # API requests skip session middleware processing entirely.
        req = RequestFactory().get('/api/lol')
        AuthenticationMiddlewareWithoutAPI().process_request(req)
        assert not process_request.called

    @patch('django.contrib.auth.middleware.'
           'AuthenticationMiddleware.process_request')
    def test_authentication_is_used_with_accounts_auth(self, process_request):
        # The accounts authentication endpoint is the one API URL that
        # still goes through the regular auth middleware.
        req = RequestFactory().get('/api/v3/accounts/authenticate/')
        AuthenticationMiddlewareWithoutAPI().process_request(req)
        assert process_request.called
def test_redirect_with_unicode_get():
    """Locale/app redirects keep percent-encoded UTF-8 query values intact."""
    response = test.Client().get(
        '/da/firefox/addon/5457?from=/da/firefox/'
        'addon/5457%3Fadvancedsearch%3D1&lang=ja&utm_source=Google+%E3'
        '%83%90%E3%82%BA&utm_medium=twitter&utm_term=Google+%E3%83%90%'
        'E3%82%BA')
    assert response.status_code == 301
    assert 'utm_term=Google+%E3%83%90%E3%82%BA' in response['Location']
def test_source_with_wrong_unicode_get():
    # The following url is a string (bytes), not unicode.
    # Undecodable bytes in the query string are dropped in the redirect
    # rather than raising.
    response = test.Client().get('/firefox/collections/mozmj/autumn/'
                                 '?source=firefoxsocialmedia\x14\x85')
    assert response.status_code == 301
    assert response['Location'].endswith('?source=firefoxsocialmedia%14')
def test_trailing_slash_middleware():
    # Trailing slashes are stripped via a 301, keeping the (re-encoded)
    # query string intact.
    response = test.Client().get(u'/en-US/about/?xxx=\xc3')
    assert response.status_code == 301
    assert response['Location'].endswith('/en-US/about?xxx=%C3%83')
class AdminMessageTest(TestCase):
    """The admin-configured 'site_notice' shows up and disappears sitewide."""

    def test_message(self):
        notice = Config.objects.create(key='site_notice', value='ET Sighted.')

        response = self.client.get(reverse('home'), follow=True)
        doc = pq(response.content)
        assert doc('#site-notice').text() == 'ET Sighted.'

        # Deleting the config entry removes the notice from the page.
        notice.delete()
        response = self.client.get(reverse('home'), follow=True)
        doc = pq(response.content)
        assert len(doc('#site-notice')) == 0
class TestNoDjangoDebugToolbar(TestCase):
    """Make sure the Django Debug Toolbar isn't available when DEBUG=False."""

    def test_no_django_debug_toolbar(self):
        with self.settings(DEBUG=False):
            response = self.client.get(reverse('home'), follow=True)
            # Neither the JS hook nor the app's markup may leak into pages.
            assert 'djDebug' not in response.content
            assert 'debug_toolbar' not in response.content
def test_hide_password_middleware():
    """Password fields are scrubbed from POST data when an exception hits."""
    req = RequestFactory().post('/', {'x': 1, 'password': 2, 'password2': 2})
    req.POST._mutable = False
    ScrubRequestOnException().process_exception(req, Exception())
    assert req.POST['x'] == '1'
    assert req.POST['password'] == '******'
    assert req.POST['password2'] == '******'
| bsd-3-clause |
photoninger/ansible | lib/ansible/modules/files/iso_extract.py | 101 | 6645 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
# Copyright: (c) 2016, Matt Robinson <git@nerdoftheherd.com>
# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Jeroen Hoekx (@jhoekx)
- Matt Robinson (@ribbons)
- Dag Wieers (@dagwieers)
module: iso_extract
short_description: Extract files from an ISO image
description:
- This module has two possible ways of operation.
- If 7zip is installed on the system, this module extracts files from an ISO
into a temporary directory and copies files to a given destination,
if needed.
- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
mounts the ISO image to a temporary location, and copies files to a given
destination, if needed.
version_added: '2.3'
requirements:
- Either 7z (from I(7zip) or I(p7zip) package)
- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
options:
image:
description:
- The ISO image to extract files from.
required: yes
aliases: [ path, src ]
dest:
description:
- The destination directory to extract files to.
required: yes
files:
description:
- A list of files to extract from the image.
- Extracting directories does not work.
required: yes
force:
description:
- If C(yes), which will replace the remote file when contents are different than the source.
- If C(no), the file will only be extracted and copied if the destination does not already exist.
type: bool
default: 'yes'
aliases: [ thirsty ]
version_added: '2.4'
executable:
description:
- The path to the C(7z) executable to use for extracting files from the ISO.
default: '7z'
version_added: '2.4'
notes:
- Only the file checksum (content) is taken into account when extracting files
from the ISO image. If C(force=no), only checks the presence of the file.
- In Ansible v2.3 this module was using C(mount) and C(umount) commands only,
requiring root access. This is no longer needed with the introduction of 7zip
for extraction.
'''
EXAMPLES = r'''
- name: Extract kernel and ramdisk from a LiveCD
iso_extract:
image: /tmp/rear-test.iso
dest: /tmp/virt-rear/
files:
- isolinux/kernel
- isolinux/initrd.cgz
'''
RETURN = r'''
#
'''
import os.path
import shutil
import tempfile
try: # python 3.3+
from shlex import quote
except ImportError: # older python
from pipes import quote
from ansible.module_utils.basic import AnsibleModule
def main():
    """Extract files from an ISO image using 7z when available, or by
    mounting the image otherwise, then copy them to the destination."""
    module = AnsibleModule(
        argument_spec=dict(
            image=dict(type='path', required=True, aliases=['path', 'src']),
            dest=dict(type='path', required=True),
            files=dict(type='list', required=True),
            force=dict(type='bool', default=True, aliases=['thirsty']),
            executable=dict(type='path'),  # No default on purpose
        ),
        supports_check_mode=True,
    )
    image = module.params['image']
    dest = module.params['dest']
    files = module.params['files']
    force = module.params['force']
    executable = module.params['executable']

    result = dict(
        changed=False,
        dest=dest,
        image=image,
    )

    # We want to know whether the user provided the executable or not, so the
    # default is applied here rather than in the argument spec.
    if executable is None:
        executable = '7z'

    binary = module.get_bin_path(executable, None)

    # When an executable was explicitly provided but not found, warn the user.
    if module.params['executable'] is not None and not binary:
        module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable)

    if not os.path.exists(dest):
        module.fail_json(msg="Directory '%s' does not exist" % dest)

    # Check the image file itself, not merely its parent directory, so a
    # missing ISO fails here with a clear message instead of at 7z/mount time.
    if not os.path.exists(image):
        module.fail_json(msg="ISO image '%s' does not exist" % image)

    result['files'] = []
    extract_files = list(files)

    if not force:
        # Check if we have to process any files based on existence
        for f in files:
            dest_file = os.path.join(dest, os.path.basename(f))
            if os.path.exists(dest_file):
                result['files'].append(dict(
                    checksum=None,
                    dest=dest_file,
                    src=f,
                ))
                extract_files.remove(f)

    if not extract_files:
        module.exit_json(**result)

    tmp_dir = tempfile.mkdtemp()

    # Use 7zip when we have a binary, otherwise try to mount.
    # Quote all interpolated paths so spaces or shell metacharacters in the
    # image/destination names cannot break the command line.
    if binary:
        cmd = '%s x %s %s %s' % (quote(binary), quote(image),
                                 quote('-o%s' % tmp_dir),
                                 ' '.join([quote(f) for f in extract_files]))
    else:
        cmd = 'mount -o loop,ro %s %s' % (quote(image), quote(tmp_dir))

    rc, out, err = module.run_command(cmd)
    if rc != 0:
        result.update(dict(
            cmd=cmd,
            rc=rc,
            stderr=err,
            stdout=out,
        ))
        shutil.rmtree(tmp_dir)

        if binary:
            module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result)
        else:
            module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result)

    try:
        for f in extract_files:
            tmp_src = os.path.join(tmp_dir, f)
            if not os.path.exists(tmp_src):
                module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result)

            src_checksum = module.sha1(tmp_src)

            dest_file = os.path.join(dest, os.path.basename(f))

            if os.path.exists(dest_file):
                dest_checksum = module.sha1(dest_file)
            else:
                dest_checksum = None

            result['files'].append(dict(
                checksum=src_checksum,
                dest=dest_file,
                src=f,
            ))

            # Only copy when the contents differ (checksum comparison).
            if src_checksum != dest_checksum:
                if not module.check_mode:
                    shutil.copy(tmp_src, dest_file)

                result['changed'] = True
    finally:
        # Always unmount (when mounted) and remove the temporary directory.
        if not binary:
            module.run_command('umount %s' % quote(tmp_dir))
        shutil.rmtree(tmp_dir)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
jandrest2018/TWJ-2017-A | 04 Angular/C-Web/node_modules/node-gyp/gyp/tools/pretty_gyp.py | 2618 | 4756 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceeded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
    """Blank out a comment body with an equal-length run of '#' characters."""
    prefix = matchobj.group(1)
    marker = matchobj.group(2)
    body = matchobj.group(3)
    return "%s%s%s" % (prefix, marker, '#' * len(body))
def mask_comments(input):
    """Overwrite comment text with '#' runs so braces inside comments are
    ignored when counting braces."""
    pattern = re.compile(r'(.*?)(#)(.*)')
    return [pattern.sub(comment_replace, row) for row in input]
def quote_replace(matchobj):
    """Replace a quoted string's contents with 'x' characters, keeping the
    surrounding quote characters intact."""
    quote_char = matchobj.group(2)
    masked_body = 'x' * len(matchobj.group(3))
    return matchobj.group(1) + quote_char + masked_body + quote_char
def mask_quotes(input):
    """Blank out quoted-string contents so braces inside quoted strings are
    ignored when counting braces."""
    pattern = re.compile(r'(.*?)' + QUOTE_RE_STR)
    return [pattern.sub(quote_replace, row) for row in input]
def do_split(input, masked_input, search_re):
    """Split lines wherever *search_re* matches on the masked copy.

    A literal two-character marker (backslash + 'n') is inserted at each
    match point in both the real and the masked line; the lines are then
    broken apart on that marker.  Returns a (lines, masked_lines) tuple.
    """
    marker = r'\n'
    lines = []
    masked_lines = []
    for raw, masked in zip(input, masked_input):
        match = search_re.match(masked)
        while match:
            cut = len(match.group(1))
            raw = raw[:cut] + marker + raw[cut:]
            masked = masked[:cut] + marker + masked[cut:]
            match = search_re.match(masked)
        lines.extend(raw.split(marker))
        masked_lines.extend(masked.split(marker))
    return (lines, masked_lines)
def split_double_braces(input):
    """Split lines that carry several braces so the indentation comes out
    pretty when laid out (e.g. closing braces form a nice diagonal line).

    Quotes and comments are masked first so braces inside them are ignored;
    lines matching the double open/close brace patterns are then split into
    separate lines.
    """
    open_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
    close_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

    masked = mask_comments(mask_quotes(input))
    lines, masked = do_split(input, masked, open_re)
    lines, masked = do_split(lines, masked, close_re)
    return lines
def count_braces(line):
    """Return (cnt, after) for a line: the net brace delta and whether the
    indentation change should be applied after printing the line.

    Open braces add one, close braces subtract one; comments and quoted
    strings are stripped first so their braces are not counted.
    """
    closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')

    stripline = COMMENT_RE.sub(r'', line)
    stripline = QUOTE_RE.sub(r"''", stripline)
    cnt = sum((char in '[({') - (char in '])}') for char in stripline)

    # Opening lines keep their current indentation; the indent grows after.
    after = cnt > 0
    # This catches the special case of a closing brace having something
    # other than just whitespace ahead of it -- we don't want to
    # unindent that until after this line is printed so it stays with
    # the previous indentation level.
    if cnt < 0 and closing_prefix_re.match(stripline):
        after = True
    return (cnt, after)
def prettyprint_input(lines):
    """Does the main work of indenting the input based on the brace counts.

    Comment-only lines are printed unchanged; other lines are stripped and
    indented by 2 spaces per open-brace level.  Single-argument print calls
    are used so this works under both Python 2 and Python 3.
    """
    indent = 0
    basic_offset = 2
    for line in lines:
        if COMMENT_RE.match(line):
            print(line)
        else:
            line = line.strip('\r\n\t ')  # Otherwise doesn't strip \r on Unix.
            if len(line) > 0:
                (brace_diff, after) = count_braces(line)
                if brace_diff != 0:
                    if after:
                        # Print first, then change the indentation level.
                        print(" " * (basic_offset * indent) + line)
                        indent += brace_diff
                    else:
                        indent += brace_diff
                        print(" " * (basic_offset * indent) + line)
                else:
                    print(" " * (basic_offset * indent) + line)
            else:
                print("")
def main():
    """Read a GYP file (first argument, or stdin) and pretty-print it.

    Returns 0 for use as a process exit status.
    """
    if len(sys.argv) > 1:
        # Use a context manager so the input file handle is not leaked.
        with open(sys.argv[1]) as f:
            data = f.read().splitlines()
    else:
        data = sys.stdin.read().splitlines()
    # Split up the double braces.
    lines = split_double_braces(data)

    # Indent and print the output.
    prettyprint_input(lines)
    return 0


if __name__ == '__main__':
    sys.exit(main())
| mit |
firemark/pixelopolis | docsrc/conf.py | 1 | 3489 | # Pixelopolis documentation build configuration file, created by
# sphinx-quickstart on Sun May 24 16:38:18 2020.
import os
import sys
# Make local extensions in this directory (e.g. css_options) importable.
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# Minimum Sphinx version required to build the documentation.
needs_sphinx = '1.0'

# Sphinx extension module names, both built-in and local.
extensions = [
    'sphinx.ext.githubpages',
    'sphinx.ext.graphviz',
    'sphinx.ext.todo',
    'css_options',
    'sphinx.ext.autosectionlabel',
]

# Prefix autogenerated section labels with their document name to avoid
# collisions between identically titled sections.
autosectionlabel_prefix_document = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# Source file suffix and the master toctree document.
source_suffix = '.rst'
master_doc = 'index'

# General information about the project.
project = 'Pixelopolis'
copyright = '2020, Firemark'
author = 'Firemark'

# The short X.Y version and the full release string.
version = '0.1.3'
release = '0.1.3'

language = 'en'

# Patterns, relative to the source directory, to ignore when looking for
# source files.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'about.html',
        'relations.html',
        'globaltoc.html',
        'sourcelink.html',
    ]
}

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'Pixelopolisdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Pixelopolis.tex', u'Pixelopolis Documentation',
     u'Firemark', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pixelopolis', u'Pixelopolis Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Pixelopolis', u'Pixelopolis Documentation',
     author, 'Pixelopolis', 'One line description of project.',
     'Miscellaneous'),
]
cloudbase/neutron | neutron/conf/wsgi.py | 9 | 1262 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_service import wsgi
from neutron._i18n import _
# Socket-related configuration options for the Neutron API server.
socket_opts = [
    cfg.IntOpt('backlog',
               default=4096,
               help=_("Number of backlog requests to configure "
                      "the socket with")),
    cfg.IntOpt('retry_until_window',
               default=30,
               help=_("Number of seconds to keep retrying to listen")),
    cfg.BoolOpt('use_ssl',
                default=False,
                help=_('Enable SSL on the API server')),
]
def register_socket_opts(cfg=cfg.CONF):
    """Register the socket options and the oslo.service wsgi options on *cfg*.

    The default configuration object is the global oslo.config CONF
    (evaluated at import time); a different ConfigOpts instance may be
    passed, e.g. for testing.
    """
    cfg.register_opts(socket_opts)
    wsgi.register_opts(cfg)
| apache-2.0 |
aldian/tensorflow | tensorflow/contrib/keras/api/keras/preprocessing/__init__.py | 132 | 1094 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.api.keras.preprocessing import image
from tensorflow.contrib.keras.api.keras.preprocessing import sequence
from tensorflow.contrib.keras.api.keras.preprocessing import text
del absolute_import
del division
del print_function
| apache-2.0 |
percyfal/snakemakelib | snakemakelib/report/picard.py | 1 | 25057 | # Copyright (c) 2014 Per Unneberg
import os
import sys
import re
import csv
import texttable as tt
import collections
from snakemakelib.report.utils import Template
import matplotlib
matplotlib.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import numpy as np
# http://stackoverflow.com/questions/2170900/get-first-list-index-containing-sub-string-in-python
def index_containing_substring(the_list, substring):
    """Return the index of the first element containing *substring*, or -1."""
    for position, element in enumerate(the_list):
        if substring in element:
            return position
    return -1
def _raw(x):
return (x, None)
def _convert_input(x):
if re.match("^[0-9]+$", x):
return int(x)
elif re.match("^[0-9,.]+$", x):
return float(x.replace(",", "."))
else:
return str(x)
def _read_picard_metrics(f):
    """Parse a picard metrics file into a (metrics, histogram) pair.

    Lines beginning with '#', a space, or a newline are skipped; the rest
    are tab-split with each field converted via :func:`_convert_input`.
    The histogram part is ``None`` when the file has no ``## HISTOGRAM``
    section.
    """
    with open(f) as fh:
        data = fh.readlines()

    def _parse(rows):
        return [[_convert_input(field) for field in row.rstrip("\n").split("\t")]
                for row in rows if not re.match("^[ #\n]", row)]

    # Locate the histogram section, if present.
    i_hist = index_containing_substring(data, "## HISTOGRAM")
    metrics_end = len(data) if i_hist == -1 else i_hist
    metrics = _parse(data[0:metrics_end])
    if i_hist == -1:
        return (metrics, None)
    return (metrics, _parse(data[i_hist:len(data)]))
def _indent_texttable_for_rst(ttab, indent=4, add_spacing=True):
"""Texttable needs to be indented for rst.
:param ttab: texttable object
:param indent: indentation (should be 4 *spaces* for rst documents)
:param add_spacing_row: add additional empty row below class directives
:returns: reformatted texttable object as string
"""
output = ttab.draw()
new_output = []
for row in output.split("\n"):
new_output.append(" " * indent + row)
if re.search('.. class::', row):
new_row = [" " if x != "|" else x for x in row]
new_output.append(" " * indent + "".join(new_row))
return "\n".join(new_output)
def _make_unique(l):
cnt = collections.Counter(l)
luniq = []
d = {}
for c in l:
if not c in d.keys():
d[c] = 0
else:
d[c] += 1
luniq.append("{c}.{sfx}".format(c=c, sfx=d[c]) if d[c]>0 else c)
return luniq
def make_rst_table(data, header=None, indent=True):
    """Make rst table with :py:mod:`Texttable`.

    Args:
        data (list): list of rows (lists of strings); the first row is the
            header row
        header (list): optional column header names replacing the first row
        indent (bool): indent table for rst

    Returns:
        rst-formatted table as a string, or "" when data is None
    """
    if data is None:
        return ""
    # Work on a shallow copy so the caller's list is not mutated when the
    # header row is replaced (the original assigned into data[0] in place).
    rows = list(data)
    if header is not None:
        rows[0] = header
    tab_tt = tt.Texttable()
    tab_tt.set_precision(2)
    # Column widths: at least the widest cell in each column, plus padding.
    w = [len(c) + 2 for c in rows[0]]
    for r in rows:
        for i in range(0, len(r)):
            w[i] = max(w[i], len(r[i]) + 2)
    tab_tt.add_rows(rows)
    tab_tt.set_cols_width(w)
    tab_tt.set_cols_align("r" * len(rows[0]))
    if indent:
        return _indent_texttable_for_rst(tab_tt)
    return tab_tt.draw()
class DataFrame(object):
    """Light weight data frame object

    A data frame is represented as an OrderedDict.

    Args:
        *args: if provided, must be a list of lists that upon initialization is converted to an OrderedDict. The first list is treated as a header.
    """
    # Column name -> (format spec, type) map used by summary()/as_list().
    _format = collections.OrderedDict()
    _tp = Template()

    def __init__(self, *args, **fmt):
        # Single-column input cannot go through csv.DictReader (no separator
        # to join on), so it is handled separately.
        if (len(args[0]) == 1):
            self._colnames = args[0]
            self._data = [collections.OrderedDict([(args[0][0], row[0])]) for row in args[1:]]
        else:
            reader = csv.DictReader([",".join([str(y) for y in x]) for x in args])
            self._colnames = reader.fieldnames
            self._data = [collections.OrderedDict([(k, row[k]) for k in self._colnames]) for row in reader]
        if fmt:
            self._format = collections.OrderedDict(fmt)

    def __str__(self):
        return "{cls} object with {rows} rows, {columns} columns".format(cls=self.__class__, rows=self.dim[0], columns=self.dim[1])

    def __iter__(self):
        # Iteration yields the row OrderedDicts, restarting from the top.
        self.index = 0
        return self

    def __next__(self):
        if len(self._data) > self.index:
            self.index += 1
            return self._data[self.index - 1]
        else:
            raise StopIteration

    def __getitem__(self, columns):
        """Column subset: df[["A", "B"]] returns a new DataFrame."""
        a = [columns] + [[row[c] for c in columns] for row in self._data]
        return self.__class__(*a)

    def __setitem__(self, key, val):
        """Set or add a column; a list value must match the number of rows,
        a scalar is broadcast to every row."""
        # if val is a list must be equal in length to no. rows
        if isinstance(val, list):
            for (row, v) in zip(self._data, val):
                row.update([(key, v)])
        else:
            _ = [row.update([(key, val)]) for row in self._data]
        if not key in self.colnames:
            self.colnames.append(key)

    def x(self, column=None, indices=None, ctype=None):
        """Return the values of *column* (default: first column), optionally
        subset by *indices* and cast to *ctype*."""
        column = self.colnames[0] if not column else column
        x = [row[column] for row in self._data]
        if indices:
            x = [self._data[i][column] for i in indices]
        if ctype:
            x = [ctype(y) for y in x]
        return x
    # Copy definition of x: y is an alias so callers can write df.y(...)
    y = x

    @property
    def colnames(self):
        return self._colnames

    @property
    def data(self):
        return self._data

    @property
    def dim(self):
        # (number of rows, number of columns)
        return (len(self._data), len(self._data[0]))

    def as_list(self):
        """Header plus formatted rows as a list of lists."""
        return [self._colnames] + [[self._format[c][1](row[c]) for c in self._colnames] for row in self._data]

    def _format_field(self, value, spec, ctype):
        """Format one field; placeholder ('?') and empty values fall back to
        plain string formatting."""
        if value == '?':
            spec = 's'
            ctype = str
        elif value == "":
            spec = 's'
            ctype = str
        value = ctype(value)
        return self._tp.format_field(value, spec)

    def set_format(self, **fmt):
        self._format = collections.OrderedDict(fmt)

    def rst(self, raw=False):
        # NOTE(review): this method looks broken -- ``self.summary`` is a
        # bound method, so ``self.summary.split("\n")`` raises
        # AttributeError; in addition make_rst_table() returns a string,
        # which _indent_texttable_for_rst() cannot process (it calls
        # .draw()).  Confirm intent before relying on this method.
        ttab = make_rst_table(self.summary.split("\n"))
        return _indent_texttable_for_rst(ttab)

    def summary(self, fmt=None, ctype=None, sep="\t", raw=False):
        """Return a string summary: header line plus one formatted (or raw)
        line per row, joined by *sep*.  Raises ValueError when no format
        specification is available."""
        columns = self.colnames
        fmt = {k: v[0] for (k, v) in list(self._format.items()) if k in columns} if fmt is None else {k: v for (k, v) in zip(columns, fmt)}
        ctype = {k: v[1] for (k, v) in list(self._format.items()) if k in columns} if ctype is None else {k: v for (k, v) in zip(columns, ctype)}
        if not fmt:
            raise ValueError("No format defined for {cls}; please use derived subclass".format(cls=__class__))
        if raw:
            return "\n".join([sep.join([x for x in columns])] + [sep.join([r[c] for c in columns]) for r in self])
        return "\n".join([sep.join([x for x in columns])] + [sep.join([self._format_field(r[c], fmt[c], ctype[c]) for c in columns]) for r in self])
class PicardMetrics(object):
    """Generic class to store metrics section from Picard Metrics reports.

    See also class PicardHistMetrics for reports that provide metrics
    and histogram information.

    Args:
        *args: if provided, must be a list of lists that upon initialization is converted to an OrderedDict. The first list is treated as a header.
        filename (str): file from which to collect metrics
        identifier (str): unique identifier for object, e.g. sample or unit name

    Returns:
        An instance of class PicardMetrics
    """
    # Column name -> (format spec, type); overridden by subclasses.
    _format = collections.OrderedDict()
    _tp = Template()

    def __init__(self, *args, filename=None, identifier=None):
        self._id = identifier
        if filename is None and not args:
            raise ValueError("please supply either filename or args to instantiate class")
        self._set_vars(identifier, filename)
        # Only read from file when no explicit data rows were supplied.
        if not self.filename is None and not args:
            (args, _) = _read_picard_metrics(self.filename)
        self._metrics = DataFrame(*args, **self._format)

    def _set_vars(self, identifier, filename):
        # Fall back to the file name as identifier when none was given.
        self._id = identifier if not identifier is None else filename
        self._filename = str(filename)

    def __str__(self):
        return "{cls} object with a metrics field with {rows} rows, {columns} columns".format(cls=self.__class__, rows=self.metrics.dim[0], columns=self.metrics.dim[1])

    def __getitem__(self, columns):
        """Return a new metrics object restricted to *columns*, preserving
        the per-column format specifications."""
        a = [columns] + [[row[c] for c in columns] for row in self._metrics._data]
        m = self.__class__(*a, identifier=self.id, filename=self.filename)
        fmt = collections.OrderedDict([(c, self.metrics._format[c]) for c in columns])
        m.set_format(**fmt)
        return m

    @property
    def metrics(self):
        # Underlying DataFrame holding the parsed metrics section.
        return self._metrics

    @property
    def id(self):
        return self._id

    @property
    def filename(self):
        return self._filename

    def x(self, column=None, indices=None):
        # NOTE(review): calling x() without a column raises KeyError here,
        # since _format has no None key -- callers appear to always pass a
        # column name; confirm before relying on the default.
        return self.metrics.x(column, indices, ctype=self._format[column][1])
    # y is an alias of x
    y = x

    def summary(self, fmt=None, ctype=None, sep="\t", raw=False):
        """Delegate to the underlying DataFrame's summary()."""
        return self.metrics.summary(fmt, ctype, sep, raw)

    def as_list(self):
        return self.metrics.as_list()

    def set_format(self, **fmt):
        self._format = collections.OrderedDict(fmt)
        self._metrics.set_format(**fmt)

    def add_column(self, col, val, **fmt):
        """Add column to metrics"""
        self.metrics[col] = val
        self._metrics._format.update(fmt)
        # NOTE(review): when set_format() has not been called, _format is
        # still the class-level dict, so this update() mutates it for all
        # instances of the class -- confirm this sharing is intended.
        self._format.update(fmt)
class PicardHistMetrics(PicardMetrics):
    """Generic class to store metrics section from Picard Histogram
    Metrics reports.

    In addition to metrics data the class also stores histogram
    values.

    Args:
        name (str): unique identifier for object, e.g. sample or unit name
        filename (str): file from which to collect metrics
        hist (list): list of lists, where the first entry holds the
            names of the values
        *args (list): if provided, must be a list of lists that upon
            initialization is converted to an OrderedDict. The first list
            is treated as a header.

    Returns:
        An instance of class PicardHistMetrics
    """
    def __init__(self, *args, identifier=None, filename=None, hist=None):
        # NB: __init__ should call super, but with current
        # implementation would require reading the metrics file twice!
        if filename is None and hist is None:
            raise ValueError("please provide argument hist when not reading from file")
        if filename is None and not args:
            raise ValueError("please supply either filename or args to instantiate class")
        self._set_vars(identifier, filename)
        if not self.filename is None and not args:
            (args, hist) = _read_picard_metrics(self.filename)
        self._metrics = DataFrame(*args, **self._format)
        if hist:
            # Infer one python type per histogram column from the first data
            # row; used as the DataFrame format map.
            fmt = collections.OrderedDict([(x, type(y)) for x, y in zip(hist[0], hist[1])])
            self._hist = DataFrame(*hist, **fmt)

    def __getitem__(self, columns):
        """Column subset of the metrics; the histogram is carried over whole."""
        a = [columns] + [[row[c] for c in columns] for row in self.metrics._data]
        h = [self.hist.colnames] + [[row[c] for c in self.hist.colnames] for row in self.hist._data]
        return self.__class__(*a, identifier=self.id, filename=self.filename, hist=h)

    @property
    def hist(self):
        # Histogram section as a DataFrame (only set when hist data exists).
        return self._hist
class AlignMetrics(PicardMetrics):
    """Picard alignment summary metrics (one row per alignment category)."""
    # Column name -> (format spec, type); order defines the column order.
    _format = collections.OrderedDict([('CATEGORY', ('s', str)), ('TOTAL_READS', ('3.2h', int)),
                                       ('PF_READS', ('3.2h', int)), ('PCT_PF_READS', ('3.2%', float)),
                                       ('PF_NOISE_READS', ('3.2h', int)), ('PF_READS_ALIGNED', ('3.2h', int)),
                                       ('PCT_PF_READS_ALIGNED', ('3.2%', float)), ('PF_ALIGNED_BASES', ('3.2h', int)),
                                       ('PF_HQ_ALIGNED_READS', ('3.2h', int)), ('PF_HQ_ALIGNED_BASES', ('3.2h', int)),
                                       ('PF_HQ_ALIGNED_Q20_BASES', ('3.2h', int)), ('PF_HQ_MEDIAN_MISMATCHES', ('', int)),
                                       ('PF_MISMATCH_RATE', ('3.2f', float)), ('PF_HQ_ERROR_RATE', ('3.2f', float)), ('PF_INDEL_RATE', ('3.2f', float)),
                                       ('MEAN_READ_LENGTH', ('3.2f', float)), ('READS_ALIGNED_IN_PAIRS', ('3.2h', int)),
                                       ('PCT_READS_ALIGNED_IN_PAIRS', ('3.2%', float)), ('BAD_CYCLES', ('3.2h', int)), ('STRAND_BALANCE', ('3.2f', float)),
                                       ('PCT_CHIMERAS', ('3.2%', float)), ('PCT_ADAPTER', ('3.2%', float)), ('SAMPLE', ('s', str)),
                                       ('LIBRARY', ('s', str)), ('READ_GROUP', ('s', str))])

    def __init__(self, *args, identifier=None, filename=None):
        super(AlignMetrics, self).__init__(*args, identifier=identifier, filename=filename)

    def category(self, category="PAIR"):
        """Retrieve subset object with only one alignment category"""
        a = [self.metrics.colnames] + [[row[c] for c in self.metrics.colnames] for row in self._metrics if row['CATEGORY'] == category]
        return AlignMetrics(*a, filename=self.filename, identifier=self.id)
class InsertMetrics(PicardHistMetrics):
    """Picard insert size metrics (metrics + insert size histogram)."""
    # Column name -> (format spec, type); order defines the column order.
    _format = collections.OrderedDict([('MEDIAN_INSERT_SIZE', ('', int)), ('MEDIAN_ABSOLUTE_DEVIATION', ('', int)),
                                       ('MIN_INSERT_SIZE', ('', int)), ('MAX_INSERT_SIZE', ('', int)),
                                       ('MEAN_INSERT_SIZE', ('3.3f', float)), ('STANDARD_DEVIATION', ('3.3f', float)),
                                       ('READ_PAIRS', ('3.2h', int)), ('PAIR_ORIENTATION', ('s', str)),
                                       ('WIDTH_OF_10_PERCENT', ('', int)), ('WIDTH_OF_20_PERCENT', ('', int)),
                                       ('WIDTH_OF_30_PERCENT', ('', int)), ('WIDTH_OF_40_PERCENT', ('', int)),
                                       ('WIDTH_OF_50_PERCENT', ('', int)), ('WIDTH_OF_60_PERCENT', ('', int)),
                                       ('WIDTH_OF_70_PERCENT', ('', int)), ('WIDTH_OF_80_PERCENT', ('', int)),
                                       ('WIDTH_OF_90_PERCENT', ('', int)), ('WIDTH_OF_99_PERCENT', ('', int)),
                                       ('SAMPLE', ('s', str)), ('LIBRARY', ('s', str)), ('READ_GROUP', ('s', str))])

    def __init__(self, *args, identifier=None, filename=None, hist=None):
        super(InsertMetrics, self).__init__(*args, identifier=identifier, filename=filename, hist=hist)
class HsMetrics(PicardMetrics):
    """Picard hybrid-selection (target capture) metrics."""
    # Column name -> (format spec, type); order defines the column order.
    _format = collections.OrderedDict([('BAIT_SET', ('s', str)), ('GENOME_SIZE', ('3.2h', int)),
                                       ('BAIT_TERRITORY', ('3.2h', int)), ('TARGET_TERRITORY', ('3.2h', int)),
                                       ('BAIT_DESIGN_EFFICIENCY', ('3.2f', float)), ('TOTAL_READS', ('3.2h', int)),
                                       ('PF_READS', ('3.2h', int)), ('PF_UNIQUE_READS', ('3.2h', int)), ('PCT_PF_READS', ('3.2%', float)),
                                       ('PCT_PF_UQ_READS', ('3.2%', float)), ('PF_UQ_READS_ALIGNED', ('3.2h', int)),
                                       ('PCT_PF_UQ_READS_ALIGNED', ('3.2%', float)), ('PF_UQ_BASES_ALIGNED', ('3.2h', int)),
                                       ('ON_BAIT_BASES', ('3.2h', int)), ('NEAR_BAIT_BASES', ('3.2h', int)), ('OFF_BAIT_BASES', ('3.2h', int)),
                                       ('ON_TARGET_BASES', ('3.2h', int)), ('PCT_SELECTED_BASES', ('3.2%', float)), ('PCT_OFF_BAIT', ('3.2%', float)),
                                       ('ON_BAIT_VS_SELECTED', ('3.2f', float)), ('MEAN_BAIT_COVERAGE', ('3.2f', float)),
                                       ('MEAN_TARGET_COVERAGE', ('3.2f', float)), ('PCT_USABLE_BASES_ON_BAIT', ('3.2%', float)),
                                       ('PCT_USABLE_BASES_ON_TARGET', ('3.2%', float)), ('FOLD_ENRICHMENT', ('3.2f', float)),
                                       ('ZERO_CVG_TARGETS_PCT', ('3.2%', float)), ('FOLD_80_BASE_PENALTY', ('3.2f', float)),
                                       ('PCT_TARGET_BASES_2X', ('3.2%', float)), ('PCT_TARGET_BASES_10X', ('3.2%', float)),
                                       ('PCT_TARGET_BASES_20X', ('3.2%', float)), ('PCT_TARGET_BASES_30X', ('3.2%', float)),
                                       ('PCT_TARGET_BASES_40X', ('3.2%', float)), ('PCT_TARGET_BASES_50X', ('3.2%', float)),
                                       ('PCT_TARGET_BASES_100X', ('3.2%', float)), ('HS_LIBRARY_SIZE', ('3.2h', int)), ('HS_PENALTY_10X', ('3.2f', float)),
                                       ('HS_PENALTY_20X', ('3.2f', float)), ('HS_PENALTY_30X', ('3.2f', float)), ('HS_PENALTY_40X', ('3.2f', float)),
                                       ('HS_PENALTY_50X', ('3.2f', float)), ('HS_PENALTY_100X', ('3.2f', float)), ('AT_DROPOUT', ('3.2f', float)),
                                       ('GC_DROPOUT', ('3.2f', float)), ('SAMPLE', ('s', str)), ('LIBRARY', ('s', str)), ('READ_GROUP', ('s', str))])

    def __init__(self, *args, identifier=None, filename=None):
        super(HsMetrics, self).__init__(*args, identifier=identifier, filename=filename)
class DuplicationMetrics(PicardHistMetrics):
    """Picard duplication metrics (metrics + histogram sections)."""
    # Column name -> (format spec, type); order defines the column order.
    _format = collections.OrderedDict([('LIBRARY', ('s', str)), ('UNPAIRED_READS_EXAMINED', ('3.2h', int)),
                                       ('READ_PAIRS_EXAMINED', ('3.2h', int)), ('UNMAPPED_READS', ('3.2h', int)),
                                       ('UNPAIRED_READ_DUPLICATES', ('3.2h', int)), ('READ_PAIR_DUPLICATES', ('3.2h', int)),
                                       ('READ_PAIR_OPTICAL_DUPLICATES', ('3.2f', float)),
                                       ('PERCENT_DUPLICATION', ('3.2%', float)), ('ESTIMATED_LIBRARY_SIZE', ('3.2h', int))])

    def __init__(self, *args, identifier=None, filename=None, hist=None):
        super(DuplicationMetrics, self).__init__(*args, identifier=identifier, filename=filename, hist=hist)
        self._prune_empty_rows()

    def _prune_empty_rows(self):
        """Prune empty rows in metrics. This could happen if there is no read
        group information and all metrics are stored in 'Unknown
        library'

        """
        # Only a two-row metrics section is pruned; the second row is kept.
        if (self.metrics.dim[0] != 2):
            return
        self.metrics._data = [self._metrics.data[1]]
def combine_metrics(metrics, mergename="PicardMetrics_merge", uniquify=False):
    """Convert list of metrics objects to PicardMetrics object

    Args:
        metrics (list of tuples): list of metrics objects
        mergename (str): id to give to merged object
        uniquify (bool): convert columns to unique names, appending .# where # is the count of the duplicated column

    Returns:
        new PicardMetrics object with format specifications set for summary operations

    Raises:
        ValueError: when the metrics differ in row count, or the tuples do
            not all share the same column sets
    """
    # All metrics must have the same number of rows to be merged side by side.
    nrows = set(nr for mtup in metrics for nr in (m.metrics.dim[0] for m in mtup))
    if len(nrows) > 1:
        raise ValueError("not all metrics of equal length; most probably you need to subset an AlignMetrics class to one category")
    ncols = set(nc for mtup in metrics for nc in (m.metrics.dim[1] for m in mtup))
    if len(ncols) > len(metrics[0]):
        raise ValueError("not all metrics tuples have same set of columns; refusing to merge")

    # Header: the concatenated column names of the first tuple's members.
    colnames = [c for m in metrics[0] for c in m.metrics.colnames]
    args = [_make_unique(colnames)] if uniquify else [colnames]
    for mtup in metrics:
        rowargs = []
        fmtlist = []
        for m in mtup:
            rowargs += [m.metrics.data[0][k] for k in m.metrics.colnames]
            fmtlist += [(k, v) for k, v in m._format.items()]
        args.append(rowargs)
    pm = PicardMetrics(*args, identifier=mergename)
    # Format specs come from the last tuple; column sets are identical across
    # tuples (checked above), so any tuple's specs apply.
    fmt = collections.OrderedDict(fmtlist)
    if uniquify:
        # Propagate each original column's format spec to its uniquified
        # '<name>.<n>' aliases.
        for c in args[0]:
            m = re.match(r"(.*)\.[0-9]+$", c)
            if m:
                fmt[c] = fmt[m.group(1)]
    pm.set_format(**fmt)
    return pm
def _plot_metric_per_sample(values, samples, ylabel, outfile, percent=False):
    """Plot one metric value per sample ("o" markers) and save the figure.

    Args:
        values: sequence of metric values, one per sample
        samples: sample names used as x tick labels
        ylabel (str): y axis label
        outfile (str): file name to save the figure to
        percent (bool): if True, treat values as fractions in [0, 1] and
            relabel the y axis as 0-100 percent
    """
    n = len(samples)
    plt.clf()
    plt.xlim(0, n + 2)
    plt.xticks(range(1, n + 1), [x for x in samples], rotation=90)
    if percent:
        plt.ylim(0, 1)
        plt.yticks(arange(0, 1.1, 0.1), range(0, 110, 10))
    plt.plot(range(1, n + 1), values, "o")
    plt.xlabel(r'Sample', fontsize=14)
    plt.ylabel(ylabel, fontsize=14)
    plt.tight_layout()
    plt.savefig(outfile)
    plt.close()
def qc_plots(inputfiles, config, output):
    """Generate qc plots for picard QC summary report.

    Args:
        inputfiles: list of picard metrics file names
        config: dict with keys 'samples', the metrics-file suffixes
            ('alnmetrics', 'insmetrics', 'dupmetrics', 'hsmetrics'),
            'columns', 'hticks' and 'summarycolumns'
        output: object exposing output file names as attributes (seqstats,
            alnmet, dupmet, insmet, targetmet, target2dup, hsmet, hsmetsub,
            summarytable, metricstable)
    """
    samples = config['samples']
    # Collect one (align, insert, duplication, hs) metrics tuple per sample;
    # assumes the four comprehensions yield files in the same sample order.
    mlist = list(
        zip(
            [AlignMetrics(filename=x).category() for x in inputfiles if x.endswith(config['alnmetrics'])],
            [InsertMetrics(filename=x) for x in inputfiles if x.endswith(config['insmetrics'])],
            [DuplicationMetrics(filename=x) for x in inputfiles if x.endswith(config['dupmetrics'])],
            [HsMetrics(filename=x) for x in inputfiles if x.endswith(config['hsmetrics'])]
        )
    )
    pm = combine_metrics(mlist)
    pm.add_column('PCT_ON_TARGET', [str(100 * float(x)/float(y)) for (x,y) in zip(pm.x('ON_TARGET_BASES'), pm.y('PF_UQ_BASES_ALIGNED'))], **{'PCT_ON_TARGET' : ('3.2f', float)})
    pm.metrics['SAMPLE'] = samples
    # Sequence statistics: read count vs percent aligned, point size
    # proportional to the duplication level of each sample.
    plt.clf()
    sdup = [int(50 + 500 * x) for x in pm.x('PERCENT_DUPLICATION')]
    plt.scatter(pm.x('PCT_PF_READS_ALIGNED'), pm.y('TOTAL_READS'), s=sdup, alpha=0.5)
    plt.xlabel(r'Percent aligned', fontsize=14)
    plt.yscale('log', **{'basey': 10})
    plt.xticks(arange(0, 1.1, 0.1), range(0, 110, 10))
    plt.ylabel(r'Read count', fontsize=14)
    plt.title("Sequence summary.\nPoint sizes correspond to duplication levels.", fontsize=14)
    plt.tight_layout()
    plt.savefig(output.seqstats)
    plt.close()
    # One-value-per-sample plots (formerly four copy-pasted sections).
    _plot_metric_per_sample(pm.x('PCT_PF_READS_ALIGNED'), samples,
                            r'Percent aligned', output.alnmet, percent=True)
    _plot_metric_per_sample(pm.x('PERCENT_DUPLICATION'), samples,
                            r'Percent duplication', output.dupmet, percent=True)
    _plot_metric_per_sample(pm.x('MEAN_INSERT_SIZE'), samples,
                            r'Mean insert size', output.insmet)
    _plot_metric_per_sample(pm.x('PCT_ON_TARGET'), samples,
                            r'Percent on target', output.targetmet)
    # Percent duplication vs percent on target.
    plt.clf()
    plt.plot(pm.x('PERCENT_DUPLICATION'), pm.y('PCT_ON_TARGET'), "o")
    plt.xlabel(r'Percent duplication', fontsize=14)
    plt.ylabel(r'Percent on target', fontsize=14)
    plt.tight_layout()
    plt.savefig(output.target2dup)
    plt.close()
    # Hs metrics
    columns = config['columns']
    hticks = config['hticks']
    hsmetrics = pm[columns]
    # Hs boxplot metrics over all samples.
    plt.clf()
    plt.ylim(0, 1)
    plt.yticks(arange(0, 1.1, 0.1), range(0, 110, 10))
    plt.boxplot(np.array(hsmetrics.as_list()[1:]))
    plt.xticks(range(1, len(hticks) + 1), [x for x in hticks])
    plt.savefig(output.hsmet)
    plt.close()
    # Per-sample hs metrics: 3x3 grids, one figure per batch of nine samples.
    # True division (9.0) guarantees a partial batch gets its own figure even
    # under Python 2 integer-division semantics.
    n = len(samples)
    nsubplots = int(math.ceil(n / 9.0))
    k = 0
    for i_subplot in range(0, nsubplots):
        plt.clf()
        f, axarr = plt.subplots(3, 3, sharex='col', sharey='row')
        for i in range(0, 3):
            for j in range(0, 3):
                if k < n:
                    x = range(1, len(hticks) + 1)
                    axarr[i, j].plot(x, hsmetrics.as_list()[1:][k], "o")
                    axarr[i, j].set_xticks(x)
                    axarr[i, j].set_title(samples[k])
                    axarr[i, j].set_xlim(0, (len(hticks) + 1))
                    axarr[i, j].set_ylim(-0.05, 1.05)
                    axarr[i, j].set_yticks(arange(0, 1.1, 0.1))
                    axarr[i, j].set_yticklabels(range(0, 110, 10))
                    axarr[i, j].set_xticklabels([h for h in hticks], rotation=45)
                else:
                    # Blank out unused cells in the last grid.
                    axarr[i, j].axis('off')
                k += 1
        plt.savefig(output.hsmetsub[i_subplot])
        plt.close()
    # Write csv summary file
    pmsum = pm[config['summarycolumns']]
    with open(output.summarytable, "w") as fh:
        fh.write(pmsum.metrics.summary(sep=","))
    # Finally write entire merged metrics data frame
    with open(output.metricstable, "w") as fh:
        fh.write(pm.metrics.summary(raw=True, sep=","))
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/nltk/corpus/reader/api.py | 17 | 17161 | # Natural Language Toolkit: API for Corpus Readers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
API for corpus readers.
"""
import os
import re
from collections import defaultdict
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.sourcedstring import SourcedStringStream
from util import *
class CorpusReader(object):
    """
    A base class for "corpus reader" classes, each of which can be
    used to read a specific corpus format. Each individual corpus
    reader instance is used to read a specific corpus, consisting of
    one or more files under a common root directory. Each file is
    identified by its ``file identifier``, which is the relative path
    to the file from the root directory.
    A separate subclass is be defined for each corpus format. These
    subclasses define one or more methods that provide 'views' on the
    corpus contents, such as ``words()`` (for a list of words) and
    ``parsed_sents()`` (for a list of parsed sentences). Called with
    no arguments, these methods will return the contents of the entire
    corpus. For most corpora, these methods define one or more
    selection arguments, such as ``fileids`` or ``categories``, which can
    be used to select which portion of the corpus should be returned.
    """
    def __init__(self, root, fileids, encoding=None, tag_mapping_function=None):
        """
        :type root: PathPointer or str
        :param root: A path pointer identifying the root directory for
            this corpus. If a string is specified, then it will be
            converted to a ``PathPointer`` automatically.
        :param fileids: A list of the files that make up this corpus.
            This list can either be specified explicitly, as a list of
            strings; or implicitly, as a regular expression over file
            paths. The absolute path for each file will be constructed
            by joining the reader's root to each file name.
        :param encoding: The default unicode encoding for the files
            that make up the corpus. The value of ``encoding`` can be any
            of the following:
            - A string: ``encoding`` is the encoding name for all files.
            - A dictionary: ``encoding[file_id]`` is the encoding
              name for the file whose identifier is ``file_id``. If
              ``file_id`` is not in ``encoding``, then the file
              contents will be processed using non-unicode byte strings.
            - A list: ``encoding`` should be a list of ``(regexp, encoding)``
              tuples. The encoding for a file whose identifier is ``file_id``
              will be the ``encoding`` value for the first tuple whose
              ``regexp`` matches the ``file_id``. If no tuple's ``regexp``
              matches the ``file_id``, the file contents will be processed
              using non-unicode byte strings.
            - None: the file contents of all files will be
              processed using non-unicode byte strings.
        :param tag_mapping_function: A function for normalizing or
            simplifying the POS tags returned by the tagged_words()
            or tagged_sents() methods.
        """
        # Convert the root to a path pointer, if necessary.
        if isinstance(root, basestring):
            # The trailing '|' (empty alternative) makes this match always
            # succeed, so m is never None; group 1 is the ".zip" prefix when
            # the root points inside a zip archive.
            m = re.match('(.*\.zip)/?(.*)$|', root)
            zipfile, zipentry = m.groups()
            if zipfile:
                root = ZipFilePathPointer(zipfile, zipentry)
            else:
                root = FileSystemPathPointer(root)
        elif not isinstance(root, PathPointer):
            raise TypeError('CorpusReader: expected a string or a PathPointer')
        # If `fileids` is a regexp, then expand it.
        if isinstance(fileids, basestring):
            fileids = find_corpus_fileids(root, fileids)
        self._fileids = fileids
        """A list of the relative paths for the fileids that make up
        this corpus."""
        self._root = root
        """The root directory for this corpus."""
        # If encoding was specified as a list of regexps, then convert
        # it to a dictionary.
        if isinstance(encoding, list):
            encoding_dict = {}
            for fileid in self._fileids:
                for x in encoding:
                    (regexp, enc) = x
                    # First matching regexp wins for each fileid.
                    if re.match(regexp, fileid):
                        encoding_dict[fileid] = enc
                        break
            encoding = encoding_dict
        self._encoding = encoding
        """The default unicode encoding for the fileids that make up
        this corpus. If ``encoding`` is None, then the file
        contents are processed using byte strings (str)."""
        self._tag_mapping_function = tag_mapping_function
    def __repr__(self):
        # Show the zip archive + entry for zip-based corpora, the plain
        # filesystem path otherwise.
        if isinstance(self._root, ZipFilePathPointer):
            path = '%s/%s' % (self._root.zipfile.filename, self._root.entry)
        else:
            path = '%s' % self._root.path
        return '<%s in %r>' % (self.__class__.__name__, path)
    def readme(self):
        """
        Return the contents of the corpus README file, if it exists.
        """
        return self.open("README").read()
    def fileids(self):
        """
        Return a list of file identifiers for the fileids that make up
        this corpus.
        """
        return self._fileids
    def abspath(self, fileid):
        """
        Return the absolute path for the given file.
        :type file: str
        :param file: The file identifier for the file whose path
            should be returned.
        :rtype: PathPointer
        """
        return self._root.join(fileid)
    def abspaths(self, fileids=None, include_encoding=False,
                 include_fileid=False):
        """
        Return a list of the absolute paths for all fileids in this corpus;
        or for the given list of fileids, if specified.
        :type fileids: None or str or list
        :param fileids: Specifies the set of fileids for which paths should
            be returned. Can be None, for all fileids; a list of
            file identifiers, for a specified set of fileids; or a single
            file identifier, for a single file. Note that the return
            value is always a list of paths, even if ``fileids`` is a
            single file identifier.
        :param include_encoding: If true, then return a list of
            ``(path_pointer, encoding)`` tuples.
        :rtype: list(PathPointer)
        """
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, basestring):
            # Normalize a single file identifier to a one-element list.
            fileids = [fileids]
        paths = [self._root.join(f) for f in fileids]
        # Tuple layout depends on which extras the caller asked for.
        if include_encoding and include_fileid:
            return zip(paths, [self.encoding(f) for f in fileids], fileids)
        elif include_fileid:
            return zip(paths, fileids)
        elif include_encoding:
            return zip(paths, [self.encoding(f) for f in fileids])
        else:
            return paths
    def open(self, file, sourced=False):
        """
        Return an open stream that can be used to read the given file.
        If the file's encoding is not None, then the stream will
        automatically decode the file's contents into unicode.
        :param file: The file identifier of the file to read.
        """
        encoding = self.encoding(file)
        stream = self._root.join(file).open(encoding)
        if sourced:
            # Wrap the stream so reads carry source-location information.
            stream = SourcedStringStream(stream, file)
        return stream
    def encoding(self, file):
        """
        Return the unicode encoding for the given corpus file, if known.
        If the encoding is unknown, or if the given file should be
        processed using byte strings (str), then return None.
        """
        if isinstance(self._encoding, dict):
            return self._encoding.get(file)
        else:
            return self._encoding
    def _get_root(self): return self._root
    root = property(_get_root, doc="""
        The directory where this corpus is stored.
        :type: PathPointer""")
######################################################################
#{ Corpora containing categorized items
######################################################################
class CategorizedCorpusReader(object):
    """
    A mixin class used to aid in the implementation of corpus readers
    for categorized corpora. This class defines the method
    ``categories()``, which returns a list of the categories for the
    corpus or for a specified set of fileids; and overrides ``fileids()``
    to take a ``categories`` argument, restricting the set of fileids to
    be returned.
    Subclasses are expected to:
      - Call ``__init__()`` to set up the mapping.
      - Override all view methods to accept a ``categories`` parameter,
        which can be used *instead* of the ``fileids`` parameter, to
        select which fileids should be included in the returned view.
    """
    def __init__(self, kwargs):
        """
        Initialize this mapping based on keyword arguments, as
        follows:
        - cat_pattern: A regular expression pattern used to find the
          category for each file identifier. The pattern will be
          applied to each file identifier, and the first matching
          group will be used as the category label for that file.
        - cat_map: A dictionary, mapping from file identifiers to
          category labels.
        - cat_file: The name of a file that contains the mapping
          from file identifiers to categories. The argument
          ``cat_delimiter`` can be used to specify a delimiter.
        The corresponding argument will be deleted from ``kwargs``. If
        more than one argument is specified, an exception will be
        raised.
        """
        self._f2c = None #: file-to-category mapping
        self._c2f = None #: category-to-file mapping
        self._pattern = None #: regexp specifying the mapping
        self._map = None #: dict specifying the mapping
        self._file = None #: fileid of file containing the mapping
        self._delimiter = None #: delimiter for ``self._file``
        # Consume exactly one of the three mapping specifications; each
        # branch deletes its keyword so the leftover-kwargs check below can
        # detect that more than one was supplied.
        if 'cat_pattern' in kwargs:
            self._pattern = kwargs['cat_pattern']
            del kwargs['cat_pattern']
        elif 'cat_map' in kwargs:
            self._map = kwargs['cat_map']
            del kwargs['cat_map']
        elif 'cat_file' in kwargs:
            self._file = kwargs['cat_file']
            del kwargs['cat_file']
            if 'cat_delimiter' in kwargs:
                self._delimiter = kwargs['cat_delimiter']
                del kwargs['cat_delimiter']
        else:
            raise ValueError('Expected keyword argument cat_pattern or '
                             'cat_map or cat_file.')
        if ('cat_pattern' in kwargs or 'cat_map' in kwargs or
            'cat_file' in kwargs):
            raise ValueError('Specify exactly one of: cat_pattern, '
                             'cat_map, cat_file.')
    def _init(self):
        # Lazily build the bidirectional file<->category maps from whichever
        # specification was supplied at construction time.
        self._f2c = defaultdict(set)
        self._c2f = defaultdict(set)
        if self._pattern is not None:
            for file_id in self._fileids:
                # First regexp group is the category label; assumes every
                # fileid matches the pattern.
                category = re.match(self._pattern, file_id).group(1)
                self._add(file_id, category)
        elif self._map is not None:
            for (file_id, categories) in self._map.items():
                for category in categories:
                    self._add(file_id, category)
        elif self._file is not None:
            for line in self.open(self._file).readlines():
                line = line.strip()
                # First field is the fileid; the rest are its categories.
                file_id, categories = line.split(self._delimiter, 1)
                if file_id not in self.fileids():
                    raise ValueError('In category mapping file %s: %s '
                                     'not found' % (self._file, file_id))
                for category in categories.split(self._delimiter):
                    self._add(file_id, category)
    def _add(self, file_id, category):
        # Record the pairing in both directions.
        self._f2c[file_id].add(category)
        self._c2f[category].add(file_id)
    def categories(self, fileids=None):
        """
        Return a list of the categories that are defined for this corpus,
        or for the file(s) if it is given.
        """
        if self._f2c is None:
            self._init()
        if fileids is None:
            return sorted(self._c2f)
        if isinstance(fileids, basestring):
            fileids = [fileids]
        return sorted(set.union(*[self._f2c[d] for d in fileids]))
    def fileids(self, categories=None):
        """
        Return a list of file identifiers for the files that make up
        this corpus, or that make up the given category(s) if specified.
        """
        if categories is None:
            # Defer to the sibling CorpusReader base in the MRO.
            return super(CategorizedCorpusReader, self).fileids()
        elif isinstance(categories, basestring):
            if self._f2c is None:
                self._init()
            if categories in self._c2f:
                return sorted(self._c2f[categories])
            else:
                raise ValueError('Category %s not found' % categories)
        else:
            if self._f2c is None:
                self._init()
            return sorted(set.union(*[self._c2f[c] for c in categories]))
######################################################################
#{ Treebank readers
######################################################################
#[xx] is it worth it to factor this out?
class SyntaxCorpusReader(CorpusReader):
    """
    An abstract base class for reading corpora of syntactically
    parsed text. Concrete subclasses must supply:
      - ``__init__``, which specifies the location of the corpus
        and a method for detecting the sentence blocks in corpus files.
      - ``_read_block``, which reads a block from the input stream.
      - ``_word``, which takes a block and returns a list of list of words.
      - ``_tag``, which takes a block and returns a list of list of tagged
        words.
      - ``_parse``, which takes a block and returns a list of parsed
        sentences.
    """
    # Subclass hooks -------------------------------------------------
    def _parse(self, s):
        raise NotImplementedError()
    def _word(self, s):
        raise NotImplementedError()
    def _tag(self, s):
        raise NotImplementedError()
    def _read_block(self, stream):
        raise NotImplementedError()
    # View construction ----------------------------------------------
    def _view_concat(self, fileids, block_reader):
        # One lazy StreamBackedCorpusView per file, each decoding with that
        # file's own encoding, concatenated into a single view.
        return concat([StreamBackedCorpusView(fileid, block_reader,
                                              encoding=enc)
                       for fileid, enc in self.abspaths(fileids, True)])
    def raw(self, fileids=None):
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, basestring):
            fileids = [fileids]
        return concat([self.open(f).read() for f in fileids])
    def parsed_sents(self, fileids=None):
        return self._view_concat(fileids, self._read_parsed_sent_block)
    def tagged_sents(self, fileids=None, simplify_tags=False):
        def block_reader(stream):
            return self._read_tagged_sent_block(stream, simplify_tags)
        return self._view_concat(fileids, block_reader)
    def sents(self, fileids=None):
        return self._view_concat(fileids, self._read_sent_block)
    def tagged_words(self, fileids=None, simplify_tags=False):
        def block_reader(stream):
            return self._read_tagged_word_block(stream, simplify_tags)
        return self._view_concat(fileids, block_reader)
    def words(self, fileids=None):
        return self._view_concat(fileids, self._read_word_block)
    #------------------------------------------------------------
    #{ Block Readers
    def _read_word_block(self, stream):
        # Flatten one block's worth of sentences into a single word list.
        return sum(self._read_sent_block(stream), [])
    def _read_tagged_word_block(self, stream, simplify_tags=False):
        return sum(self._read_tagged_sent_block(stream, simplify_tags), [])
    def _read_sent_block(self, stream):
        # filter(None, ...) drops sentences that _word maps to empty/None.
        return filter(None, [self._word(t) for t in self._read_block(stream)])
    def _read_tagged_sent_block(self, stream, simplify_tags=False):
        return filter(None, [self._tag(t, simplify_tags)
                             for t in self._read_block(stream)])
    def _read_parsed_sent_block(self, stream):
        return filter(None, [self._parse(t) for t in self._read_block(stream)])
    #} End of Block Readers
    #------------------------------------------------------------
| agpl-3.0 |
willprice/weboob | modules/creditmutuel/module.py | 7 | 3016 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Julien Veyssier
# Copyright(C) 2012-2013 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
import string
from weboob.capabilities.bank import CapBank, AccountNotFound, Recipient, Account
from weboob.tools.backend import Module, BackendConfig
from weboob.tools.value import ValueBackendPassword
from .browser import CreditMutuelBrowser
__all__ = ['CreditMutuelModule']
class CreditMutuelModule(Module, CapBank):
    """Weboob banking backend for Crédit Mutuel."""
    NAME = 'creditmutuel'
    MAINTAINER = u'Julien Veyssier'
    EMAIL = 'julien.veyssier@aiur.fr'
    VERSION = '1.1'
    DESCRIPTION = u'Crédit Mutuel'
    LICENSE = 'AGPLv3+'
    CONFIG = BackendConfig(ValueBackendPassword('login', label='Identifiant', regexp=r'^\d{1,13}\w$', masked=False),
                           ValueBackendPassword('password', label='Mot de passe'))
    BROWSER = CreditMutuelBrowser
    def create_default_browser(self):
        """Instantiate the browser with the configured credentials."""
        return self.create_browser(self.config['login'].get(), self.config['password'].get())
    def iter_accounts(self):
        """Iterate over all accounts known to the bank."""
        for account in self.browser.get_accounts_list():
            yield account
    def get_account(self, _id):
        """Return the account with id ``_id``.

        Raises AccountNotFound if the bank does not know this account.
        """
        account = self.browser.get_account(_id)
        if account:
            return account
        else:
            raise AccountNotFound()
    def iter_coming(self, account):
        """Iterate over not-yet-debited (coming) transactions."""
        for tr in self.browser.get_history(account):
            if tr._is_coming:
                yield tr
    def iter_history(self, account):
        """Iterate over already-debited transactions."""
        for tr in self.browser.get_history(account):
            if not tr._is_coming:
                yield tr
    def iter_transfer_recipients(self, ignored):
        """Iterate over accounts usable as transfer recipients."""
        for account in self.browser.get_accounts_list():
            recipient = Recipient()
            recipient.id = account.id
            recipient.label = account.label
            yield recipient
    def transfer(self, account, to, amount, reason=None):
        """Transfer ``amount`` from ``account`` to recipient ``to``.

        ``account`` may be an Account object or a bare id; letter
        prefixes/suffixes are stripped from both ids. Invalid ids or
        amounts raise AccountNotFound, which is what existing callers
        already catch.
        """
        if isinstance(account, Account):
            account = account.id
        account = str(account).strip(string.letters)
        to = str(to).strip(string.letters)
        # Validate explicitly instead of with `assert`, which is silently
        # stripped when Python runs with -O.
        if not account.isdigit() or not to.isdigit():
            raise AccountNotFound()
        # Decimal() signals a malformed amount with decimal.InvalidOperation
        # (an ArithmeticError) — the previous `except ValueError` never
        # caught it, so bad amounts crashed instead of raising
        # AccountNotFound.
        try:
            amount = Decimal(amount)
        except (ValueError, ArithmeticError):
            raise AccountNotFound()
        return self.browser.transfer(account, to, amount, reason)
| agpl-3.0 |
endorphinl/horizon | horizon/test/helpers.py | 17 | 13705 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import django
import logging
import os
import socket
import time
from django.contrib.auth.middleware import AuthenticationMiddleware # noqa
from django.contrib.auth.models import Permission # noqa
from django.contrib.auth.models import User # noqa
from django.contrib.contenttypes.models import ContentType # noqa
from django.contrib.messages.storage import default_storage # noqa
from django.contrib.sessions.backends.base import SessionBase # noqa
from django.core.handlers import wsgi
from django import http
from django import test as django_test
from django.test.client import RequestFactory # noqa
from django.test import testcases
from django.utils.encoding import force_text
from django.utils import unittest
if django.VERSION < (1, 7):
from django.test import LiveServerTestCase # noqa
else:
from django.contrib.staticfiles.testing \
import StaticLiveServerTestCase as LiveServerTestCase # noqa
LOG = logging.getLogger(__name__)
try:
from selenium.webdriver.support import ui as selenium_ui
import xvfbwrapper # Only needed when running the Selenium tests headless
from horizon.test.webdriver import WebDriver # noqa
except ImportError as e:
# NOTE(saschpe): Several distribution can't ship selenium due to its
# non-free license. So they have to patch it out of test-requirements.txt
# Avoid import failure and force not running selenium tests.
LOG.warning("{0}, force WITH_SELENIUM=False".format(str(e)))
os.environ['WITH_SELENIUM'] = ''
from mox3 import mox
from horizon import middleware
# Makes output of failing mox tests much easier to read.
wsgi.WSGIRequest.__repr__ = lambda self: "<class 'django.http.HttpRequest'>"
class SessionStore(SessionBase):
    """In-memory stand-in for a Django session, for use in unit tests.

    Nothing is ever persisted: ``load`` always produces a fresh empty
    session and ``exists`` always reports False.
    """
    def load(self):
        """Start a new, empty session dict."""
        self.create()
        return {}
    def create(self):
        """Flag the session as modified; no backend record is made."""
        self.modified = True
    def save(self, must_create=False):
        """Pretend to persist: pin the session key and flag the change."""
        self._session_key = self._get_session_key()
        self.modified = True
    def exists(self, session_key=None):
        """No session is ever stored, so none can exist."""
        return False
    def delete(self, session_key=None):
        """Drop the key and cached data, flagging the change."""
        self._session_key = ''
        self._session_cache = {}
        self.modified = True
    def cycle_key(self):
        """Re-save the session (stands in for real key rotation)."""
        self.save()
    @classmethod
    def clear_expired(cls):
        """Nothing is stored, so there is nothing to expire."""
        pass
class RequestFactoryWithMessages(RequestFactory):
    """RequestFactory whose requests carry a user, a session, and message
    storage, so views relying on ``contrib.messages`` can run in tests."""
    def _decorate(self, req):
        # Attach the attributes that middleware would normally provide.
        req.user = User()
        req.session = SessionStore()
        req._messages = default_storage(req)
        return req
    def get(self, *args, **kwargs):
        return self._decorate(
            super(RequestFactoryWithMessages, self).get(*args, **kwargs))
    def post(self, *args, **kwargs):
        return self._decorate(
            super(RequestFactoryWithMessages, self).post(*args, **kwargs))
@unittest.skipIf(os.environ.get('SKIP_UNITTESTS', False),
                 "The SKIP_UNITTESTS env variable is set.")
class TestCase(django_test.TestCase):
    """Specialized base test case class for Horizon which gives access to
    numerous additional features:

    * The ``mox`` mocking framework via ``self.mox``.
    * A ``RequestFactory`` class which supports Django's ``contrib.messages``
      framework via ``self.factory``.
    * A ready-to-go request object via ``self.request``.
    """
    def setUp(self):
        super(TestCase, self).setUp()
        self.mox = mox.Mox()
        self._setup_test_data()
        self._setup_factory()
        self._setup_user()
        self._setup_request()
        # Run the same middleware chain a real request would pass through so
        # self.request looks like a live Horizon request.
        middleware.HorizonMiddleware().process_request(self.request)
        AuthenticationMiddleware().process_request(self.request)
        os.environ["HORIZON_TEST_RUN"] = "True"
    def _setup_test_data(self):
        # Hook for subclasses to load fixtures/test data.
        pass
    def _setup_factory(self):
        self.factory = RequestFactoryWithMessages()
    def _setup_user(self):
        self.user = User.objects.create_user(username='test', password='test')
        self.assertTrue(self.client.login(username="test", password="test"))
    def _setup_request(self):
        self.request = http.HttpRequest()
        # Reuse the test client's session so login state carries over.
        self.request.session = self.client._session()
    def tearDown(self):
        super(TestCase, self).tearDown()
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
        del os.environ["HORIZON_TEST_RUN"]
    def set_permissions(self, permissions=None):
        """Replace the test user's permissions with exactly the named set,
        creating Permission/ContentType rows under the 'horizon' app label
        as needed."""
        perm_ids = Permission.objects.values_list('id', flat=True)
        self.user.user_permissions.remove(*perm_ids)
        for name in permissions:
            ct, create = ContentType.objects.get_or_create(model=name,
                                                           app_label='horizon')
            perm, create = Permission.objects.get_or_create(codename=name,
                                                            content_type=ct,
                                                            name=name)
            self.user.user_permissions.add(perm)
        # Django caches permissions per user; drop the cache so the new set
        # takes effect immediately.
        if hasattr(self.user, "_perm_cache"):
            del self.user._perm_cache
    def assertNoMessages(self, response=None):
        """Asserts that no messages have been attached by the
        ``contrib.messages`` framework.
        """
        self.assertMessageCount(response, success=0, warn=0, info=0, error=0)
    def assertMessageCount(self, response=None, **kwargs):
        """Asserts that the specified number of messages have been attached
        for various message types. Usage would look like
        ``self.assertMessageCount(success=1)``.
        """
        temp_req = self.client.request(**{'wsgi.input': None})
        temp_req.COOKIES = self.client.cookies
        storage = default_storage(temp_req)
        messages = []
        if response is None:
            # To gain early access to the messages we have to decode the
            # cookie on the test client.
            if 'messages' in self.client.cookies:
                message_cookie = self.client.cookies['messages'].value
                messages = storage._decode(message_cookie)
        # Check for messages in the context
        elif hasattr(response, "context") and "messages" in response.context:
            messages = response.context["messages"]
        # Check for messages attached to the request on a TemplateResponse
        elif hasattr(response, "_request") and hasattr(response._request,
                                                       "_messages"):
            messages = response._request._messages._queued_messages
        # If we don't have messages and we don't expect messages, we're done.
        if not any(kwargs.values()) and not messages:
            return
        # If we expected messages and have none, that's a problem.
        if any(kwargs.values()) and not messages:
            error_msg = "Messages were expected, but none were set."
            assert 0 == sum(kwargs.values()), error_msg
        # Otherwise, make sure we got the expected messages.
        for msg_type, count in kwargs.items():
            msgs = [force_text(m.message)
                    for m in messages if msg_type in m.tags]
            assert len(msgs) == count, \
                "%s messages not as expected: %s" % (msg_type.title(),
                                                     ", ".join(msgs))
    def assertNotContains(self, response, text, status_code=200,
                          msg_prefix='', html=False):
        # Prior to Django 1.7 assertContains and assertNotContains behaved
        # differently regarding response's 'streaming' flag
        if django.VERSION < (1, 7):
            return self._assertNotContains(response, text, status_code,
                                           msg_prefix, html)
        else:
            return super(TestCase, self).assertNotContains(
                response, text, status_code, msg_prefix, html)
    def _assertNotContains(self, response, text, status_code=200,
                           msg_prefix='', html=False):
        """Asserts that a response indicates that some content was retrieved
        successfully, (i.e., the HTTP status code was as expected), and that
        ``text`` doesn't occurs in the content of the response.
        This is an override of django_test.TestCase.assertNotContains method,
        which is able to work with StreamingHttpResponse. Should be called
        for Django versions prior to 1.7.
        """
        # If the response supports deferred rendering and hasn't been rendered
        # yet, then ensure that it does get rendered before proceeding further.
        if (hasattr(response, 'render') and callable(response.render) and
                not response.is_rendered):
            response.render()
        if msg_prefix:
            msg_prefix += ": "
        self.assertEqual(
            response.status_code, status_code,
            msg_prefix + "Couldn't retrieve content: Response code was %d"
            " (expected %d)" % (response.status_code, status_code))
        # Streaming responses must be materialized before we can search them.
        if getattr(response, 'streaming', False):
            content = b''.join(response.streaming_content)
        else:
            content = response.content
        if not isinstance(text, bytes) or html:
            # Compare in text mode using the response's declared charset.
            text = force_text(text, encoding=response._charset)
            content = content.decode(response._charset)
            text_repr = "'%s'" % text
        else:
            text_repr = repr(text)
        if html:
            content = testcases.assert_and_parse_html(
                self, content, None, 'Response\'s content is not valid HTML:')
            text = testcases.assert_and_parse_html(
                self, text, None, 'Second argument is not valid HTML:')
        self.assertEqual(
            content.count(text), 0,
            msg_prefix + "Response should not contain %s" % text_repr)
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
                     "The WITH_SELENIUM env variable is not set.")
class SeleniumTestCase(LiveServerTestCase):
    """Live-server test case that drives a real browser via Selenium.

    The whole class is skipped unless WITH_SELENIUM is set; with
    SELENIUM_HEADLESS also set, an Xvfb virtual display is started so the
    browser can run without a physical display.
    """
    @classmethod
    def setUpClass(cls):
        socket.setdefaulttimeout(60)
        if os.environ.get('WITH_SELENIUM', False):
            time.sleep(1)
            # Start a virtual display server for running the tests headless.
            if os.environ.get('SELENIUM_HEADLESS', False):
                cls.vdisplay = xvfbwrapper.Xvfb(width=1280, height=720)
                cls.vdisplay.start()
            cls.selenium = WebDriver()
        super(SeleniumTestCase, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        # Quit the browser before stopping the virtual display it runs in.
        if os.environ.get('WITH_SELENIUM', False):
            cls.selenium.quit()
            time.sleep(1)
        if hasattr(cls, 'vdisplay'):
            cls.vdisplay.stop()
        super(SeleniumTestCase, cls).tearDownClass()
    def setUp(self):
        socket.setdefaulttimeout(60)
        self.selenium.implicitly_wait(30)
        self.ui = selenium_ui
        super(SeleniumTestCase, self).setUp()
class JasmineTests(SeleniumTestCase):
    """Helper class for running a Jasmine spec suite through Selenium.

    To run a jasmine test suite, subclass JasmineTests in
    :file:`horizon/test/jasmine/jasmine_tests.py` and define two class
    attributes:

    .. attribute:: sources

        List of JS source files under test ({{ STATIC_URL }} is prepended
        automatically).

    .. attribute:: specs

        List of Jasmine JS spec files ({{ STATIC_URL }} is prepended
        automatically).

    .. attribute:: template_name

        Optional template providing the html the test needs; when omitted
        the default template is used. A custom template must extend
        :file:`horizon/jasmine/jasmine.html` and put its html inside a
        block named ``content``.
    """
    sources = []
    specs = []
    template_name = None
    def run_jasmine(self):
        suite_url = "%s%s%s" % (self.live_server_url,
                                "/jasmine/",
                                self.__class__.__name__)
        self.selenium.get(suite_url)
        waiter = self.ui.WebDriverWait(self.selenium, 120)
        def jasmine_done(driver):
            # Jasmine reports completion in its duration banner.
            return "finished" in driver.find_element_by_class_name("duration").text
        waiter.until(jasmine_done)
        failed_specs = \
            self.selenium.find_elements_by_css_selector(".spec-detail.failed")
        results = []
        for spec in failed_specs:
            results.append(
                spec.find_element_by_class_name("description").text)
            results.append(
                spec.find_element_by_class_name("stack-trace").text)
        self.assertEqual(results, [], '\n\n' + '\n\n'.join(results) + '\n\n')
    def test(self):
        # The base class itself defines no specs; only subclasses run.
        if self.__class__ == JasmineTests:
            return
        self.run_jasmine()
| apache-2.0 |
DiUS/build-lights | light-controller/lib/daemonize.py | 2 | 8525 | # From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
"""Disk And Execution MONitor (Daemon)
Configurable daemon behaviors:
1.) The current working directory set to the "/" directory.
2.) The current file creation mode mask set to 0.
3.) Close all open files (1024).
4.) Redirect standard I/O streams to "/dev/null".
A failed call to fork() now raises an exception.
References:
1) Advanced Programming in the Unix Environment: W. Richard Stevens
2) Unix Programming Frequently Asked Questions:
http://www.erlenstar.demon.co.uk/unix/faq_toc.html
"""
__author__ = "Chad J. Schroeder"
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
__revision__ = "$Id$"
__version__ = "0.2"

# Standard Python modules.
import os               # Miscellaneous OS interfaces.
import sys              # System-specific parameters and functions.

# Default daemon parameters.

# File mode creation mask of the daemon (0 means no permission bits are
# masked off, giving the daemon full control over created-file modes).
UMASK = 0

# Default working directory for the daemon ("/" avoids pinning any mounted
# filesystem, which could otherwise block an unmount at shutdown).
WORKDIR = "/"

# Default maximum for the number of available file descriptors, used only
# when the real limit cannot be determined from resource.getrlimit().
MAXFD = 1024

# The standard I/O file descriptors are redirected to /dev/null by default.
# os.devnull exists on Python 2.4+; fall back to the literal path on very
# old interpreters.
if (hasattr(os, "devnull")):
    REDIRECT_TO = os.devnull
else:
    REDIRECT_TO = "/dev/null"
def createDaemon():
    """Detach the current process from the controlling terminal and run it
    in the background as a daemon.

    Returns 0 in the daemonized (grand-)child process; the original process
    and the intermediate first child both terminate via os._exit().  Raises
    Exception if either fork() fails.

    References:
      1) Advanced Programming in the Unix Environment: W. Richard Stevens
      2) Unix Programming Frequently Asked Questions
    """
    try:
        # First fork: returns control to the shell and guarantees the child
        # is not a process group leader (it gets a new PID but inherits the
        # parent's process group), which is required for os.setsid() below.
        pid = os.fork()
    except OSError as e:
        # Bug fix: the original used the Python-2-only forms
        # "except OSError, e" and "raise Exception, msg", which are syntax
        # errors on Python 3.  The forms below behave identically on
        # Python 2.6+ and Python 3.
        raise Exception("%s [%d]" % (e.strerror, e.errno))

    if pid == 0:  # The first child.
        # Become session leader of a new session and process group leader of
        # a new process group; the process is guaranteed to have no
        # controlling terminal.
        os.setsid()

        # Ignoring SIGHUP before the second fork is unnecessary here: SIGHUP
        # is sent on process death only (1) to the foreground process group
        # of a terminal whose session leader dies -- the first child has no
        # controlling terminal -- or (2) to STOPPED members of a newly
        # orphaned process group -- the second child is not stopped.  There
        # would be no ill effect in ignoring it, either.

        try:
            # Second fork: exit the first child immediately to prevent
            # zombies, and guarantee the daemon is not a session leader so
            # it can never reacquire a controlling terminal by opening a
            # terminal device (System V semantics).
            pid = os.fork()
        except OSError as e:
            raise Exception("%s [%d]" % (e.strerror, e.errno))

        if pid == 0:  # The second child -- the daemon itself.
            # Use "/" as the working directory so no mounted filesystem is
            # kept busy (which could block an unmount at shutdown).
            os.chdir(WORKDIR)
            # Reset the inherited umask so the daemon has complete control
            # over the permissions of files it creates.
            os.umask(UMASK)
        else:
            # Parent (the first child) of the daemon: use _exit() rather
            # than exit() so atexit/on_exit handlers and signal handlers
            # are not run and stdio buffers are not flushed twice.
            os._exit(0)
    else:
        # Exit the original parent process (same _exit() rationale).
        os._exit(0)

    # From here on we are the daemon.  Close all open file descriptors so
    # nothing inherited from the parent is kept open.  Use the process hard
    # limit on open files when available, else the configured default.
    import resource  # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = MAXFD

    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:
            # The fd wasn't open to begin with -- ignore.
            pass

    # Redirect the standard I/O file descriptors to /dev/null: the daemon
    # has no controlling terminal, and this prevents side effects from
    # stray reads/writes on stdin/stdout/stderr.  open() is guaranteed to
    # return the lowest free descriptor, i.e. 0 (stdin), since everything
    # was closed above.
    os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)

    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)                    # standard output (1)
    os.dup2(0, 2)                    # standard error (2)

    return 0
if __name__ == "__main__":
    retCode = createDaemon()

    # The code, as is, will create a new file in the root directory when
    # executed with superuser privileges.  The file will contain the
    # following daemon related process parameters: return code, process ID,
    # parent process ID, process group ID, session ID, user ID, effective
    # user ID, real group ID, and the effective group ID.  Notice the
    # relationship between the daemon's process ID, process group ID, and
    # its parent's process ID.
    procParams = """
return code = %s
process ID = %s
parent process ID = %s
process group ID = %s
session ID = %s
user ID = %s
effective user ID = %s
real group ID = %s
effective group ID = %s
""" % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),
       os.getuid(), os.geteuid(), os.getgid(), os.getegid())

    # Bug fix: use a context manager so the log file is flushed and closed
    # deterministically (the original open(...).write(...) leaked the file
    # object and relied on garbage collection to flush it).
    with open("createDaemon.log", "w") as log_file:
        log_file.write(procParams + "\n")

    sys.exit(retCode)
| gpl-3.0 |
timfel/squeakvm | processors/ARM/gdb-8.3.1/gdb/copyright.py | 7 | 11517 | #! /usr/bin/env python
# Copyright (C) 2011-2019 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""copyright.py
This script updates the list of years in the copyright notices in
most files maintained by the GDB project.
Usage: cd src/gdb && python copyright.py
Always review the output of this script before committing it!
A useful command to review the output is:
% filterdiff -x \*.c -x \*.cc -x \*.h -x \*.exp updates.diff
This removes the bulk of the changes which are most likely to be correct.
"""
import datetime
import os
import os.path
import subprocess
def get_update_list():
    """Return the list of files to update.

    Assumes that the current working directory when called is the root
    of the GDB source tree (NOT the gdb/ subdirectory!).  The names of
    the files are relative to that root directory.
    """
    result = []
    for gdb_dir in ('gdb', 'sim', 'include/gdb'):
        for root, dirs, files in os.walk(gdb_dir, topdown=True):
            # Prune excluded directories from the search.  The assignment
            # goes through dirs[:] so the mutation is visible to os.walk
            # (topdown pruning).  Bug fix: the original called
            # dirs.remove(dirname) while iterating over dirs, which skips
            # the element following each removal, so one of two adjacent
            # excluded directories could escape pruning.
            dirs[:] = [dirname for dirname in dirs
                       if not (dirname in EXCLUDE_ALL_LIST
                               or "%s/%s" % (root, dirname) in EXCLUDE_LIST
                               or "%s/%s" % (root, dirname) in NOT_FSF_LIST
                               or "%s/%s" % (root, dirname) in BY_HAND)]
            for filename in files:
                relpath = "%s/%s" % (root, filename)
                # Skip excluded, non-FSF, and by-hand files.
                if (filename in EXCLUDE_ALL_LIST
                        or relpath in EXCLUDE_LIST
                        or relpath in NOT_FSF_LIST
                        or relpath in BY_HAND):
                    continue
                result.append(relpath)
    return result
def update_files(update_list):
    """Update the copyright header of the files in the given list.

    We use gnulib's update-copyright script for that.

    update_list: an iterable of filenames, relative to the source-tree
        root (which must be the current working directory).
    """
    # We want to use year intervals in the copyright notices, and
    # all years should be collapsed to one single year interval,
    # even if there are "holes" in the list of years found in the
    # original copyright notice (OK'ed by the FSF, case [gnu.org #719834]).
    os.environ['UPDATE_COPYRIGHT_USE_INTERVALS'] = '2'

    # Perform the update, and save the output in a string.
    update_cmd = ['bash', 'gdb/gnulib/import/extra/update-copyright']
    update_cmd += update_list

    p = subprocess.Popen(update_cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    update_out = p.communicate()[0]

    # Process the output.  Typically, a lot of files do not have
    # a copyright notice :-(.  The update-copyright script prints
    # a well defined warning when it did not find the copyright notice.
    # For each of those, do a sanity check and see if they may in fact
    # have one.  For the files that are found not to have one, we filter
    # the line out from the output, since there is nothing more to do,
    # short of looking at each file and seeing which notice is appropriate.
    # Too much work!  (~4,000 files listed as of 2012-01-03).
    update_out = update_out.splitlines()
    warning_string = ': warning: copyright statement not found'
    warning_len = len(warning_string)

    for line in update_out:
        # NOTE(review): splitlines() already strips line endings, so this
        # endswith('\n') check can never be true; kept as-is.
        if line.endswith('\n'):
            line = line[:-1]
        if line.endswith(warning_string):
            # Strip the warning suffix to recover the filename, then only
            # report files that do seem to contain a copyright notice.
            filename = line[:-warning_len]
            if may_have_copyright_notice(filename):
                print line
        else:
            # Unrecognized file format. !?!
            print "*** " + line
def may_have_copyright_notice(filename):
    """Return True if the given file may contain a copyright notice.

    The filename is relative to the root directory.
    This function assumes that the current working directory is that root
    directory.

    The algorithm is fairly crude, meaning that it might return
    some false positives.  I do not think it will return any false
    negatives...  We might improve this function to handle more
    complex cases later...
    """
    # A file may have a copyright notice if the word "Copyright" appears
    # within a (reasonable) number of lines from its start, say 50...
    MAX_LINES = 50

    # Bug fixes: the loop limit below used a hard-coded 50 instead of
    # MAX_LINES, and the file object was never closed (leaked on every
    # call); a context manager closes it even on early return.
    with open(filename) as fd:
        for lineno, line in enumerate(fd, start=1):
            if 'Copyright' in line:
                return True
            if lineno >= MAX_LINES:
                break
    return False
def main ():
    """The main subprogram.

    Must be run from the gdb/ subdirectory; chdirs to the tree root,
    updates all eligible files, then prints reminders for files that
    must be finished by hand.
    """
    if not os.path.isfile("gnulib/import/extra/update-copyright"):
        print "Error: This script must be called from the gdb directory."
        # NOTE(review): execution continues after this error message; the
        # script does not abort here -- confirm whether that is intended.
    root_dir = os.path.dirname(os.getcwd())
    os.chdir(root_dir)

    update_list = get_update_list()
    update_files (update_list)

    # Remind the user that some files need to be updated by HAND...
    # (the "\033[31m...\033[0m" escapes render the reminders in red)
    if MULTIPLE_COPYRIGHT_HEADERS:
        print
        print("\033[31m"
              "REMINDER: Multiple copyright headers must be updated by hand:"
              "\033[0m")
        for filename in MULTIPLE_COPYRIGHT_HEADERS:
            print "  ", filename
    if BY_HAND:
        print
        print "\033[31mREMINDER: The following files must be updated by hand." \
            "\033[0m"
        for filename in BY_HAND:
            print "  ", filename
############################################################################
#
# Some constants, placed at the end because they take up a lot of room.
# The actual value of these constants is not significant to the understanding
# of the script.
#
############################################################################
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Filenames are relative to the root directory, and are compared by
# exact string match (no globbing) in get_update_list().
EXCLUDE_LIST = (
    'gdb/nat/glibc_thread_db.h',
    'gdb/CONTRIBUTE',
    'gdb/gnulib/import'
)
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Matches any file or directory name anywhere (bare basenames, not
# paths).  Use with caution.
# This is mostly for files that can be found in multiple directories.
# Eg: We want all files named COPYING to be left untouched.
EXCLUDE_ALL_LIST = (
    "COPYING", "COPYING.LIB", "CVS", "configure", "copying.c",
    "fdl.texi", "gpl.texi", "aclocal.m4",
)
# The list of files to update by hand.  (Currently an empty tuple; both
# get_update_list() and main() handle that case.)
BY_HAND = (
    # Nothing at the moment :-).
)
# Files containing multiple copyright headers.  This script is only
# fixing the first one it finds, so we need to finish the update
# by hand (main() prints a reminder listing these).
MULTIPLE_COPYRIGHT_HEADERS = (
    "gdb/doc/gdb.texinfo",
    "gdb/doc/refcard.tex",
    "gdb/gdbarch.sh",
)
# The list of files which have a copyright, but not held by the FSF.
# Filenames are relative to the root directory.
# (Bug fix: "sim/ppc/lf.c" was listed twice; the duplicate entry has been
# removed -- membership tests are unaffected.)
NOT_FSF_LIST = (
    "gdb/exc_request.defs",
    "gdb/gdbtk",
    "gdb/testsuite/gdb.gdbtk/",
    "sim/arm/armemu.h", "sim/arm/armos.c", "sim/arm/gdbhost.c",
    "sim/arm/dbg_hif.h", "sim/arm/dbg_conf.h", "sim/arm/communicate.h",
    "sim/arm/armos.h", "sim/arm/armcopro.c", "sim/arm/armemu.c",
    "sim/arm/kid.c", "sim/arm/thumbemu.c", "sim/arm/armdefs.h",
    "sim/arm/armopts.h", "sim/arm/dbg_cp.h", "sim/arm/dbg_rdi.h",
    "sim/arm/parent.c", "sim/arm/armsupp.c", "sim/arm/armrdi.c",
    "sim/arm/bag.c", "sim/arm/armvirt.c", "sim/arm/main.c", "sim/arm/bag.h",
    "sim/arm/communicate.c", "sim/arm/gdbhost.h", "sim/arm/armfpe.h",
    "sim/arm/arminit.c",
    "sim/common/cgen-fpu.c", "sim/common/cgen-fpu.h",
    "sim/common/cgen-accfp.c",
    "sim/mips/m16run.c", "sim/mips/sim-main.c",
    "sim/moxie/moxie-gdb.dts",
    # Not a single file in sim/ppc/ appears to be copyright FSF :-(.
    "sim/ppc/filter.h", "sim/ppc/gen-support.h", "sim/ppc/ld-insn.h",
    "sim/ppc/hw_sem.c", "sim/ppc/hw_disk.c", "sim/ppc/idecode_branch.h",
    "sim/ppc/sim-endian.h", "sim/ppc/table.c", "sim/ppc/hw_core.c",
    "sim/ppc/gen-support.c", "sim/ppc/gen-semantics.h", "sim/ppc/cpu.h",
    "sim/ppc/sim_callbacks.h", "sim/ppc/RUN", "sim/ppc/Makefile.in",
    "sim/ppc/emul_chirp.c", "sim/ppc/hw_nvram.c", "sim/ppc/dc-test.01",
    "sim/ppc/hw_phb.c", "sim/ppc/hw_eeprom.c", "sim/ppc/bits.h",
    "sim/ppc/hw_vm.c", "sim/ppc/cap.h", "sim/ppc/os_emul.h",
    "sim/ppc/options.h", "sim/ppc/gen-idecode.c", "sim/ppc/filter.c",
    "sim/ppc/corefile-n.h", "sim/ppc/std-config.h", "sim/ppc/ld-decode.h",
    "sim/ppc/filter_filename.h", "sim/ppc/hw_shm.c",
    "sim/ppc/pk_disklabel.c", "sim/ppc/dc-simple", "sim/ppc/misc.h",
    "sim/ppc/device_table.h", "sim/ppc/ld-insn.c", "sim/ppc/inline.c",
    "sim/ppc/emul_bugapi.h", "sim/ppc/hw_cpu.h", "sim/ppc/debug.h",
    "sim/ppc/hw_ide.c", "sim/ppc/debug.c", "sim/ppc/gen-itable.h",
    "sim/ppc/interrupts.c", "sim/ppc/hw_glue.c", "sim/ppc/emul_unix.c",
    "sim/ppc/sim_calls.c", "sim/ppc/dc-complex", "sim/ppc/ld-cache.c",
    "sim/ppc/registers.h", "sim/ppc/dc-test.02", "sim/ppc/options.c",
    "sim/ppc/igen.h", "sim/ppc/registers.c", "sim/ppc/device.h",
    "sim/ppc/emul_chirp.h", "sim/ppc/hw_register.c", "sim/ppc/hw_init.c",
    "sim/ppc/sim-endian-n.h", "sim/ppc/filter_filename.c",
    "sim/ppc/bits.c", "sim/ppc/idecode_fields.h", "sim/ppc/hw_memory.c",
    "sim/ppc/misc.c", "sim/ppc/double.c", "sim/ppc/psim.h",
    "sim/ppc/hw_trace.c", "sim/ppc/emul_netbsd.h", "sim/ppc/psim.c",
    "sim/ppc/ppc-instructions", "sim/ppc/tree.h", "sim/ppc/README",
    "sim/ppc/gen-icache.h", "sim/ppc/gen-model.h", "sim/ppc/ld-cache.h",
    "sim/ppc/mon.c", "sim/ppc/corefile.h", "sim/ppc/vm.c",
    "sim/ppc/INSTALL", "sim/ppc/gen-model.c", "sim/ppc/hw_cpu.c",
    "sim/ppc/corefile.c", "sim/ppc/hw_opic.c", "sim/ppc/gen-icache.c",
    "sim/ppc/events.h", "sim/ppc/os_emul.c", "sim/ppc/emul_generic.c",
    "sim/ppc/main.c", "sim/ppc/hw_com.c", "sim/ppc/gen-semantics.c",
    "sim/ppc/emul_bugapi.c", "sim/ppc/device.c", "sim/ppc/emul_generic.h",
    "sim/ppc/tree.c", "sim/ppc/mon.h", "sim/ppc/interrupts.h",
    "sim/ppc/cap.c", "sim/ppc/cpu.c", "sim/ppc/hw_phb.h",
    "sim/ppc/device_table.c", "sim/ppc/lf.c",
    "sim/ppc/dc-stupid", "sim/ppc/hw_pal.c", "sim/ppc/ppc-spr-table",
    "sim/ppc/emul_unix.h", "sim/ppc/words.h", "sim/ppc/basics.h",
    "sim/ppc/hw_htab.c", "sim/ppc/lf.h", "sim/ppc/ld-decode.c",
    "sim/ppc/sim-endian.c", "sim/ppc/gen-itable.c",
    "sim/ppc/idecode_expression.h", "sim/ppc/table.h", "sim/ppc/dgen.c",
    "sim/ppc/events.c", "sim/ppc/gen-idecode.h", "sim/ppc/emul_netbsd.c",
    "sim/ppc/igen.c", "sim/ppc/vm_n.h", "sim/ppc/vm.h",
    "sim/ppc/hw_iobus.c", "sim/ppc/inline.h",
    "sim/testsuite/sim/bfin/s21.s", "sim/testsuite/sim/mips/mips32-dsp2.s",
    )
# Script entry point: update copyright years in place, then print
# reminders for files that still need manual attention.
if __name__ == "__main__":
    main()
| mit |
bdestombe/flopy-1 | flopy/utils/modpathfile.py | 2 | 20645 | """
Module to read MODPATH output files. The module contains two
important classes that can be accessed by the user.
* EndpointFile (ascii endpoint file)
* PathlineFile (ascii pathline file)
"""
import numpy as np
from ..utils.flopy_io import loadtxt
class PathlineFile():
    """
    PathlineFile Class.

    Provides simple ways to retrieve MODPATH 6 pathline data from a
    MODPATH 6 ascii pathline file.

    Parameters
    ----------
    filename : string
        Name of the pathline file
    verbose : bool
        Write information to the screen. Default is False.

    Examples
    --------
    >>> import flopy
    >>> pthobj = flopy.utils.PathlineFile('model.mppth')
    >>> p1 = pthobj.get_data(partid=1)

    """
    # Integer columns converted from one-based (file convention) to
    # zero-based (python convention) on load.
    kijnames = ['k', 'i', 'j', 'particleid', 'particlegroup',
                'linesegmentindex']

    def __init__(self, filename, verbose=False):
        """
        Class constructor.
        """
        self.fname = filename
        self.dtype, self.outdtype = self._get_dtypes()
        self._build_index()
        self._data = loadtxt(self.file, dtype=self.dtype,
                             skiprows=self.skiprows)
        # set number of particle ids (ids are one-based in the file)
        self.nid = self._data['particleid'].max()
        # convert layer, row, and column indices; particle id and group; and
        # line segment indices to zero-based
        for n in self.kijnames:
            self._data[n] -= 1
        # maximum zero-based particle id.  Bug fix: get_maxid() returned
        # self.maxid, but this attribute was never assigned, which raised
        # an AttributeError.
        self.maxid = self._data['particleid'].max()
        # close the input file
        self.file.close()
        return

    def _build_index(self):
        """
        Set position of the start of the pathline data (self.skiprows)
        and open self.file, validating the file signature.
        """
        self.skiprows = 0
        self.file = open(self.fname, 'r')
        while True:
            line = self.file.readline()
            if isinstance(line, bytes):
                line = line.decode()
            if self.skiprows < 1:
                # The first line must identify a MODPATH 6 pathline file.
                if 'MODPATH_PATHLINE_FILE 6' not in line.upper():
                    errmsg = '{} is not a valid pathline file'.format(
                        self.fname)
                    raise Exception(errmsg)
            self.skiprows += 1
            if 'end header' in line.lower():
                break
        self.file.seek(0)

    def _get_dtypes(self):
        """
        Build numpy dtypes for the MODPATH 6 pathline file.

        Note: plain ``int``/``float`` are used instead of ``np.int`` /
        ``np.float``; those builtin aliases were deprecated in numpy 1.20
        and removed in numpy 1.24, and were always identical to the
        builtins anyway.
        """
        dtype = np.dtype([("particleid", int), ("particlegroup", int),
                          ("timepointindex", int),
                          ("cumulativetimestep", int),
                          ("time", np.float32), ("x", np.float32),
                          ("y", np.float32), ("z", np.float32),
                          ("k", int), ("i", int), ("j", int),
                          ("grid", int), ("xloc", np.float32),
                          ("yloc", np.float32), ("zloc", np.float32),
                          ("linesegmentindex", int)])
        outdtype = np.dtype([("x", np.float32), ("y", np.float32),
                             ("z", np.float32), ("time", np.float32),
                             ("k", int), ("id", int)])
        return dtype, outdtype

    def get_maxid(self):
        """
        Get the maximum (zero-based) particle id in the pathline file.

        Returns
        ----------
        out : int
            Maximum pathline particle id.
        """
        # Bug fix: self.maxid was never assigned; it is now set in __init__.
        return self.maxid

    def get_maxtime(self):
        """
        Get the maximum time in the pathline file.

        Returns
        ----------
        out : float
            Maximum pathline time.
        """
        # Bug fix: referenced nonexistent self.data (AttributeError).
        return self._data['time'].max()

    def get_data(self, partid=0, totim=None, ge=True):
        """
        Get pathline data from the pathline file for a single pathline.

        Parameters
        ----------
        partid : int
            The zero-based particle id.  The first record is record 0.
        totim : float
            The simulation time.  All pathline points for particle partid
            that are greater than or equal to (ge=True) or less than or
            equal to (ge=False) totim will be returned.  Default is None
        ge : bool
            Boolean that determines if pathline times greater than or equal
            to or less than or equal to totim is used to create a subset
            of pathlines.  Default is True.

        Returns
        ----------
        ra : numpy record array
            A numpy recarray with the x, y, z, time, k, and particleid for
            pathline partid.

        Examples
        --------
        >>> import flopy.utils.modpathfile as mpf
        >>> pthobj = flopy.utils.PathlineFile('model.mppth')
        >>> p1 = pthobj.get_data(partid=1)

        """
        # (a redundant duplicate computation of idx was removed here)
        if totim is not None:
            if ge:
                idx = ((self._data['time'] >= totim) &
                       (self._data['particleid'] == partid))
            else:
                idx = ((self._data['time'] <= totim) &
                       (self._data['particleid'] == partid))
        else:
            idx = self._data['particleid'] == partid
        self._ta = self._data[idx]
        ra = np.rec.fromarrays((self._ta['x'], self._ta['y'], self._ta['z'],
                                self._ta['time'], self._ta['k'],
                                self._ta['particleid']), dtype=self.outdtype)
        return ra

    def get_alldata(self, totim=None, ge=True):
        """
        Get pathline data from the pathline file for all pathlines and all
        times.

        Parameters
        ----------
        totim : float
            The simulation time.  All pathline points for particle partid
            that are greater than or equal to (ge=True) or less than or
            equal to (ge=False) totim will be returned.  Default is None
        ge : bool
            Boolean that determines if pathline times greater than or equal
            to or less than or equal to totim is used to create a subset
            of pathlines.  Default is True.

        Returns
        ----------
        plist : a list of numpy record arrays
            A list of numpy recarrays with the x, y, z, time, k, and
            particleid for all pathlines.

        Examples
        --------
        >>> import flopy.utils.modpathfile as mpf
        >>> pthobj = flopy.utils.PathlineFile('model.mppth')
        >>> p = pthobj.get_alldata()

        """
        plist = []
        for partid in range(self.nid):
            plist.append(self.get_data(partid=partid, totim=totim, ge=ge))
        return plist

    def get_destination_pathline_data(self, dest_cells):
        """Get pathline data for a set of destination cells.

        Parameters
        ----------
        dest_cells : list or array of tuples
            (k, i, j) of each destination cell (zero-based)

        Returns
        -------
        pthldest : np.recarray
            Slice of the pathline data array containing only pathlines
            with final k, i, j in dest_cells.
        """
        ra = self._data.view(np.recarray)
        # find the intersection of pathline cells and dest_cells;
        # convert dest_cells to the same dtype for the comparison
        raslice = ra[['k', 'i', 'j']]
        dest_cells = np.array(dest_cells, dtype=raslice.dtype)
        inds = np.in1d(raslice, dest_cells)
        epdest = ra[inds].copy().view(np.recarray)

        # use the particle ids of the matching rows to pull complete paths
        inds = np.in1d(ra.particleid, epdest.particleid)
        pthldest = ra[inds].copy()
        pthldest.sort(order=['particleid', 'time'])
        return pthldest

    def write_shapefile(self, pathline_data=None,
                        one_per_particle=True,
                        direction='ending',
                        shpname='endpoings.shp',
                        sr=None, epsg=None,
                        **kwargs):
        """Write pathlines to a shapefile.

        Parameters
        ----------
        pathline_data : np.recarray
            Record array of same form as that returned by
            PathlineFile.get_alldata (if None, all cached pathline data
            are exported).
        one_per_particle : boolean (default True)
            True writes a single LineString with a single set of attribute
            data for each particle.  False writes a record/geometry for
            each pathline segment (each row in the pathline file); this can
            be used to visualize attribute information (time, model layer,
            etc.) across a pathline in a GIS.
        direction : str
            Whether 'starting' or 'ending' particle locations provide the
            k, i, j attribute values.  Only used if one_per_particle=False.
            (default is 'ending')
        shpname : str
            File path for the shapefile.  NOTE(review): the default
            'endpoings.shp' looks like a typo of 'endpoints.shp'; it is
            preserved for backward compatibility.
        sr : flopy.utils.reference.SpatialReference instance
            Used to scale and rotate global x, y, z values.
        epsg : int
            EPSG code for writing the projection (.prj) file.  If not
            supplied, the epsg code associated with sr will be used.
        kwargs : keyword arguments to
            flopy.export.shapefile_utils.recarray2shp
        """
        from ..utils.reference import SpatialReference
        from ..utils.geometry import LineString
        from ..export.shapefile_utils import recarray2shp

        pth = pathline_data
        if pth is None:
            pth = self._data.view(np.recarray)
        pth = pth.copy()
        pth.sort(order=['particleid', 'time'])
        if sr is None:
            sr = SpatialReference()
        # Bug fix: an explicitly supplied epsg was previously ignored in
        # favor of sr.epsg, contradicting the docstring; honor it and only
        # fall back to the spatial reference when it is not given.
        if epsg is None:
            epsg = sr.epsg

        particles = np.unique(pth.particleid)
        geoms = []
        if one_per_particle:
            # one geometry and one attribute record per particle
            loc_inds = 0
            if direction == 'ending':
                loc_inds = -1
            pthdata = []
            for pid in particles:
                ra = pth[pth.particleid == pid]
                x, y = sr.transform(ra.x, ra.y)
                z = ra.z
                geoms.append(LineString(list(zip(x, y, z))))
                pthdata.append((pid,
                                ra.particlegroup[0],
                                ra.time.max(),
                                ra.k[loc_inds],
                                ra.i[loc_inds],
                                ra.j[loc_inds]))
            # (np.int/np.float replaced by int/float; the aliases were
            # removed in numpy >= 1.24)
            pthdata = np.array(pthdata, dtype=[('particleid', int),
                                               ('particlegroup', int),
                                               ('time', float),
                                               ('k', int),
                                               ('i', int),
                                               ('j', int)
                                               ]).view(np.recarray)
        else:
            # one geometry and one attribute record per pathline segment
            # (each row in the pathline file)
            dtype = pth.dtype
            pthdata = []
            for pid in particles:
                ra = pth[pth.particleid == pid]
                x, y = sr.transform(ra.x, ra.y)
                z = ra.z
                geoms += [LineString([(x[i - 1], y[i - 1], z[i - 1]),
                                      (x[i], y[i], z[i])])
                          for i in np.arange(1, (len(ra)))]
                pthdata += ra[1:].tolist()
            pthdata = np.array(pthdata, dtype=dtype).view(np.recarray)

        # convert indices back to one-based for the shapefile attributes
        for n in set(self.kijnames).intersection(set(pthdata.dtype.names)):
            pthdata[n] += 1
        recarray2shp(pthdata, geoms, shpname=shpname, epsg=epsg, **kwargs)
class EndpointFile():
    """
    EndpointFile Class.

    Provides simple ways to retrieve MODPATH 6 endpoint data from a
    MODPATH 6 ascii endpoint file.

    Parameters
    ----------
    filename : string
        Name of the endpoint file
    verbose : bool
        Write information to the screen. Default is False.

    Examples
    --------
    >>> import flopy
    >>> endobj = flopy.utils.EndpointFile('model.mpend')
    >>> e1 = endobj.get_data(partid=1)

    """
    # Integer columns converted from one-based (file convention) to
    # zero-based (python convention) on load.
    kijnames = ['k0', 'i0', 'j0', 'k', 'i', 'j', 'particleid',
                'particlegroup']

    def __init__(self, filename, verbose=False):
        """
        Class constructor.
        """
        self.fname = filename
        self.dtype = self._get_dtypes()
        self._build_index()
        self._data = loadtxt(self.file, dtype=self.dtype,
                             skiprows=self.skiprows)
        # set number of particle ids (ids are one-based in the file)
        self.nid = self._data['particleid'].max()
        # convert layer, row, and column indices and particle id and group
        # to zero-based
        for n in self.kijnames:
            self._data[n] -= 1
        # maximum zero-based particle id.  Bug fix: get_maxid() returned
        # self.maxid, but this attribute was never assigned, which raised
        # an AttributeError.
        self.maxid = self._data['particleid'].max()
        # close the input file
        self.file.close()
        return

    def _build_index(self):
        """
        Set position of the start of the endpoint data (self.skiprows),
        open self.file, validate the file signature, and parse the
        tracking direction from the header.
        """
        self.skiprows = 0
        # default to forward tracking; overwritten below if the header
        # says otherwise
        self.direction = 1
        self.file = open(self.fname, 'r')
        while True:
            line = self.file.readline()
            if isinstance(line, bytes):
                line = line.decode()
            if self.skiprows < 1:
                # The first line must identify a MODPATH 6 endpoint file.
                if 'MODPATH_ENDPOINT_FILE 6' not in line.upper():
                    errmsg = '{} is not a valid endpoint file'.format(
                        self.fname)
                    raise Exception(errmsg)
            self.skiprows += 1
            if self.skiprows == 2:
                # Bug fix: the original guarded this branch with
                # "if idx == 1" but never incremented idx, so the tracking
                # direction was never parsed and self.direction was never
                # set.  NOTE(review): this assumes the first character of
                # the second header line is the tracking-direction flag
                # (2 means backward tracking) -- confirm against the
                # MODPATH 6 output-file description.
                t = line.strip()
                if int(t[0]) == 2:
                    self.direction = -1
            if 'end header' in line.lower():
                break
        self.file.seek(0)

    def _get_dtypes(self):
        """
        Build the numpy dtype for the MODPATH 6 endpoint file.

        Note: plain ``int``/``float`` are used instead of ``np.int`` /
        ``np.float``; those builtin aliases were deprecated in numpy 1.20
        and removed in numpy 1.24, and were always identical to the
        builtins anyway.
        """
        dtype = np.dtype([("particleid", int), ("particlegroup", int),
                          ('status', int), ('initialtime', np.float32),
                          ('finaltime', np.float32), ('initialgrid', int),
                          ('k0', int), ('i0', int),
                          ('j0', int), ('initialcellface', int),
                          ('initialzone', int), ('xloc0', np.float32),
                          ('yloc0', np.float32), ('zloc0', np.float32),
                          ('x0', np.float32), ('y0', np.float32),
                          ('z0', np.float32),
                          ('finalgrid', int), ('k', int), ('i', int),
                          ('j', int), ('finalcellface', int),
                          ('finalzone', int), ('xloc', np.float32),
                          ('yloc', np.float32), ('zloc', np.float32),
                          ('x', np.float32), ('y', np.float32),
                          ('z', np.float32),
                          ('label', '|S40')])
        return dtype

    def get_maxid(self):
        """
        Get the maximum (zero-based) endpoint particle id in the file.

        Returns
        ----------
        out : int
            Maximum endpoint particle id.
        """
        # Bug fix: self.maxid was never assigned; it is now set in __init__.
        return self.maxid

    def get_maxtime(self):
        """
        Get the maximum time in the endpoint file.

        Returns
        ----------
        out : float
            Maximum endpoint time.
        """
        # Bug fix: referenced nonexistent self.data (AttributeError).
        return self._data['finaltime'].max()

    def get_maxtraveltime(self):
        """
        Get the maximum travel time in the endpoint file.

        Returns
        ----------
        out : float
            Maximum endpoint travel time.
        """
        # Bug fix: referenced nonexistent self.data (AttributeError).
        return (self._data['finaltime'] - self._data['initialtime']).max()

    def get_data(self, partid=0):
        """
        Get endpoint data from the endpoint file for a single particle.

        Parameters
        ----------
        partid : int
            The zero-based particle id.  The first record is record 0.
            (default is 0)

        Returns
        ----------
        ra : numpy record array
            A numpy recarray with the endpoint particle data for
            endpoint partid.

        Examples
        --------
        >>> import flopy
        >>> endobj = flopy.utils.EndpointFile('model.mpend')
        >>> e1 = endobj.get_data(partid=1)

        """
        idx = self._data['particleid'] == partid
        ra = self._data[idx]
        return ra

    def get_alldata(self):
        """
        Get endpoint data from the endpoint file for all endpoints.

        Returns
        ----------
        ra : numpy record array
            A numpy recarray with the endpoint particle data

        Examples
        --------
        >>> import flopy
        >>> endobj = flopy.utils.EndpointFile('model.mpend')
        >>> e = endobj.get_alldata()

        """
        # return a copy so callers cannot mutate the cached data
        # (a stale block of commented-out code was removed here)
        return self._data.view(np.recarray).copy()

    def get_destination_endpoint_data(self, dest_cells):
        """Get endpoint data for a set of destination cells.

        Parameters
        ----------
        dest_cells : list or array of tuples
            (k, i, j) of each destination cell (zero-based)

        Returns
        -------
        epdest : np.recarray
            Slice of endpoint data (e.g. EndpointFile.get_alldata)
            containing only data with final k, i, j in dest_cells.
        """
        ra = self.get_alldata()
        # find the intersection of endpoints and dest_cells;
        # convert dest_cells to the same dtype for the comparison
        raslice = ra[['k', 'i', 'j']]
        dest_cells = np.array(dest_cells, dtype=raslice.dtype)
        inds = np.in1d(raslice, dest_cells)
        epdest = ra[inds].copy().view(np.recarray)
        return epdest

    def write_shapefile(self, endpoint_data=None,
                        shpname='endpoings.shp',
                        direction='ending', sr=None, epsg=None,
                        **kwargs):
        """Write particle starting / ending locations to a shapefile.

        Parameters
        ----------
        endpoint_data : np.recarray
            Record array of same form as that returned by
            EndpointFile.get_alldata (if None, all cached endpoint data
            are exported).
        shpname : str
            File path for the shapefile.  NOTE(review): the default
            'endpoings.shp' looks like a typo of 'endpoints.shp'; it is
            preserved for backward compatibility.
        direction : str
            Whether 'starting' or 'ending' particle locations are written.
            (default is 'ending')
        sr : flopy.utils.reference.SpatialReference instance
            Used to scale and rotate global x, y, z values.
        epsg : int
            EPSG code for writing the projection (.prj) file.  If not
            supplied, the epsg code associated with sr will be used.
        kwargs : keyword arguments to
            flopy.export.shapefile_utils.recarray2shp
        """
        from ..utils.reference import SpatialReference
        from ..utils.geometry import Point
        from ..export.shapefile_utils import recarray2shp

        # Bug fix: the original called endpoint_data.copy() before checking
        # for None, raising AttributeError whenever no data were passed.
        if endpoint_data is None:
            epd = self.get_alldata()
        else:
            epd = endpoint_data.copy()

        if direction.lower() == 'ending':
            xcol, ycol, zcol = 'x', 'y', 'z'
        elif direction.lower() == 'starting':
            xcol, ycol, zcol = 'x0', 'y0', 'z0'
        else:
            errmsg = 'flopy.map.plot_endpoint direction must be "ending" ' + \
                     'or "starting".'
            raise Exception(errmsg)
        if sr is None:
            sr = SpatialReference()
        # honor an explicit epsg, falling back to the spatial reference
        if epsg is None:
            epsg = sr.epsg

        x, y = sr.transform(epd[xcol], epd[ycol])
        z = epd[zcol]
        geoms = [Point(x[i], y[i], z[i]) for i in range(len(epd))]
        # convert indices back to one-based for the shapefile attributes
        for n in self.kijnames:
            epd[n] += 1
        recarray2shp(epd, geoms, shpname=shpname, epsg=epsg, **kwargs)
| bsd-3-clause |
initOS/server-tools | base_import_match/models/base_import.py | 9 | 10254 | # -*- coding: utf-8 -*-
# © 2016 Grupo ESOC Ingeniería de Servicios, S.L.U. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, fields, models
from openerp import SUPERUSER_ID # TODO remove in v10
class BaseImportMatch(models.Model):
    """Deduplication rule: a combination of fields that identifies an
    existing record during a CSV import, so the import updates it
    instead of creating a duplicate."""
    _name = "base_import.match"
    _description = "Deduplicate settings prior to CSV imports."
    _order = "sequence, name"
    # Human-readable summary, recomputed from model + fields.
    name = fields.Char(
        compute="_compute_name",
        store=True,
        index=True)
    # Rules are tried in sequence order by _usable_for_load/_match_find.
    sequence = fields.Integer(index=True)
    model_id = fields.Many2one(
        "ir.model",
        "Model",
        required=True,
        ondelete="cascade",
        domain=[("osv_memory", "=", False)],
        help="In this model you will apply the match.")
    model_name = fields.Char(
        related="model_id.model",
        store=True,
        index=True)
    field_ids = fields.One2many(
        comodel_name="base_import.match.field",
        inverse_name="match_id",
        string="Fields",
        required=True,
        help="Fields that will define an unique key.")
    @api.multi
    @api.onchange("model_id")
    def _onchange_model_id(self):
        # Fields belong to the previous model; drop them on model change.
        self.field_ids.unlink()
    @api.model
    def create(self, vals):
        """Wrap the model after creation."""
        result = super(BaseImportMatch, self).create(vals)
        self._load_autopatch(result.model_name)
        return result
    @api.multi
    def unlink(self):
        """Unwrap the model after deletion."""
        # Collect affected model names before the records disappear.
        models = set(self.mapped("model_name"))
        result = super(BaseImportMatch, self).unlink()
        for model in models:
            self._load_autopatch(model)
        return result
    @api.multi
    def write(self, vals):
        """Wrap the model after writing."""
        result = super(BaseImportMatch, self).write(vals)
        if "model_id" in vals or "model_name" in vals:
            for s in self:
                self._load_autopatch(s.model_name)
        return result
    # TODO convert to @api.model_cr in v10
    def _register_hook(self, cr):
        """Autopatch on init."""
        # Old-style API (cr, uid) because this runs at registry setup.
        models = set(
            self.browse(
                cr,
                SUPERUSER_ID,
                self.search(cr, SUPERUSER_ID, list()))
            .mapped("model_name"))
        for model in models:
            self._load_autopatch(cr, SUPERUSER_ID, model)
    @api.multi
    @api.depends("model_id", "field_ids")
    def _compute_name(self):
        """Automatic self-descriptive name for the setting records."""
        # Format: "Model: field1 + field2 (conditional_value) + ..."
        for s in self:
            s.name = u"{}: {}".format(
                s.model_id.display_name,
                " + ".join(
                    s.field_ids.mapped(
                        lambda r: (
                            (u"{} ({})" if r.conditional else u"{}").format(
                                r.field_id.name,
                                r.imported_value)))))
    @api.model
    def _match_find(self, model, converted_row, imported_row):
        """Find an update target for the given row.
        This will traverse by order all match rules that can be used with the
        imported data, and return a match for the first rule that returns a
        single result.
        :param openerp.models.Model model:
            Model object that is being imported.
        :param dict converted_row:
            Row converted to Odoo api format, like the 3rd value that
            :meth:`openerp.models.Model._convert_records` returns.
        :param dict imported_row:
            Row as it is being imported, in format::
                {
                    "field_name": "string value",
                    "other_field": "True",
                    ...
                }
        :return openerp.models.Model:
            Return a dataset with one single match if it was found, or an
            empty dataset if none or multiple matches were found.
        """
        # Get usable rules to perform matches
        usable = self._usable_for_load(model._name, converted_row.keys())
        # Traverse usable combinations
        for combination in usable:
            combination_valid = True
            domain = list()
            for field in combination.field_ids:
                # Check imported value if it is a conditional field
                if field.conditional:
                    # Invalid combinations are skipped
                    if imported_row[field.name] != field.imported_value:
                        combination_valid = False
                        break
                domain.append((field.name, "=", converted_row[field.name]))
            if not combination_valid:
                continue
            match = model.search(domain)
            # When a single match is found, stop searching
            if len(match) == 1:
                return match
        # Return an empty match if none or multiple was found
        return model
    @api.model
    def _load_wrapper(self):
        """Create a new load patch method."""
        @api.model
        def wrapper(self, fields, data):
            """Try to identify rows by other pseudo-unique keys.
            It searches for rows that have no XMLID specified, and gives them
            one if any :attr:`~.field_ids` combination is found. With a valid
            XMLID in place, Odoo will understand that it must *update* the
            record instead of *creating* a new one.
            """
            newdata = list()
            # Data conversion to ORM format
            # NOTE(review): map() must yield a list here (the result is
            # .append()ed below) -- this module targets Python 2.
            import_fields = map(models.fix_import_export_id_paths, fields)
            converted_data = self._convert_records(
                self._extract_records(import_fields, data))
            # Mock Odoo to believe the user is importing the ID field
            if "id" not in fields:
                fields.append("id")
                import_fields.append(["id"])
            # Needed to match with converted data field names
            clean_fields = [f[0] for f in import_fields]
            for dbid, xmlid, record, info in converted_data:
                row = dict(zip(clean_fields, data[info["record"]]))
                match = self
                if xmlid:
                    # Skip rows with ID, they do not need all this
                    row["id"] = xmlid
                elif dbid:
                    # Find the xmlid for this dbid
                    match = self.browse(dbid)
                else:
                    # Store records that match a combination
                    match = self.env["base_import.match"]._match_find(
                        self, record, row)
                # Give a valid XMLID to this row if a match was found
                row["id"] = (match._BaseModel__export_xml_id()
                             if match else row.get("id", u""))
                # Store the modified row, in the same order as fields
                newdata.append(tuple(row[f] for f in clean_fields))
            # Leave the rest to Odoo itself
            del data
            return wrapper.origin(self, fields, newdata)
        # Flag to avoid confusions with other possible wrappers.
        # Name mangling turns this into wrapper._BaseImportMatch__base_import_match;
        # _load_unpatch reads it through the same mangled name, so they agree.
        wrapper.__base_import_match = True
        return wrapper
    @api.model
    def _load_autopatch(self, model_name):
        """[Un]apply patch automatically."""
        self._load_unpatch(model_name)
        if self.search([("model_name", "=", model_name)]):
            self._load_patch(model_name)
    @api.model
    def _load_patch(self, model_name):
        """Apply patch for :param:`model_name`'s load method.
        :param str model_name:
            Model technical name, such as ``res.partner``.
        """
        self.env[model_name]._patch_method(
            "load", self._load_wrapper())
    @api.model
    def _load_unpatch(self, model_name):
        """Remove the patch from :param:`model_name`'s load method.
        :param str model_name:
            Model technical name, such as ``res.partner``.
        """
        model = self.env[model_name]
        # Unapply patch only if there is one (flag set by _load_wrapper)
        try:
            if model.load.__base_import_match:
                model._revert_method("load")
        except AttributeError:
            pass
    @api.model
    def _usable_for_load(self, model_name, fields):
        """Return a set of elements usable for calling ``load()``.
        :param str model_name:
            Technical name of the model where you are loading data.
            E.g. ``res.partner``.
        :param list(str|bool) fields:
            List of field names being imported.
        """
        result = self
        available = self.search([("model_name", "=", model_name)])
        # Use only criteria with all required fields to match
        for record in available:
            if all(f.name in fields for f in record.field_ids):
                result += record
        return result
class BaseImportMatchField(models.Model):
    """One field belonging to a base_import.match rule; the set of these
    fields forms the pseudo-unique key used for deduplication."""
    _name = "base_import.match.field"
    _description = "Field import match definition"
    name = fields.Char(
        related="field_id.name")
    field_id = fields.Many2one(
        comodel_name="ir.model.fields",
        string="Field",
        required=True,
        ondelete="cascade",
        domain="[('model_id', '=', model_id)]",
        help="Field that will be part of an unique key.")
    match_id = fields.Many2one(
        comodel_name="base_import.match",
        string="Match",
        ondelete="cascade",
        required=True)
    # Convenience relation used by the field_id domain above.
    model_id = fields.Many2one(
        related="match_id.model_id")
    conditional = fields.Boolean(
        help="Enable if you want to use this field only in some conditions.")
    imported_value = fields.Char(
        help="If the imported value is not this, the whole matching rule will "
             "be discarded. Be careful, this data is always treated as a "
             "string, and comparison is case-sensitive so if you set 'True', "
             "it will NOT match '1' nor 'true', only EXACTLY 'True'.")
    @api.multi
    @api.onchange("field_id", "match_id", "conditional", "imported_value")
    def _onchange_match_id_name(self):
        """Update match name."""
        # Recompute the parent rule's display name live in the UI.
        self.mapped("match_id")._compute_name()
| agpl-3.0 |
LipuFei/team-hipchat-bot | bot/hipchat_db.py | 1 | 3336 | import json
import logging
import time
import leveldb
from twisted.internet import reactor
from twisted.web.client import getPage
class HipchatUserDb(object):
    """LevelDB-backed cache of HipChat user details, populated through the
    HipChat v2 REST API using twisted's reactor, with fetches spaced out
    to avoid hammering the server."""
    def __init__(self, bot, server, token, db_path):
        self._logger = logging.getLogger(self.__class__.__name__)
        self.bot = bot
        self.server = server
        self.token = token
        self._db = leveldb.LevelDB(db_path)
        # NOTE(review): _update_interval is not read anywhere in this
        # class -- presumably intended for periodic refresh; confirm.
        self._update_interval = 60.0 * 60.0 * 24.0 * 5.0 # every 5 days
        # Minimum spacing between two API requests, in seconds.
        self._fetch_interval = 5.0
        self._last_time = 0.0
    def set(self, name, mention_name):
        """Store a value under the user's name (both stored UTF-8 encoded)."""
        self._db.Put(name.encode('utf-8'), mention_name.encode('utf-8'))
    def get(self, name):
        """Return the stored value for *name*; raises KeyError if absent."""
        return self._db.Get(name.encode('utf-8'))
    def has(self, name):
        """Return True if *name* exists in the database."""
        result = True
        try:
            self._db.Get(name.encode('utf-8'))
        except KeyError:
            result = False
        return result
    def _append_auth_token(self, url):
        """Return *url* with the auth_token query parameter appended,
        using '?' or '&' depending on whether a query string exists."""
        final_url = url + (u'?auth_token=%s' if url.find(u'?') == -1 else u'&auth_token=%s')
        final_url = final_url % self.token
        return final_url
    def _get_later(self):
        """Return the delay in seconds until the next allowed fetch,
        keeping successive requests at least _fetch_interval apart."""
        current_time = time.time()
        later = self._last_time + self._fetch_interval - current_time
        if later < 0.0:
            later = 0.0
        # Reserve the slot so concurrent schedulers queue behind it.
        self._last_time = current_time + later
        return later
    def populate_user_db(self):
        """Kick off the (paginated, rate-limited) fetch of all users."""
        self._logger.info(u"starting fetching users...")
        final_url = u"https://%(server)s/v2/user" % {u"server": self.server}
        final_url = self._append_auth_token(final_url)
        later = self._get_later()
        reactor.callLater(later, self._get_page, final_url,
                          self._got_user_list_success, self._got_user_list_failure)
    def _get_page(self, url, callback1, callback2):
        """Fetch *url* asynchronously, wiring success/failure callbacks."""
        getPage(url.encode('utf-8')).addCallbacks(callback1, callback2)
    def _got_user_list_success(self, data):
        """Handle one page of the user list: schedule a detail fetch per
        user, then schedule the next page if there is one."""
        result_dict = json.loads(data, encoding='utf-8')
        # get user details
        for user in result_dict.get(u'items', []):
            if u'name' in user and u'mention_name' in user:
                link = user.get(u'links', {}).get(u'self')
                if link is not None:
                    # get full info
                    final_url = self._append_auth_token(link)
                    later = self._get_later()
                    reactor.callLater(later, self._get_page, final_url,
                                      self._got_user_success, self._got_user_failure)
        # get next page
        next_link = result_dict.get(u'links', {}).get(u'next')
        if next_link is not None:
            final_url = self._append_auth_token(next_link)
            later = self._get_later()
            reactor.callLater(later, self._get_page, final_url,
                              self._got_user_list_success, self._got_user_list_failure)
    def _got_user_list_failure(self, result):
        """Log a failed user-list page fetch."""
        self._logger.error(u"failed to get user list: %s", repr(result))
    def _got_user_success(self, data):
        """Store the full JSON blob of a user's details under their name."""
        user = json.loads(data, encoding='utf-8')
        self.set(user[u'name'], data.encode('utf-8'))
        self._logger.info(u"user details updated.")
    def _got_user_failure(self, result):
        """Log a failed user-detail fetch."""
        self._logger.error(u"failed to get user details: %s", repr(result))
| mit |
OptiPop/external_skia | tools/misc_utils.py | 68 | 7711 | # Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to host the VerboseSubprocess, ChangeDir, and ReSearch classes.
"""
import os
import re
import subprocess
def print_subprocess_args(prefix, *args, **kwargs):
    """Print out args in a human-readable manner.

    Args:
        prefix: (string) printed at the start of every emitted line.
        *args: positional arguments destined for a subprocess call; the
            first one is the command argument list.
        **kwargs: keyword arguments destined for a subprocess call; only
            'cwd' is echoed, as a cd / cd - pair around the command.
    """
    def quote_and_escape(string):
        """Quote and escape a string if necessary."""
        if ' ' in string or '\n' in string:
            string = '"%s"' % string.replace('"', '\\"')
        return string
    # Single-argument print(...) parses identically under Python 2 (as a
    # parenthesized expression statement) and Python 3, unlike the bare
    # print statements used previously, which were Python-2-only syntax.
    if 'cwd' in kwargs:
        print('%scd %s' % (prefix, kwargs['cwd']))
    print(prefix + ' '.join(quote_and_escape(arg) for arg in args[0]))
    if 'cwd' in kwargs:
        print('%scd -' % prefix)
class VerboseSubprocess(object):
    """Delegate to subprocess methods, echoing each command beforehand.

    Attributes:
        verbose: (boolean) should we print out the command or not. If
            not, this is the same as calling the subprocess method
        quiet: (boolean) suppress stdout on check_call and call.
        prefix: (string) When verbose, what to print before each command.
    """

    def __init__(self, verbose):
        self.verbose = verbose
        self.quiet = not verbose
        self.prefix = '~~$ '

    def _echo(self, *args, **kwargs):
        """Print the pending command line when running verbosely."""
        if self.verbose:
            print_subprocess_args(self.prefix, *args, **kwargs)

    def check_call(self, *args, **kwargs):
        """subprocess.check_call() with optional echo and stdout muting.

        Args:
            *args: forwarded to subprocess.check_call()
            **kwargs: forwarded to subprocess.check_call()

        Returns:
            Whatever subprocess.check_call() returns.

        Raises:
            OSError or subprocess.CalledProcessError: raised by check_call.
        """
        self._echo(*args, **kwargs)
        if not self.quiet:
            return subprocess.check_call(*args, **kwargs)
        with open(os.devnull, 'w') as sink:
            return subprocess.check_call(*args, stdout=sink, **kwargs)

    def call(self, *args, **kwargs):
        """subprocess.call() with optional echo and stdout muting.

        Args:
            *args: forwarded to subprocess.call()
            **kwargs: forwarded to subprocess.call()

        Returns:
            Whatever subprocess.call() returns.

        Raises:
            OSError or subprocess.CalledProcessError: raised by call.
        """
        self._echo(*args, **kwargs)
        if not self.quiet:
            return subprocess.call(*args, **kwargs)
        with open(os.devnull, 'w') as sink:
            return subprocess.call(*args, stdout=sink, **kwargs)

    def check_output(self, *args, **kwargs):
        """subprocess.check_output() with optional echo.

        Args:
            *args: forwarded to subprocess.check_output()
            **kwargs: forwarded to subprocess.check_output()

        Returns:
            Whatever subprocess.check_output() returns.

        Raises:
            OSError or subprocess.CalledProcessError: raised by check_output.
        """
        self._echo(*args, **kwargs)
        return subprocess.check_output(*args, **kwargs)

    def strip_output(self, *args, **kwargs):
        """check_output() with the result str()-ified and whitespace-stripped.

        Args:
            *args: forwarded to subprocess.check_output()
            **kwargs: forwarded to subprocess.check_output()

        Returns:
            The process output as a string without leading or trailing
            whitespace.

        Raises:
            OSError or subprocess.CalledProcessError: raised by check_output.
        """
        self._echo(*args, **kwargs)
        return str(subprocess.check_output(*args, **kwargs)).strip()

    def popen(self, *args, **kwargs):
        """subprocess.Popen() with optional echo.

        Args:
            *args: forwarded to subprocess.Popen()
            **kwargs: forwarded to subprocess.Popen()

        Returns:
            The subprocess.Popen object.

        Raises:
            OSError or subprocess.CalledProcessError: raised by Popen.
        """
        self._echo(*args, **kwargs)
        return subprocess.Popen(*args, **kwargs)
class ChangeDir(object):
    """Use with a with-statement to temporarily change directories.

    On entry, chdir to the requested directory and remember where we
    came from; on exit, chdir back.  Passing os.curdir ('.') makes both
    steps no-ops.
    """
    # pylint: disable=I0011,R0903

    def __init__(self, directory, verbose=False):
        self._directory = directory
        self._verbose = verbose

    def __enter__(self):
        if self._directory != os.curdir:
            if self._verbose:
                # Single-argument print() parses identically on Python 2
                # and 3; the previous bare print statement was py2-only.
                print('~~$ cd %s' % self._directory)
            cwd = os.getcwd()
            os.chdir(self._directory)
            # Reuse the attribute to remember where to return on exit.
            self._directory = cwd

    def __exit__(self, etype, value, traceback):
        if self._directory != os.curdir:
            if self._verbose:
                print('~~$ cd %s' % self._directory)
            os.chdir(self._directory)
class ReSearch(object):
    """A collection of static methods for regexing things."""

    @staticmethod
    def search_within_stream(input_stream, pattern, default=None):
        """Search a file-like object line by line for a regex match.

        Returns the parenthesized group named 'return' from the first
        line that matches.  Matches never span newlines.

        For example:
            pattern = '^root(:[^:]*){4}:(?P<return>[^:]*)'
            with open('/etc/passwd', 'r') as stream:
                return search_within_file(stream, pattern)
        should return root's home directory (/root on my system).

        Args:
            input_stream: file-like object to be read
            pattern: (string) to be passed to re.compile
            default: what to return if no match

        Returns:
            A string or whatever default is
        """
        regex = re.compile(pattern)
        for text_line in input_stream:
            found = regex.search(text_line)
            if found is not None:
                return found.group('return')
        return default

    @staticmethod
    def search_within_string(input_string, pattern, default=None):
        """Search a string for a regex match.

        Args:
            input_string: (string) to be searched
            pattern: (string) to be passed to re.compile
            default: what to return if no match

        Returns:
            The 'return' group of the first match, or default.
        """
        found = re.search(pattern, input_string)
        if found is None:
            return default
        return found.group('return')

    @staticmethod
    def search_within_output(verbose, pattern, default, *args, **kwargs):
        """Search a subprocess's stdout for a regex match, line by line.

        Args:
            verbose: (boolean) should we call print_subprocess_args?
            pattern: (string) to be passed to re.compile
            default: what to return if no match
            *args: to be passed to subprocess.Popen()
            **kwargs: to be passed to subprocess.Popen()

        Returns:
            The 'return' group of the first match, or default.
        """
        if verbose:
            print_subprocess_args('~~$ ', *args, **kwargs)
        proc = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
        return ReSearch.search_within_stream(proc.stdout, pattern, default)
| bsd-3-clause |
mrunge/openstack_horizon | setup.py | 608 | 1045 | #!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing # noqa
except ImportError:
    pass
# pbr=True hands all packaging configuration over to the pbr plugin
# (pulled in via setup_requires), so no metadata is declared here.
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)
| apache-2.0 |
maxiee/MyCodes | KalmanAndBesianFiltersInPython/MyKalman/sensor.py | 1 | 1504 | import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import numpy.random as random
import math
class OneDSensor(object):
    """Simulates a target moving in one dimension plus a noisy sensor
    reading of its position."""

    def __init__(self,
                 x0=0,  # initial position
                 velocity=1,
                 measurement_variance=0.0,
                 process_variance=0.0):
        self.x = x0
        self.velocity = velocity
        # Convert the variances to standard deviations once, up front.
        self.noise = math.sqrt(measurement_variance)
        self.pnoise = math.sqrt(process_variance)
        self.constant_vel = velocity

    def sense_position(self):
        """Advance the target one step and return a noisy position reading."""
        # random.rand(): uniform on [0, 1); random.randn(): standard normal.
        drift = abs(random.rand() * self.pnoise)
        # Nudge the velocity back toward its nominal value.
        if self.velocity > self.constant_vel:
            drift = -drift
        self.velocity += drift
        self.x += self.velocity
        return self.x + random.randn() * self.noise
if __name__ == "__main__":
    # Demo: plot three sensors with different measurement/process noise
    # against the noise-free trajectory.
    count = 100
    dog = OneDSensor(measurement_variance=4.0)
    xs = []
    dog2 = OneDSensor(measurement_variance=100.0)
    xs2 = []
    dog3 = OneDSensor(process_variance=0.5)
    xs3 = []
    # Ideal positions: with x0=0 and velocity=1, x(t) = t.
    xs_real = [i for i in range(count)]
    for i in range(count):
        x = dog.sense_position()
        xs.append(x)
        x2 = dog2.sense_position()
        xs2.append(x2)
        x3 = dog3.sense_position()
        xs3.append(x3)
    plt.plot(xs_real, label='real', lw=2)
    plt.plot(xs, label='Sensor')
    plt.plot(xs2, label='Sensor2')
    plt.plot(xs3, label='Sensor3')
    plt.legend(loc='best')
    plt.show()
agry/NGECore2 | scripts/mobiles/rori/squall_female.py | 2 | 1568 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
	"""Build and register the 'squall_female' creature template with the
	core spawn service (Jython script run by the NGECore2 server)."""
	mobileTemplate = MobileTemplate()
	mobileTemplate.setCreatureName('squall_female')
	mobileTemplate.setLevel(37)
	mobileTemplate.setDifficulty(Difficulty.NORMAL)
	# Spawn placement range, in meters from the spawner.
	mobileTemplate.setMinSpawnDistance(4)
	mobileTemplate.setMaxSpawnDistance(8)
	mobileTemplate.setDeathblow(False)
	mobileTemplate.setScale(1)
	# Harvestable resources.
	mobileTemplate.setMeatType("Herbivore Meat")
	mobileTemplate.setMeatAmount(8)
	mobileTemplate.setHideType("Bristley Hide")
	# NOTE(review): setBoneAmount is called twice (12 here, 8 below) and
	# no setHideAmount follows setHideType -- this first call probably
	# should have been setHideAmount(12); confirm against sibling scripts.
	mobileTemplate.setBoneAmount(12)
	mobileTemplate.setBoneType("Avian Bones")
	mobileTemplate.setBoneAmount(8)
	mobileTemplate.setSocialGroup("squall")
	mobileTemplate.setAssistRange(6)
	mobileTemplate.setStalker(False)
	mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
	templates = Vector()
	templates.add('object/mobile/shared_squall.iff')
	mobileTemplate.setTemplates(templates)
	weaponTemplates = Vector()
	weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
	weaponTemplates.add(weapontemplate)
	mobileTemplate.setWeaponTemplateVector(weaponTemplates)
	attacks = Vector()
	attacks.add('bm_bite_3')
	mobileTemplate.setDefaultAttack('creatureMeleeAttack')
	mobileTemplate.setAttacks(attacks)
	core.spawnService.addMobileTemplate('squall_female', mobileTemplate)
return | lgpl-3.0 |
Versent/ansible | v1/ansible/utils/display_functions.py | 147 | 2184 | # (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import textwrap
from ansible import constants as C
from ansible import errors
from ansible.callbacks import display
__all__ = ['deprecated', 'warning', 'system_warning']
# list of all deprecation messages to prevent duplicate display
deprecations = {}
warns = {}
def deprecated(msg, version, removed=False):
    ''' used to print out a deprecation message.'''
    # Respect the global config toggle unless the feature is gone already.
    if not removed and not C.DEPRECATION_WARNINGS:
        return
    if not removed:
        if version:
            new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
        else:
            new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
        new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
    else:
        # A removed feature is a hard error, not a warning.
        raise errors.AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
    wrapped = textwrap.wrap(new_msg, 79)
    new_msg = "\n".join(wrapped) + "\n"
    # Deduplicate: display each distinct message at most once per run.
    if new_msg not in deprecations:
        display(new_msg, color='purple', stderr=True)
        deprecations[new_msg] = 1
def warning(msg):
    """Display a wrapped [WARNING] message once per distinct message."""
    text = "\n[WARNING]: %s" % msg
    text = "\n".join(textwrap.wrap(text, 79)) + "\n"
    if text in warns:
        return
    display(text, color='bright purple', stderr=True)
    warns[text] = 1
def system_warning(msg):
    """Forward *msg* to warning() only when system warnings are enabled."""
    if not C.SYSTEM_WARNINGS:
        return
    warning(msg)
| gpl-3.0 |
grepme/CMPUT410Lab01 | virt_env/virt1/lib/python2.7/site-packages/FormEncode-1.3.0a1-py2.7.egg/formencode/tests/test_subclassing.py | 3 | 9115 | # -*- coding: utf-8 -*-
import unittest
from formencode.api import is_validator, FancyValidator, Invalid
from formencode.compound import CompoundValidator, All
from formencode.validators import Int
class CustomValidator(FancyValidator):
    """A custom validator based directly on FancyValidator."""

    messages = {
        'custom': "%(number)s is invalid",
    }

    def _convert_to_python(self, value, state):
        """Convert to int, rejecting the string '1'."""
        if value != '1':
            return int(value)
        raise Invalid(
            self.message('custom', state, number='one'), value, state)

    def _convert_from_python(self, value, state):
        """Convert to str, rejecting the integer 2."""
        if value != 2:
            return str(value)
        raise Invalid(
            self.message('custom', state, number='two'), value, state)

    def _validate_other(self, value, state):
        """Reject the pre-conversion value '3'."""
        if value == '3':
            raise Invalid(
                self.message('custom', state, number='three'), value, state)

    def _validate_python(self, value, state):
        """Reject the post-conversion value 4."""
        if value == 4:
            raise Invalid(
                self.message('custom', state, number='four'), value, state)
class TestCustomValidator(unittest.TestCase):
    """Exercises each FancyValidator hook of CustomValidator: '1'/2/'3'/4
    are each rejected by exactly one hook, everything else passes."""
    def test_is_validator(self):
        self.assertTrue(is_validator(CustomValidator))
        self.assertTrue(is_validator(CustomValidator()))
    def test_to_python(self):
        # '1' fails _convert_to_python, '3' fails _validate_other,
        # '4' fails _validate_python (after conversion to int 4).
        cv = CustomValidator()
        self.assertEqual(cv.to_python('0'), 0)
        try:
            cv.to_python('1')
        except Invalid as e:
            self.assertTrue(
                'one is invalid' in str(e), e)
        else:
            self.fail("one should be invalid")
        self.assertEqual(cv.to_python('2'), 2)
        try:
            cv.to_python('3')
        except Invalid as e:
            self.assertTrue(
                'three is invalid' in str(e), e)
        else:
            self.fail("three should be invalid")
        try:
            cv.to_python('4')
        except Invalid as e:
            self.assertTrue(
                'four is invalid' in str(e), e)
        else:
            self.fail("four should be invalid")
        self.assertEqual(cv.to_python('5'), 5)
    def test_from_python(self):
        # By default only _convert_from_python runs in this direction,
        # so only the integer 2 is rejected.
        cv = CustomValidator()
        self.assertEqual(cv.from_python(0), '0')
        self.assertEqual(cv.from_python(1), '1')
        try:
            cv.from_python(2)
        except Invalid as e:
            self.assertTrue(
                'two is invalid' in str(e), e)
        else:
            self.fail("two should be invalid")
        self.assertEqual(cv.from_python(3), '3')
        self.assertEqual(cv.from_python(4), '4')
        self.assertEqual(cv.from_python(5), '5')
    def test_from_python_no_accept(self):
        # With accept_python=False the validation hooks also run on
        # from_python, so 2, 3 and 4 are all rejected.
        cv = CustomValidator(accept_python=False)
        self.assertEqual(cv.from_python(0), '0')
        self.assertEqual(cv.from_python(1), '1')
        try:
            cv.from_python(2)
        except Invalid as e:
            self.assertTrue(
                'two is invalid' in str(e), e)
        else:
            self.fail("two should be invalid")
        try:
            cv.from_python(3)
        except Invalid as e:
            self.assertTrue(
                'three is invalid' in str(e), e)
        else:
            self.fail("three should be invalid")
        try:
            cv.from_python(4)
        except Invalid as e:
            self.assertTrue(
                'four is invalid' in str(e), e)
        else:
            self.fail("four should be invalid")
        self.assertEqual(cv.from_python(5), '5')
class NotOneValidator(Int):
    """A custom validator based on an existing validator."""

    messages = {
        'custom': "must not be %(number)d",
    }
    number = 1

    def _convert_to_python(self, value, state):
        """Convert via Int, then reject the configured forbidden number."""
        converted = super(NotOneValidator, self)._convert_to_python(
            value, state)
        if converted == self.number:
            raise Invalid(
                self.message('custom', state, number=self.number),
                converted, state)
        return converted
class TestNotOneValidator(unittest.TestCase):
    """Checks NotOneValidator with the default, an overridden 'number',
    and in combination with Int's min/max range checks."""
    def test_is_validator(self):
        self.assertTrue(is_validator(NotOneValidator))
        self.assertTrue(is_validator(NotOneValidator()))
        self.assertTrue(is_validator(NotOneValidator(one=2)))
    def test_to_python(self):
        # Default forbidden number is 1.
        nov = NotOneValidator()
        self.assertEqual(nov.to_python('0'), 0)
        try:
            nov.to_python('1')
        except Invalid as e:
            self.assertTrue(
                'must not be 1' in str(e), e)
        else:
            self.fail("1 should be invalid")
        self.assertEqual(nov.to_python('2'), 2)
        self.assertEqual(nov.to_python('42'), 42)
    def test_to_python_number(self):
        # Forbidden number overridden at construction time.
        nov = NotOneValidator(number=42)
        self.assertEqual(nov.to_python('0'), 0)
        self.assertEqual(nov.to_python('1'), 1)
        self.assertEqual(nov.to_python('2'), 2)
        try:
            nov.to_python('42')
        except Invalid as e:
            self.assertTrue(
                'must not be 42' in str(e), e)
        else:
            self.fail("42 should be invalid")
    def test_to_python_range(self):
        # Int's min/max checks reject values outside [40, 49]; inside the
        # range only the forbidden 42 is rejected.
        nov = NotOneValidator(min=40, max=49, number=42)
        self.assertRaises(Invalid, nov.to_python, '0')
        self.assertRaises(Invalid, nov.to_python, '1')
        self.assertRaises(Invalid, nov.to_python, '2')
        self.assertRaises(Invalid, nov.to_python, '39')
        self.assertEqual(nov.to_python('40'), 40)
        self.assertEqual(nov.to_python('41'), 41)
        try:
            nov.to_python('42')
        except Invalid as e:
            self.assertTrue(
                'must not be 42' in str(e), e)
        else:
            self.fail("42 should be invalid")
        self.assertEqual(nov.to_python('43'), 43)
        self.assertEqual(nov.to_python('49'), 49)
        self.assertRaises(Invalid, nov.to_python, '50')
class CustomCompoundValidator(CompoundValidator):
    """A custom validator based directly on CompoundValidator."""

    def _attempt_convert(self, value, state, validate):
        """Delegate validation to the second subvalidator only."""
        second = self.validators[1]
        return validate(second, value, state)
class TestCustomCompoundValidator(unittest.TestCase):
    """Verifies that CustomCompoundValidator applies only its second
    subvalidator (max=5); the first (min=3) is deliberately ignored."""
    def setUp(self):
        self.validator = CustomCompoundValidator(
            validators=[Int(min=3), Int(max=5)])
    def test_is_validator(self):
        self.assertTrue(is_validator(CustomCompoundValidator))
        self.assertTrue(is_validator(self.validator))
    def test_to_python(self):
        ccv = self.validator
        # '2' passes because the min=3 validator is never consulted.
        self.assertEqual(ccv.to_python('2'), 2)
        self.assertEqual(ccv.to_python('4'), 4)
        self.assertRaises(Invalid, ccv.to_python, '6')
class AllAndNotOneValidator(All):
    """A custom validator based on an existing CompoundValidator."""

    messages = {
        'custom': "must not be %(number)d",
    }
    number = 1

    def _attempt_convert(self, value, state, validate):
        """Run the normal All conversion, then veto the forbidden number."""
        converted = super(AllAndNotOneValidator, self)._attempt_convert(
            value, state, validate)
        if converted == self.number:
            raise Invalid(
                self.message('custom', state, number=self.number),
                converted, state)
        return converted
class TestAllAndNotOneValidator(unittest.TestCase):
    """Checks that both subvalidators (range [3, 5]) AND the forbidden
    number (4) are enforced by AllAndNotOneValidator."""
    def setUp(self):
        self.validator = AllAndNotOneValidator(
            validators=[Int(min=3), Int(max=5)], number=4)
    def test_is_validator(self):
        self.assertTrue(is_validator(AllAndNotOneValidator))
        self.assertTrue(is_validator(self.validator))
    def test_to_python(self):
        cav = self.validator
        # Below range.
        self.assertRaises(Invalid, cav.to_python, '1')
        self.assertRaises(Invalid, cav.to_python, '2')
        self.assertEqual(cav.to_python('3'), 3)
        # In range but equal to the forbidden number.
        try:
            cav.to_python('4')
        except Invalid as e:
            self.assertTrue(
                'must not be 4' in str(e), e)
        else:
            self.fail("4 should be invalid")
        self.assertEqual(cav.to_python('5'), 5)
        # Above range.
        self.assertRaises(Invalid, cav.to_python, '6')
        self.assertRaises(Invalid, cav.to_python, '7')
class DeclarativeAllValidator(All):
    """A CompoundValidator with subvalidators given as attributes."""
    # The compound-validator machinery collects these attributes into the
    # ``validators`` list and removes them from the class namespace
    # (asserted by TestDeclarativeAllValidator.test_attrs_deleted).
    first_validator = Int(min=3)
    second_validator = Int(max=5)
class TestDeclarativeAllValidator(unittest.TestCase):
    """Checks the declarative subvalidator style: attributes are consumed
    into the validator list and both range checks ([3, 5]) apply."""
    def test_is_validator(self):
        self.assertTrue(is_validator(DeclarativeAllValidator))
        self.assertTrue(is_validator(DeclarativeAllValidator()))
    def test_attrs_deleted(self):
        # The declarative attributes must no longer exist on the class.
        self.assertFalse(hasattr(DeclarativeAllValidator, 'first_validator'))
        self.assertFalse(hasattr(DeclarativeAllValidator, 'second_validator'))
    def test_to_python(self):
        dav = DeclarativeAllValidator()
        self.assertRaises(Invalid, dav.to_python, '1')
        self.assertRaises(Invalid, dav.to_python, '2')
        self.assertEqual(dav.to_python('3'), 3)
        self.assertEqual(dav.to_python('4'), 4)
        self.assertEqual(dav.to_python('5'), 5)
        self.assertRaises(Invalid, dav.to_python, '6')
        self.assertRaises(Invalid, dav.to_python, '7')
| apache-2.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/numpy/lib/tests/test_format.py | 33 | 30021 | from __future__ import division, absolute_import, print_function
r''' Test the .npy file format.
Set up:
>>> import sys
>>> from io import BytesIO
>>> from numpy.lib import format
>>>
>>> scalars = [
... np.uint8,
... np.int8,
... np.uint16,
... np.int16,
... np.uint32,
... np.int32,
... np.uint64,
... np.int64,
... np.float32,
... np.float64,
... np.complex64,
... np.complex128,
... object,
... ]
>>>
>>> basic_arrays = []
>>>
>>> for scalar in scalars:
... for endian in '<>':
... dtype = np.dtype(scalar).newbyteorder(endian)
... basic = np.arange(15).astype(dtype)
... basic_arrays.extend([
... np.array([], dtype=dtype),
... np.array(10, dtype=dtype),
... basic,
... basic.reshape((3,5)),
... basic.reshape((3,5)).T,
... basic.reshape((3,5))[::-1,::2],
... ])
...
>>>
>>> Pdescr = [
... ('x', 'i4', (2,)),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> PbufferT = [
... ([3,2], [[6.,4.],[6.,4.]], 8),
... ([4,3], [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> Ndescr = [
... ('x', 'i4', (2,)),
... ('Info', [
... ('value', 'c16'),
... ('y2', 'f8'),
... ('Info2', [
... ('name', 'S2'),
... ('value', 'c16', (2,)),
... ('y3', 'f8', (2,)),
... ('z3', 'u4', (2,))]),
... ('name', 'S2'),
... ('z2', 'b1')]),
... ('color', 'S2'),
... ('info', [
... ('Name', 'U8'),
... ('Value', 'c16')]),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> NbufferT = [
... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),
... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> record_arrays = [
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
... ]
Test the magic string writing.
>>> format.magic(1, 0)
'\x93NUMPY\x01\x00'
>>> format.magic(0, 0)
'\x93NUMPY\x00\x00'
>>> format.magic(255, 255)
'\x93NUMPY\xff\xff'
>>> format.magic(2, 5)
'\x93NUMPY\x02\x05'
Test the magic string reading.
>>> format.read_magic(BytesIO(format.magic(1, 0)))
(1, 0)
>>> format.read_magic(BytesIO(format.magic(0, 0)))
(0, 0)
>>> format.read_magic(BytesIO(format.magic(255, 255)))
(255, 255)
>>> format.read_magic(BytesIO(format.magic(2, 5)))
(2, 5)
Test the header writing.
>>> for arr in basic_arrays + record_arrays:
... f = BytesIO()
... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
... print repr(f.getvalue())
...
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
'''
import os
import random
import shutil
import sys
import tempfile
import warnings
from io import BytesIO

import numpy as np
from numpy.compat import asbytes, asbytes_nested
from numpy.lib import format
from numpy.testing import (
    run_module_suite, assert_, assert_array_equal, assert_raises, raises,
    dec
    )
# Shared scratch directory for tests that need real files; created in
# setup_module() and removed in teardown_module().
tempdir = None
# Module-level setup.
def setup_module():
    global tempdir
    tempdir = tempfile.mkdtemp()
def teardown_module():
    global tempdir
    # Tolerate a failed or skipped setup: only remove what actually exists.
    if tempdir is not None and os.path.isdir(tempdir):
        shutil.rmtree(tempdir)
        tempdir = None
# Generate some basic arrays to test with.
scalars = [
    np.uint8,
    np.int8,
    np.uint16,
    np.int16,
    np.uint32,
    np.int32,
    np.uint64,
    np.int64,
    np.float32,
    np.float64,
    np.complex64,
    np.complex128,
    object,
    ]
# Every scalar type is exercised in both byte orders and in six memory
# layouts (empty, rank-0, 1-D, C-order, F-order, non-contiguous).
basic_arrays = []
for scalar in scalars:
    for endian in '<>':
        dtype = np.dtype(scalar).newbyteorder(endian)
        basic = np.arange(1500).astype(dtype)
        basic_arrays.extend([
            # Empty
            np.array([], dtype=dtype),
            # Rank-0
            np.array(10, dtype=dtype),
            # 1-D
            basic,
            # 2-D C-contiguous
            basic.reshape((30, 50)),
            # 2-D F-contiguous
            basic.reshape((30, 50)).T,
            # 2-D non-contiguous
            basic.reshape((30, 50))[::-1, ::2],
            ])
# More complicated record arrays.
# This is the structure of the table used for plain objects:
#
# +-+-+-+
# |x|y|z|
# +-+-+-+
# Structure of a plain array description:
Pdescr = [
    ('x', 'i4', (2,)),
    ('y', 'f8', (2, 2)),
    ('z', 'u1')]
# A plain list of tuples with values for testing:
PbufferT = [
    # x     y                  z
    ([3, 2], [[6., 4.], [6., 4.]], 8),
    ([4, 3], [[7., 5.], [7., 5.]], 9),
    ]
# This is the structure of the table used for nested objects (DON'T PANIC!):
#
# +-+---------------------------------+-----+----------+-+-+
# |x|Info                             |color|info      |y|z|
# | +-----+--+----------------+----+--+     +----+-----+ | |
# | |value|y2|Info2           |name|z2|     |Name|Value| | |
# | |     |  +----+-----+--+--+    |  |     |    |     | | |
# | |     |  |name|value|y3|z3|    |  |     |    |     | | |
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
#
# The corresponding nested array description:
Ndescr = [
    ('x', 'i4', (2,)),
    ('Info', [
        ('value', 'c16'),
        ('y2', 'f8'),
        ('Info2', [
            ('name', 'S2'),
            ('value', 'c16', (2,)),
            ('y3', 'f8', (2,)),
            ('z3', 'u4', (2,))]),
        ('name', 'S2'),
        ('z2', 'b1')]),
    ('color', 'S2'),
    ('info', [
        ('Name', 'U8'),
        ('Value', 'c16')]),
    ('y', 'f8', (2, 2)),
    ('z', 'u1')]
NbufferT = [
    # x     Info                                                color info        y                  z
    #       value y2 Info2                            name z2         Name Value
    #                name   value    y3       z3
    ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
     'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
    ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
     'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
    ]
# Each structured dtype is tested in both byte orders as well.
record_arrays = [
    np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
    np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
    np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
    np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
    ]
# BytesIO that reads a random number of bytes at a time
class BytesIOSRandomSize(BytesIO):
    """A BytesIO whose read(size) returns between 1 and *size* bytes.

    Used to exercise format.read_array against short reads.  The original
    implementation unconditionally called random.randint(1, size), which
    raises TypeError for the default size=None and ValueError for size <= 0;
    those cases now fall back to plain BytesIO semantics.
    """

    def read(self, size=None):
        if size is None or size <= 0:
            return super(BytesIOSRandomSize, self).read(size)
        return super(BytesIOSRandomSize, self).read(random.randint(1, size))
def roundtrip(arr):
    """Serialize *arr* to an in-memory .npy stream and read it back."""
    buf = BytesIO()
    format.write_array(buf, arr)
    return format.read_array(BytesIO(buf.getvalue()))
def roundtrip_randsize(arr):
    """Round-trip *arr* through a stream that yields random-sized chunks."""
    buf = BytesIO()
    format.write_array(buf, arr)
    return format.read_array(BytesIOSRandomSize(buf.getvalue()))
def roundtrip_truncated(arr):
    """Write *arr*, then try to read it back from a stream one byte short."""
    buf = BytesIO()
    format.write_array(buf, arr)
    # BytesIO is one byte short
    clipped = BytesIO(buf.getvalue()[0:-1])
    return format.read_array(clipped)
def assert_equal_(o1, o2):
    # Thin wrapper so generator tests can yield an equality check as a
    # (callable, arg, arg) tuple, like assert_array_equal is yielded below.
    assert_(o1 == o2)
def test_roundtrip():
    """Every fixture array must survive a write/read cycle unchanged."""
    for original in basic_arrays + record_arrays:
        yield assert_array_equal, original, roundtrip(original)
def test_roundtrip_randsize():
    """Round-trips must also work when the stream returns short reads."""
    for original in basic_arrays + record_arrays:
        if original.dtype == object:
            # Object arrays go through pickle; skip them for chunked reads.
            continue
        yield assert_array_equal, original, roundtrip_randsize(original)
def test_roundtrip_truncated():
    """Reading a stream that is one byte short must raise ValueError."""
    for original in basic_arrays:
        if original.dtype == object:
            continue
        yield assert_raises, ValueError, roundtrip_truncated, original
def test_long_str():
    # check items larger than internal buffer size, gh-4027
    width = format.BUFFER_SIZE + 1
    original = np.ones(1, dtype=np.dtype((str, width)))
    assert_array_equal(original, roundtrip(original))
@dec.slow
def test_memmap_roundtrip():
    """Writing via write_array and via open_memmap must give identical files.

    Also checks that a normally-written file can be opened as a memmap.
    """
    # Fixme: test crashes nose on windows.
    if not (sys.platform == 'win32' or sys.platform == 'cygwin'):
        for arr in basic_arrays + record_arrays:
            if arr.dtype.hasobject:
                # Skip these since they can't be mmap'ed.
                continue
            # Write it out normally and through mmap.
            nfn = os.path.join(tempdir, 'normal.npy')
            mfn = os.path.join(tempdir, 'memmap.npy')
            fp = open(nfn, 'wb')
            try:
                format.write_array(fp, arr)
            finally:
                fp.close()
            fortran_order = (
                arr.flags.f_contiguous and not arr.flags.c_contiguous)
            ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,
                                    shape=arr.shape, fortran_order=fortran_order)
            ma[...] = arr
            # Deleting the memmap flushes it to disk before the comparison.
            del ma
            # Check that both of these files' contents are the same.
            fp = open(nfn, 'rb')
            normal_bytes = fp.read()
            fp.close()
            fp = open(mfn, 'rb')
            memmap_bytes = fp.read()
            fp.close()
            yield assert_equal_, normal_bytes, memmap_bytes
            # Check that reading the file using memmap works.
            ma = format.open_memmap(nfn, mode='r')
            del ma
def test_compressed_roundtrip():
    """An array saved with savez_compressed must load back unchanged."""
    original = np.random.rand(200, 200)
    target = os.path.join(tempdir, 'compressed.npz')
    np.savez_compressed(target, arr=original)
    restored = np.load(target)['arr']
    assert_array_equal(original, restored)
def test_python2_python3_interoperability():
    # Each interpreter loads the .npy file written by the *other* major
    # Python version from the bundled data directory.
    fname = 'win64python2.npy' if sys.version_info[0] >= 3 else 'python3.npy'
    path = os.path.join(os.path.dirname(__file__), 'data', fname)
    assert_array_equal(np.load(path), np.ones(2))
def test_version_2_0():
    """A header too large for format 1.0 must round-trip via format 2.0.

    Writing it without an explicit version must emit a UserWarning (silent
    upgrade to 2.0), and forcing version (1, 0) must raise ValueError.
    """
    f = BytesIO()
    # requires more than 2 byte for header
    dt = [(("%d" % i) * 100, float) for i in range(500)]
    d = np.ones(1000, dtype=dt)
    format.write_array(f, d, version=(2, 0))
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', UserWarning)
        format.write_array(f, d)
        assert_(w[0].category is UserWarning)
    f.seek(0)
    n = format.read_array(f)
    assert_array_equal(d, n)
    # 1.0 requested but data cannot be saved this way
    assert_raises(ValueError, format.write_array, f, d, (1, 0))
def test_version_2_0_memmap():
    """Same as test_version_2_0 but through format.open_memmap."""
    # requires more than 2 byte for header
    dt = [(("%d" % i) * 100, float) for i in range(500)]
    d = np.ones(1000, dtype=dt)
    tf = tempfile.mktemp('', 'mmap', dir=tempdir)
    # 1.0 requested but data cannot be saved this way
    assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype,
                  shape=d.shape, version=(1, 0))
    ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                            shape=d.shape, version=(2, 0))
    ma[...] = d
    del ma
    # version=None must silently upgrade to 2.0, emitting a UserWarning.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', UserWarning)
        ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                                shape=d.shape, version=None)
        assert_(w[0].category is UserWarning)
        ma[...] = d
        del ma
    ma = format.open_memmap(tf, mode='r')
    assert_array_equal(ma, d)
def test_write_version():
    """write_array accepts exactly versions (1,0), (2,0) and None."""
    f = BytesIO()
    arr = np.arange(1)
    # Accepted version specifiers, interleaved with default-version writes.
    for version in ((1, 0), None, (2, 0)):
        format.write_array(f, arr, version=version)
        format.write_array(f, arr)
    # Every other (major, minor) pair must be rejected.
    bad_versions = [
        (1, 1),
        (0, 0),
        (0, 1),
        (2, 2),
        (255, 255),
    ]
    for version in bad_versions:
        try:
            format.write_array(f, arr, version=version)
        except ValueError:
            continue
        raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,))
# Well-formed magic prefixes carrying (major, minor) pairs that read_array
# must reject as unsupported versions.
bad_version_magic = asbytes_nested([
    '\x93NUMPY\x01\x01',
    '\x93NUMPY\x00\x00',
    '\x93NUMPY\x00\x01',
    '\x93NUMPY\x02\x00',
    '\x93NUMPY\x02\x02',
    '\x93NUMPY\xff\xff',
    ])
# Structurally broken magic strings (wrong prefix bytes or truncated) that
# read_magic itself must reject.
malformed_magic = asbytes_nested([
    '\x92NUMPY\x01\x00',
    '\x00NUMPY\x01\x00',
    '\x93numpy\x01\x00',
    '\x93MATLB\x01\x00',
    '\x93NUMPY\x01',
    '\x93NUMPY',
    '',
    ])
def test_read_magic_bad_magic():
    """Every malformed magic string must make read_magic raise ValueError."""
    expect_failure = raises(ValueError)(format.read_magic)
    for magic in malformed_magic:
        yield expect_failure, BytesIO(magic)
def test_read_version_1_0_bad_magic():
    """read_array must reject both bad versions and broken magic strings."""
    expect_failure = raises(ValueError)(format.read_array)
    for magic in bad_version_magic + malformed_magic:
        yield expect_failure, BytesIO(magic)
def test_bad_magic_args():
    """format.magic must reject version components outside 0..255."""
    for major, minor in ((-1, 1), (256, 1), (1, -1), (1, 256)):
        assert_raises(ValueError, format.magic, major, minor)
def test_large_header():
    """A small header serializes; one past the 1.0 size limit raises."""
    small = {'a': 1, 'b': 2}
    format.write_array_header_1_0(BytesIO(), small)
    oversized = {'a': 1, 'b': 2, 'c': 'x'*256*256}
    assert_raises(ValueError, format.write_array_header_1_0,
                  BytesIO(), oversized)
def test_bad_header():
    """read_array_header_1_0 must reject malformed or incomplete headers."""
    # header of length less than 2 should fail
    s = BytesIO()
    assert_raises(ValueError, format.read_array_header_1_0, s)
    s = BytesIO(b'1')
    assert_raises(ValueError, format.read_array_header_1_0, s)
    # header shorter than indicated size should fail
    s = BytesIO(b'\x01\x00')
    assert_raises(ValueError, format.read_array_header_1_0, s)
    # headers without the exact keys required should fail
    d = {"shape": (1, 2),
         "descr": "x"}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    # BUGFIX: rewind before reading.  Previously the stream was left at EOF,
    # so the ValueError came from the empty read rather than from the header
    # key validation this case is meant to exercise.
    s.seek(0)
    assert_raises(ValueError, format.read_array_header_1_0, s)
    d = {"shape": (1, 2),
         "fortran_order": False,
         "descr": "x",
         "extrakey": -1}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    s.seek(0)
    assert_raises(ValueError, format.read_array_header_1_0, s)
def test_large_file_support():
    """An array appended past the 5GB mark of a sparse file must round-trip.

    Skipped when a 5GB sparse file cannot be created cheaply.
    """
    from nose import SkipTest
    if (sys.platform == 'win32' or sys.platform == 'cygwin'):
        raise SkipTest("Unknown if Windows has sparse filesystems")
    # try creating a large sparse file
    tf_name = os.path.join(tempdir, 'sparse_file')
    try:
        # seek past end would work too, but linux truncate somewhat
        # increases the chances that we have a sparse filesystem and can
        # avoid actually writing 5GB
        import subprocess as sp
        sp.check_call(["truncate", "-s", "5368709120", tf_name])
    except:
        # Bare except is deliberate best-effort here: any failure (missing
        # truncate binary, full disk, ...) just skips the test.
        raise SkipTest("Could not create 5GB large file")
    # write a small array to the end
    with open(tf_name, "wb") as f:
        f.seek(5368709120)
        d = np.arange(5)
        np.save(f, d)
    # read it back
    with open(tf_name, "rb") as f:
        f.seek(5368709120)
        r = np.load(f)
    assert_array_equal(r, d)
if __name__ == "__main__":
    # Nose-style entry point: run every test in this module.
    run_module_suite()
| mit |
ivanod/nemesys-qos | nemesys/arp.py | 9 | 13681 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Fondazione Ugo Bordoni.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This code is derived from: arprequest.py by Antoine Millet
# Original version:
# -> https://pypi.python.org/pypi/arprequest
import ipcalc
from threading import Thread
import string
import re
from logger import logging
import socket
import platform
import ping
# NOTE(review): 're' and 'Thread' are imported a second time here/below --
# harmless duplicates that could be consolidated.
import re
from subprocess import Popen, PIPE
is_windows = (platform.system().startswith("Windows"))
logger = logging.getLogger()
if (is_windows):
  # Windows path: ARP queries go through Iphlpapi/ws2_32 via ctypes.
  from ctypes import *
  import struct
  from threading import Thread
  import Queue
  """ Loading Windows system libraries should not be a problem """
  """ Iphplpapi should work on Win 2000 and above """
  try:
    iphlpapi = windll.Iphlpapi
    ws2_32 = windll.ws2_32
  except WindowsError:
    """ Should it still fail """
    logger.error("Error loading windows system libraries!")
    raise Exception("Manca uno o più delle librerie Iphlapi.dll e ws2_32.dll")
else:
  # Unix path: hand-built ARP frames over raw sockets (see below).
  from struct import pack, unpack
  import time
  import select
# Ethernet / ARP wire-format constants.
HW_TYPE_ETH = 0x0001
ETH_P_IP = 0x0800
ETH_P_ARP = 0x0806
ARP_REQUEST = 0x0001
ARP_REPLY = 0x0002
MAC_ADDR_LEN = 0x0006
IP_ADDR_LEN = 0x0004
# Known spurious Technicolor router identities (MAC regex / fixed IP).
TECHNICOLOR_MACS = ['^A..B1.E9']
TECHNICOLOR_IPS = ['192.168.1.253']
def _print_mac(value):
return string.join(["%02X" % ord(b) for b in value], ':')
## TODO: this should be the same as _print_mac
""" Convert c_ulong*2 to a hexadecimal string or a printable ascii
string delimited by the 3rd parameter"""
def mac_straddr(mac, printable=False, delimiter=None):
  """ Expect a list of length 2 returned by arp_query """
  # NOTE(review): 'struct' (the module) is only imported in the Windows
  # branch above; on other platforms the final line would raise NameError.
  # Presumably this helper is Windows-only -- confirm call sites.
  if len(mac) != 2:
    return -1
  if printable:
    if delimiter:
      # Recurse once to get the packed bytes, then hex-format each byte.
      m = ""
      for c in mac_straddr(mac):
        m += "%02x" % ord(c) + delimiter
      return m.rstrip(delimiter)
    # No delimiter: return the repr of the packed bytes, minus quotes.
    return repr(mac_straddr(mac)).strip("\'")
  # Raw form: 4 bytes from the first ulong plus 2 bytes from the second.
  return struct.pack("L", mac[0])+struct.pack("H", mac[1])
def _print_ip(value):
return string.join(["%d" % ord(b) for b in value], '.')
def _val2int(val):
'''Hex to Integer.'''
return int(''.join(['%02d'%ord(c) for c in val]), 16)
def _is_technicolor(ip, mac):
  """Return True when (ip, mac) matches a known spurious Technicolor entry.

  The IP must appear in TECHNICOLOR_IPS and the MAC must match one of the
  TECHNICOLOR_MACS patterns; otherwise the host is considered genuine.
  """
  # BUGFIX: the sanity-check pattern lacked the trailing '{2}' and an end
  # anchor, so a MAC with a malformed final octet (e.g. 'AA:BB:CC:DD:EE:F'
  # or trailing garbage) passed the format check silently.
  if (not re.match(r"^([0-9A-F]{2}:){5}[0-9A-F]{2}$", mac, re.I)):
    logger.warn("Errore nell'utilizzo della funzione _is_technicolor: formato MAC non corretto (%s)" % mac)
  for ip_technicolor in TECHNICOLOR_IPS:
    if (ip == ip_technicolor):
      logger.debug("Trovato possibile IP spurio di router Technicolor: %s" % ip)
      for mac_technicolor in TECHNICOLOR_MACS:
        if re.search(mac_technicolor, mac, re.I):
          logger.info("Trovato IP spurio di router Technicolor: %s [%s]" % (ip, mac))
          return True
  return False
'''
This check makes is needed to ignore routers that respond to ARP and ping with two
addresses, typically this happens with routers from Technicolor/Technico
If the MAC address is the same, except for the first byte, then it is considered
Technicolor and ignored
'''
def _filter_out_technicolor(IPTable):
n_hosts = len(IPTable)
if (n_hosts < 2):
logger.debug("No check for technicolor, num hosts = %d" % n_hosts)
return n_hosts
temp_table = []
for ip_addr in IPTable:
temp_table.append(IPTable[ip_addr][2:])
unique_addresses = set(temp_table)
n_hosts_technicolor_removed = len(unique_addresses)
if (n_hosts_technicolor_removed <= 0):
logger.error("Check for technicolor FAILED")
pass
elif (n_hosts_technicolor_removed < n_hosts):
logger.info("Probable technicolor router detected")
n_hosts = n_hosts_technicolor_removed
return n_hosts
def do_arping(if_dev_name, IPsrc, NETmask, realSubnet = True, timeout = 1, mac = None, threshold = 1):
  """ARP-scan the local subnet and return the number of distinct hosts.

  Dispatches to the platform-specific scanner, logs the discovered
  (MAC, IP) pairs, then collapses spurious Technicolor double entries.
  NOTE(review): 'timeout', 'mac' and 'threshold' are unused here --
  presumably kept for call-site compatibility with do_linux_arping.
  """
  if is_windows:
    # NOTE(review): do_win_arping is not defined in this portion of the
    # file -- presumably declared further down; confirm.
    IPTable = do_win_arping(IPsrc, NETmask, realSubnet)
  else:
    IPTable = do_unix_arping(if_dev_name, IPsrc, NETmask, realSubnet)
  hosts = "HOSTS: "
  for key in IPTable:
    hosts = hosts+"[%s|%s] " % (IPTable[key], key)
  logger.info(hosts)
  # Check for router that responds with 2 IP addresses
  # with slightly different Ethernet addresses
  nHosts = _filter_out_technicolor(IPTable)
  return nHosts
###########################
## Parte linux-only
## This will only work on linux, since raw sockets are not
## supported on Windows and *BSD (including Darwin)
###########################
def do_linux_arping(if_dev_name, IPsrc, NETmask, realSubnet = True, timeout = 1, mac = None, threshold = 1):
  """Send ARP requests to every address of IPsrc/NETmask over a raw socket
  and collect the responses into a {ip: mac} table.

  Requires root (raw sockets) and an explicit local 'mac'; returns 0 when
  no MAC is supplied.
  """
  # Initialize a raw socket (requires super-user access)
  # NOTE(review): the third argument is socket.SOCK_RAW but this position is
  # the *protocol* -- looks like a constant mix-up (htons(ETH_P_ALL)?);
  # confirm against working deployments before changing.
  my_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.SOCK_RAW)
  HOST = socket.gethostbyname(socket.gethostname())
  my_socket.bind((if_dev_name, socket.SOCK_RAW))
  if (mac):
    # NOTE(review): MACsrc (and MACdst/HOST above) are computed but never
    # used; send_arp_request takes the MAC from the socket itself.
    MACsrc = "".join(chr(int(macEL, 16)) for macEL in mac.split(':'))
  else:
    logger.info("Richiesta esecuzione di arping senza la specifica del MAC address.")
    return 0
  MACdst = "\xFF"*6
  logger.debug("MAC_source = %s" % mac.upper())
  IPsrc = socket.gethostbyname(IPsrc)
  logger.debug("IP source = %s" % IPsrc)
  IPnet = ipcalc.Network('%s/%d' % (IPsrc, NETmask))
  net = IPnet.network()
  bcast = IPnet.broadcast()
  logger.debug("network = %s" % net)
  lasting = 2 ** (32 - NETmask)
  index = 0
  ### Send ARP requests to all IPs ###
  for IPdst in IPnet:
    if ((IPdst.hex() == net.hex() or IPdst.hex() == bcast.hex()) and realSubnet):
      # Skip the network and broadcast addresses of a real subnet.
      logger.debug("Saltato ip %s" % IPdst)
    elif(IPdst.dq == IPsrc):
      logger.debug("Salto il mio ip %s" % IPdst)
    else:
      # Send ARP request
      IPdst = str(IPdst)
      # logger.debug('Arping host %s' % IPdst)
      send_arp_request(IPsrc, IPdst, if_dev_name, my_socket)
    index += 1
    lasting -= 1
    # Once the address budget is exhausted, collect the replies in one pass.
    #if (index >= MAX or lasting <= 0):
    if (lasting <= 0):
      index = 0
      try:
        IPtable = receive_arp_response(mac, my_socket, timeout)
      except Exception as e:
        logger.error("Errore durante la ricezione degli arping: %s" % e)
        #TODO why this?
        # if(nHosts > threshold):
        # break
  # NOTE(review): IPtable is assigned only inside the 'lasting <= 0' branch
  # (and not when receive_arp_response raises); if that branch never runs,
  # the return below raises a NameError -- confirm intended.
  my_socket.close()
  return IPtable
def send_arp_request(src_ip, dest_ip, if_dev_name, my_socket):
  '''Broadcast one Ethernet/ARP who-has request for dest_ip from src_ip.

  The source MAC is taken from the bound raw socket; 'if_dev_name' is kept
  for signature compatibility but not used here.
  '''
  local_mac = my_socket.getsockname()[4]
  ### ETHERNET part ###
  ethernet_header = (
      # Dest MAC address (=broadcast) : TODO should be all 0???
      pack('!6B', *(0xFF,) * 6) +
      # Source MAC address :
      local_mac +
      # Protocol type (=ARP) :
      pack('!H', ETH_P_ARP))
  ### ARP part ###
  arp_payload = (
      # HW and protocol types and address lenghts (=Ethernet/IP/6/4 bytes) :
      pack('!HHBB', HW_TYPE_ETH, ETH_P_IP, MAC_ADDR_LEN, IP_ADDR_LEN) +
      # Operation type (=ARP Request) :
      pack('!H', ARP_REQUEST) +
      # Source MAC and IP addresses :
      local_mac +
      pack('!4B', *[int(octet) for octet in src_ip.split('.')]) +
      # Target MAC (unknown, =00*6) and IP addresses :
      pack('!6B', *(0,) * 6) +
      pack('!4B', *[int(octet) for octet in dest_ip.split('.')]))
  # Send the packet
  my_socket.send(ethernet_header + arp_payload)
def receive_arp_response(mac_addr, my_socket, timeout):
  """Collect ARP replies addressed to mac_addr for up to *timeout* seconds.

  Returns a {source_ip: source_mac} dict, with known spurious Technicolor
  identities filtered out.
  """
  IPtable = {}
  '''Wait for response'''
  # NOTE(review): this first timeLeft value is overwritten on the next line
  # of the loop before use -- dead assignment.
  timeLeft = timeout*1000
  stopTime = time.time() + timeout*1;
  while True:
    timeLeft = stopTime - time.time()
    whatReady = select.select([my_socket], [], [], timeLeft)
    if whatReady[0] == []: # Timeout
      break
    #TODO: is this really necessary?
    if time.time() > stopTime:
      break
    # Get packet frame :
    frame = my_socket.recv(1024)
    # Get protocol type (EtherType, frame bytes 12..13) :
    proto_type = _val2int(unpack('!2s', frame[12:14])[0])
    if proto_type != ETH_P_ARP:
      continue # Not ARP, skip
    # Get Operation type (ARP opcode, frame bytes 20..21) :
    op = _val2int(unpack('!2s', frame[20:22])[0])
    if op != ARP_REPLY:
      continue # Not ARP response, skip
    # Get addresses : hardware/protocol address lengths precede them.
    arp_headers = frame[18:20]
    arp_headers_values = unpack('!1s1s', arp_headers)
    hw_size, pt_size = [_val2int(v) for v in arp_headers_values]
    total_addresses_byte = hw_size * 2 + pt_size * 2
    arp_addrs = frame[22:22 + total_addresses_byte]
    src_hw, src_pt, dst_hw, dst_pt = unpack('!%ss%ss%ss%ss'
        % (hw_size, pt_size, hw_size, pt_size), arp_addrs)
    dest_mac = _print_mac(dst_hw)
    # Compare dest mac address in packet to the one we looked for :
    if (dest_mac.strip().upper() == mac_addr.strip().upper()):
      src_mac = _print_mac(src_hw)
      src_ip = _print_ip(src_pt)
      # TODO add check if found enough to stop
      if (src_ip not in IPtable):
        if (not _is_technicolor(src_ip, src_mac)):
          IPtable[src_ip] = src_mac
          logger.info('Trovato Host %s con indirizzo fisico %s' % (src_ip, src_mac))
        else:
          logger.debug("Found response from Technicolor")
  return IPtable
###########################
## Parte Darwin, woks also for linux
##
## Not very pretty, but works...
###########################
def do_unix_arping(if_dev_name, IPsrc=None, NETmask=24, realSubnet=True, timeout=0.01):
    """Scan IPsrc/NETmask by pinging each host and reading the ARP cache.

    Works on Darwin and Linux.  Network/broadcast addresses are skipped when
    realSubnet is true, as is our own address.  Returns {ip: mac}.
    """
    logger.debug("IP source = %s" % IPsrc)
    network = ipcalc.Network('%s/%d' % (IPsrc, NETmask))
    net = network.network()
    bcast = network.broadcast()
    logger.debug("network = %s" % net)
    found = {}
    for IPdst in network:
        boundary = IPdst.hex() == net.hex() or IPdst.hex() == bcast.hex()
        if boundary and realSubnet:
            logger.debug("Saltato ip \'%s\'" % IPdst)
            continue
        if IPdst.dq == IPsrc:
            logger.debug("Salto il mio ip \'%s\'" % IPdst)
            continue
        mac = _send_one_mac_arp(str(IPdst), timeout)
        if mac:
            found[str(IPdst)] = mac
    return found
def _send_one_mac_arp(IPdst, timeout=0.01):
    """Resolve one host's MAC address via ping plus the system ARP cache.

    Returns the zero-padded MAC string, or None when the host did not answer
    or was identified as a Technicolor device.
    """
    # Drop any cached entry first, so the answer we read back is fresh.
    pid = Popen(["arp", "-d", IPdst], stdout=PIPE)
    pid.communicate()
    # Check output? should be none
    # Ping the destination to force the kernel to (re)ARP it.
    try:
        ping.do_one("%s" % IPdst, timeout)
    except:
        pass  # Timeout
    pid = Popen(["arp", "-n", IPdst], stdout=PIPE, stderr=PIPE)
    output = pid.communicate()[0]
    my_match = re.search(r"(([a-fA-F\d]{1,2}\:){5}[a-fA-F\d]{1,2})", output)
    if not my_match:
        return None
    mac_str = _pad_mac_string(my_match.groups()[0])
    if _is_technicolor(IPdst, mac_str):
        logger.debug("Found response from Technicolor")
        return None
    logger.info('Trovato Host %s con indirizzo fisico %s' % (IPdst, mac_str))
    return mac_str
def _pad_mac_string(mac_str):
parts = mac_str.split(':')
padded_mac_str = ":".join('%02x' % int(n,16) for n in parts)
return padded_mac_str
###########################
## Parte windows
###########################
def do_win_arping(IPsrc=None, NETmask=24, realSubnet=True):
    """Scan IPsrc/NETmask via the Windows SendARP API, one thread per host.

    Skips network/broadcast addresses when realSubnet is true, and our own
    address.  Worker threads push (ip, mac) pairs onto a queue, which is
    drained into the returned {ip: mac} dict.

    Fixes over the previous version: removed the dead ``lasting``/``index``
    locals (computed but never read) and replaced the side-effecting list
    comprehensions ``[x.start() ...]`` with plain loops.
    """
    IPnet = ipcalc.Network('%s/%d' % (IPsrc, NETmask))
    result_queue = Queue.Queue()
    net = IPnet.network()
    bcast = IPnet.broadcast()
    logger.debug("network = %s" % net)
    threads = []
    ### Send ARP requests to all IPs ###
    for IPdst in IPnet:
        if ((IPdst.hex() == net.hex() or IPdst.hex() == bcast.hex()) and realSubnet):
            logger.debug("Saltato ip \'%s\'" % IPdst)
        elif(IPdst.dq == IPsrc):
            logger.debug("Salto il mio ip \'%s\'" % IPdst)
        else:
            t = Thread(target=_send_one_win_arp, args=(IPdst, result_queue))
            threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    mytable = {}
    # All workers have finished, so drain the queue until it is empty.
    while True:
        try:
            ip, mac = result_queue.get_nowait()
        except Queue.Empty:
            break
        if ip not in mytable:
            mytable[ip] = mac
    return mytable
def _send_one_win_arp(IPdst, result_queue):
    """Resolve one host through the Win32 iphlpapi.SendARP call.

    On success, puts (ip, mac) on result_queue unless the host looks like a
    Technicolor device.  Designed to run as a Thread target.
    """
    # Send ARP request
    logger.debug("Sending ARP to \'%s\'" % IPdst)
    IPdst = str(IPdst)
    # SendARP writes the 6-byte MAC into this buffer (two C ulongs = 8 bytes).
    mac_addr = (c_ulong*2)()
    addr_len = c_ulong(6)
    dest_ip = ws2_32.inet_addr(IPdst)
    # Source is our own primary address as resolved from the hostname.
    src_ip = ws2_32.inet_addr(socket.gethostbyname(socket.gethostname()))
    error = iphlpapi.SendARP(dest_ip, src_ip, byref(mac_addr), byref(addr_len))
    if error:
        # Codes 31 and 67 are treated as "host did not answer" and silently
        # ignored -- presumably ERROR_GEN_FAILURE / ERROR_BAD_NET_NAME; TODO
        # confirm against the Win32 error code reference.
        if (int(error) != 31) and (int(error) != 67):
            logger.error("Warning: SendARP failed! Error code: %d", int(error))
    else:
        mac_str = mac_straddr(mac_addr, True, ":")
        if (not _is_technicolor(IPdst, mac_str)):
            result_queue.put((IPdst, mac_str))
        else :
            logger.debug("Found response from Technicolor")
def main():
    """Placeholder entry point; this module is normally used as a library."""
    pass
if __name__ == '__main__':
main() | gpl-3.0 |
uncled1023/pygments | Pygments/pygments-lib/pygments/lexers/graph.py | 25 | 2370 | # -*- coding: utf-8 -*-
"""
pygments.lexers.graph
~~~~~~~~~~~~~~~~~~~~~
Lexers for graph query languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, this
from pygments.token import Keyword, Punctuation, Comment, Operator, Name,\
String, Number, Whitespace
__all__ = ['CypherLexer']
class CypherLexer(RegexLexer):
    """
    For `Cypher Query Language
    <http://docs.neo4j.org/chunked/milestone/cypher-query-lang.html>`_
    For the Cypher version in Neo4J 2.0
    .. versionadded:: 2.0
    """
    name = 'Cypher'
    aliases = ['cypher']
    filenames = ['*.cyp', '*.cypher']
    # Cypher keywords are case-insensitive; comments may span lines.
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        # Order matters: comments first so '//' is never read as an operator.
        'root': [
            include('comment'),
            include('keywords'),
            include('clauses'),
            include('relations'),
            include('strings'),
            include('whitespace'),
            include('barewords'),
        ],
        'comment': [
            # NOTE(review): '.*//' also matches '//' inside a string literal
            # on the same line, highlighting the whole line as a comment --
            # a known limitation of this simple rule.
            (r'^.*//.*\n', Comment.Single),
        ],
        'keywords': [
            (r'(create|order|match|limit|set|skip|start|return|with|where|'
             r'delete|foreach|not|by)\b', Keyword),
        ],
        'clauses': [
            # TODO: many missing ones, see http://docs.neo4j.org/refcard/2.0/
            (r'(all|any|as|asc|create|create\s+unique|delete|'
             r'desc|distinct|foreach|in|is\s+null|limit|match|none|'
             r'order\s+by|return|set|skip|single|start|union|where|with)\b',
             Keyword),
        ],
        'relations': [
            # Relationship arrows; the bracketed body is re-lexed with this
            # same lexer via using(this).
            (r'(-\[)(.*?)(\]->)', bygroups(Operator, using(this), Operator)),
            (r'(<-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
            (r'(-\[)(.*?)(\]-)', bygroups(Operator, using(this), Operator)),
            (r'-->|<--|\[|\]', Operator),
            (r'<|>|<>|=|<=|=>|\(|\)|\||:|,|;', Punctuation),
            (r'[.*{}]', Punctuation),
        ],
        'strings': [
            (r'"(?:\\[tbnrf\'"\\]|[^\\"])*"', String),
            # Backtick-quoted identifiers (with `` as the escape for `).
            (r'`(?:``|[^`])+`', Name.Variable),
        ],
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        'barewords': [
            (r'[a-z]\w*', Name),
            (r'\d+', Number),
        ],
    }
| bsd-2-clause |
jhurt/ReportLab | tests/test_graphics_images.py | 6 | 2101 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
"""
Tests for RLG Image shapes.
"""
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os
import unittest
from reportlab.graphics.shapes import Image, Drawing
from reportlab.graphics import renderPDF
from reportlab.lib.pagesizes import A4
IMAGES = []
IMAGENAME = 'cs_logo.gif'
IMAGENAME = 'pythonpowered.gif'
class ImageTestCase(unittest.TestCase):
    "Test RLG Image shape."

    def __del__(self):
        # Unconventional pattern: the tests only *collect* Image shapes in
        # the module-level IMAGES list; the actual PDF is rendered here, when
        # the last test case instance is garbage-collected.  test3 appends a
        # trailing None sentinel, so only the instance destroyed after all
        # tests have run sees IMAGES[-1] is None and performs the rendering.
        # NOTE(review): relies on CPython's prompt refcounting GC -- verify
        # before running on other interpreters.
        if IMAGES[-1] != None:
            return
        else:
            del IMAGES[-1]

        d = Drawing(A4[0], A4[1])
        for img in IMAGES:
            d.add(img)
        outPath = outputfile("test_graphics_images.pdf")
        renderPDF.drawToFile(d, outPath) #, ''
        assert os.path.exists(outPath) == 1

    def test0(self):
        "Test convert a bitmap file as Image shape into a tmp. PDF file."
        d = Drawing(110, 44)
        inPath = IMAGENAME
        img = Image(0, 0, 110, 44, inPath)
        d.add(img)
        IMAGES.append(img)

    def test1(self):
        "Test Image shape, adding it to a PDF page."
        inPath = IMAGENAME
        img = Image(0, 0, 110, 44, inPath)
        IMAGES.append(img)

    def test2(self):
        "Test scaled Image shape adding it to a PDF page."
        inPath = IMAGENAME
        img = Image(0, 0, 110, 44, inPath)
        d = Drawing(110, 44)
        d.add(img)
        d.translate(120, 0)
        d.scale(2, 2)
        IMAGES.append(d)

    def test3(self):
        "Test rotated Image shape adding it to a PDF page."
        inPath = IMAGENAME
        img = Image(0, 0, 110, 44, inPath)
        d = Drawing(110, 44)
        d.add(img)
        d.translate(420, 0)
        d.scale(2, 2)
        d.rotate(45)
        IMAGES.append(d)
        IMAGES.append(None) # used to indicate last test
def makeSuite():
    """Assemble the unittest suite for this module's test case."""
    return makeSuiteForClasses(ImageTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
| bsd-3-clause |
chengjf/database-interface-doc-management | flask-demo/flask/Lib/site-packages/sqlalchemy/inspection.py | 81 | 3093 | # sqlalchemy/inspect.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The inspection module provides the :func:`.inspect` function,
which delivers runtime information about a wide variety
of SQLAlchemy objects, both within the Core as well as the
ORM.
The :func:`.inspect` function is the entry point to SQLAlchemy's
public API for viewing the configuration and construction
of in-memory objects. Depending on the type of object
passed to :func:`.inspect`, the return value will either be
a related object which provides a known interface, or in many
cases it will return the object itself.
The rationale for :func:`.inspect` is twofold. One is that
it replaces the need to be aware of a large variety of "information
getting" functions in SQLAlchemy, such as :meth:`.Inspector.from_engine`,
:func:`.orm.attributes.instance_state`, :func:`.orm.class_mapper`,
and others. The other is that the return value of :func:`.inspect`
is guaranteed to obey a documented API, thus allowing third party
tools which build on top of SQLAlchemy configurations to be constructed
in a forwards-compatible way.
.. versionadded:: 0.8 The :func:`.inspect` system is introduced
as of version 0.8.
"""
from . import util, exc
_registrars = util.defaultdict(list)
def inspect(subject, raiseerr=True):
    """Produce an inspection object for the given target.
    The returned value in some cases may be the
    same object as the one given, such as if a
    :class:`.Mapper` object is passed. In other
    cases, it will be an instance of the registered
    inspection type for the given object, such as
    if an :class:`.engine.Engine` is passed, an
    :class:`.Inspector` object is returned.
    :param subject: the subject to be inspected.
    :param raiseerr: When ``True``, if the given subject
    does not
    correspond to a known SQLAlchemy inspected type,
    :class:`sqlalchemy.exc.NoInspectionAvailable`
    is raised.  If ``False``, ``None`` is returned.
    """
    type_ = type(subject)
    # Walk the MRO so a registration on a base class covers subclasses too.
    for cls in type_.__mro__:
        if cls in _registrars:
            reg = _registrars[cls]
            if reg is True:
                # ``True`` is the sentinel stored by _self_inspects: the
                # object is its own inspection result.
                return subject
            ret = reg(subject)
            if ret is not None:
                break
    else:
        # Loop completed without break: no registrar produced a result.
        reg = ret = None
    if raiseerr and (
        reg is None or ret is None
    ):
        raise exc.NoInspectionAvailable(
            "No inspection system is "
            "available for object of type %s" %
            type_)
    return ret
def _inspects(*types):
    """Decorator factory registering an inspection callable for *types*.

    Each type may be registered at most once; a duplicate registration is a
    programming error and raises AssertionError.
    """
    def register(fn_or_cls):
        for inspected_type in types:
            if inspected_type in _registrars:
                raise AssertionError(
                    "Type %s is already registered" % inspected_type)
            _registrars[inspected_type] = fn_or_cls
        return fn_or_cls
    return register
def _self_inspects(cls):
    """Class decorator: register *cls* so inspect() returns instances as-is.

    Stores the ``True`` sentinel that inspect() recognises as
    "self-inspecting".
    """
    _inspects(cls)(True)
    return cls
| apache-2.0 |
horance-liu/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer.py | 144 | 1595 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the layer abstraction for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as framework_variables
class HybridLayer(object):
  """Layers are building blocks for hybrid models."""

  def _define_vars(self,
                   params,
                   **kwargs):
    """Override to define the TensorFlow variables for the layer."""
    raise NotImplementedError

  # pylint: disable=unused-argument
  def __init__(self, params, layer_num, device_assigner, *args, **kwargs):
    """Record layer configuration, then create the layer's variables."""
    self.layer_num = layer_num
    if device_assigner:
      self.device_assigner = device_assigner
    else:
      self.device_assigner = framework_variables.VariableDeviceChooser()
    self.params = params
    self._define_vars(params, **kwargs)

  def inference_graph(self, data, data_spec=None):
    """Override to build the layer's inference graph over `data`."""
    raise NotImplementedError
| apache-2.0 |
meta-it/misc-addons | web_sessions_management/http.py | 1 | 2172 | # -*- encoding: utf-8 -*-
#
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import logging
import openerp
import werkzeug.contrib.sessions
import werkzeug.datastructures
import werkzeug.exceptions
import werkzeug.local
import werkzeug.routing
import werkzeug.wrappers
import werkzeug.wsgi
from openerp.http import request
from openerp.tools.func import lazy_property
#
_logger = logging.getLogger(__name__)
class OpenERPSession(openerp.http.OpenERPSession):
    """HTTP session that records logouts in the ``ir.sessions`` model."""

    def logout(self, keep_db=False, logout_type=None, env=None):
        """Close the session, notifying ``ir.sessions`` first.

        :param keep_db: forwarded to the standard logout.
        :param logout_type: passed to ``ir.sessions._on_session_logout``.
        :param env: optional Odoo environment; falls back to ``request.env``.
        """
        try:
            env = env or request.env
        # Was a bare ``except:`` -- that also swallowed SystemExit and
        # KeyboardInterrupt.  Only ordinary errors (request.env being
        # unavailable outside an HTTP request) should be ignored here.
        except Exception:
            pass
        if env and hasattr(env, 'registry') and env.registry.get('ir.sessions'):
            session = env['ir.sessions'].sudo().search(
                [('session_id', '=', self.sid)])
            if session:
                session._on_session_logout(logout_type)
        return super(OpenERPSession, self).logout(keep_db=keep_db)
class RootTkobr(openerp.http.Root):
    """WSGI root whose session store builds the extended OpenERPSession."""

    @lazy_property
    def session_store(self):
        # Setup http sessions; same on-disk layout as the stock store, but
        # sessions are instantiated as our logout-aware OpenERPSession.
        path = openerp.tools.config.session_dir
        _logger.debug('HTTP sessions stored in: %s', path)
        return werkzeug.contrib.sessions.FilesystemSessionStore(path, session_class=OpenERPSession)
root = RootTkobr()
openerp.http.root.session_store = root.session_store
| lgpl-3.0 |
lihui7115/ChromiumGStreamerBackend | native_client_sdk/src/build_tools/generate_make.py | 61 | 8678 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import buildbot_common
import build_version
import getos
from buildbot_common import ErrorExit
from easy_template import RunTemplateFileIfChanged
from build_paths import SDK_RESOURCE_DIR
def Trace(msg):
  """Write msg to stderr, but only when tracing has been switched on."""
  if not Trace.verbose:
    return
  sys.stderr.write(str(msg) + '\n')

# Tracing is off unless a caller flips this flag (e.g. from a -v option).
Trace.verbose = False
def IsExample(desc):
  """Return True if desc deploys under an example-style directory."""
  return desc['DEST'].startswith(('examples', 'tests', 'getting_started'))
def GenerateSourceCopyList(desc):
  """List every file that must be copied into the project's output dir."""
  # Some examples use their own Makefile/sources/etc.; for those only the
  # DATA files are copied.
  if 'TARGETS' not in desc:
    return desc.get('DATA', [])
  # Sources for each target, then HTML and data files.
  sources = [src for target in desc['TARGETS'] for src in target['SOURCES']]
  sources += desc.get('DATA', [])
  if IsExample(desc):
    sources.append('common.js')
    if not desc.get('NO_PACKAGE_FILES'):
      sources += ['icon128.png', 'background.js']
  return sources
def GetSourcesDict(sources):
  """Bucket source file names by extension.

  Args:
    sources: Iterable of file names.

  Returns:
    Dict with keys '.c' and '.cc', each mapping to the (possibly empty)
    list of matching file names, in input order.
  """
  source_map = {}
  for ext in ('.c', '.cc'):
    # The comprehension already yields [] when nothing matches, so the old
    # explicit if/else empty-list branch was redundant.
    source_map[ext] = [fname for fname in sources if fname.endswith(ext)]
  return source_map
def GetProjectObjects(source_dict):
  """Return object names (path stems) for all C/C++ sources in source_dict."""
  return [os.path.splitext(src)[0]
          for ext in ('.c', '.cc')
          for src in source_dict[ext]]
def GetPlatforms(plat_list, plat_filter, first_toolchain):
  """Return the platforms from plat_list that appear in plat_filter.

  When first_toolchain is true, only the first match is returned (input
  must therefore contain at least one match).
  """
  platforms = [plat for plat in plat_list if plat in plat_filter]
  if first_toolchain:
    return [platforms[0]]
  return platforms
def ErrorMsgFunc(text):
  """Emit text as a single line on stderr."""
  sys.stderr.write(text + '\n')
def AddMakeBat(pepperdir, makepath):
  """Create a simple batch file to execute Make.
  Creates a simple batch file named make.bat for the Windows platform at the
  given path, pointing to the Make executable in the SDK.

  NOTE(review): the file is opened 'wb' but written a str -- Python 2
  semantics; under Python 3 this write would need bytes.
  """
  makepath = os.path.abspath(makepath)
  # Sanity check: make.bat must live inside the SDK bundle so the relative
  # path computed below is meaningful.
  if not makepath.startswith(pepperdir):
    ErrorExit('Make.bat not relative to Pepper directory: ' + makepath)
  makeexe = os.path.abspath(os.path.join(pepperdir, 'tools'))
  relpath = os.path.relpath(makeexe, makepath)
  fp = open(os.path.join(makepath, 'make.bat'), 'wb')
  outpath = os.path.join(relpath, 'make.exe')
  # Since make.bat is only used by Windows, for Windows path style
  outpath = outpath.replace(os.path.sep, '\\')
  # '@' suppresses echo; '%*' forwards all batch arguments to make.exe.
  fp.write('@%s %%*\n' % outpath)
  fp.close()
def FindFile(name, srcroot, srcdirs):
  """Search srcroot/<dir>/name for each dir in srcdirs.

  Returns the absolute path of the first existing candidate, or None after
  reporting every path that was tried.
  """
  attempted = []
  for srcdir in srcdirs:
    candidate = os.path.abspath(os.path.join(srcroot, srcdir, name))
    if os.path.exists(candidate):
      return candidate
    attempted.append(candidate)
  ErrorMsgFunc('%s not found in:\n\t%s' % (name, '\n\t'.join(attempted)))
  return None
def IsNexe(desc):
  """True if any target in desc builds a main executable (.nexe)."""
  return any(target['TYPE'] == 'main' for target in desc['TARGETS'])
def ProcessHTML(srcroot, dstroot, desc, toolchains, configs, first_toolchain):
  """Instantiate the index.html template for one example project."""
  project_name = desc['NAME']
  nmf_name = desc['TARGETS'][0]['NAME']
  src_path = os.path.join(srcroot, 'index.html')
  dst_path = os.path.join(dstroot, desc['DEST'], project_name, 'index.html')
  tools = GetPlatforms(toolchains, desc['TOOLS'], first_toolchain)
  # The data-* attributes drive common.js's toolchain/config selection UI.
  attrs = 'data-name="%s" data-tools="%s" data-configs="%s" data-path="%s"' % (
      nmf_name, ' '.join(tools), ' '.join(configs), "{tc}/{config}")
  RunTemplateFileIfChanged(src_path, dst_path,
                           {'title': desc['TITLE'], 'attrs': attrs})
def GenerateManifest(srcroot, dstroot, desc):
  """Write the Chrome app manifest.json for an example project."""
  out_dir = os.path.join(dstroot, desc['DEST'], desc['NAME'])
  template_path = os.path.join(SDK_RESOURCE_DIR, 'manifest.json.template')
  manifest_path = os.path.join(out_dir, 'manifest.json')
  # Base permissions plus the nested socket/fileSystem permission objects.
  perms = list(desc.get('PERMISSIONS', []))
  for dsc_key, manifest_key in (('SOCKET_PERMISSIONS', 'socket'),
                                ('FILESYSTEM_PERMISSIONS', 'fileSystem')):
    values = desc.get(dsc_key, [])
    if values:
      perms.append({manifest_key: values})
  pretty_permissions = json.dumps(perms, sort_keys=True, indent=4)
  replace = {
      'name': desc['TITLE'],
      'description': '%s Example' % desc['TITLE'],
      'key': True,
      'channel': None,
      'permissions': pretty_permissions,
      'multi_platform': desc.get('MULTI_PLATFORM', False),
      'version': build_version.ChromeVersionNoTrunk(),
      'min_chrome_version': desc.get('MIN_CHROME_VERSION')
  }
  RunTemplateFileIfChanged(template_path, manifest_path, replace)
def FindAndCopyFiles(src_files, root, search_dirs, dst_dir):
  """Copy each named file into dst_dir, skipping up-to-date destinations."""
  buildbot_common.MakeDir(dst_dir)
  for src_name in src_files:
    src_file = FindFile(src_name, root, search_dirs)
    if not src_file:
      ErrorExit('Failed to find: ' + src_name)
    dst_file = os.path.join(dst_dir, src_name)
    # Skip the copy when the destination is already at least as new.
    if os.path.exists(dst_file):
      if os.stat(src_file).st_mtime <= os.stat(dst_file).st_mtime:
        Trace('Skipping "%s", destination "%s" is newer.' % (
            src_file, dst_file))
        continue
    dst_path = os.path.dirname(dst_file)
    if not os.path.exists(dst_path):
      buildbot_common.MakeDir(dst_path)
    buildbot_common.CopyFile(src_file, dst_file)
def ModifyDescInPlace(desc):
  """Perform post-load processing on .dsc file data.

  Currently this consists of:
  - Add -Wall to CXXFLAGS
  """
  for target in desc['TARGETS']:
    flags = target.setdefault('CXXFLAGS', [])
    flags.insert(0, '-Wall')
def ProcessProject(pepperdir, srcroot, dstroot, desc, toolchains, configs=None,
                   first_toolchain=False):
  """Copy one project's files into the SDK tree and emit its Makefile.

  Args:
    pepperdir: SDK bundle root (used for make.bat path rewriting on Windows).
    srcroot: Directory holding the project's source files.
    dstroot: Output root; the project lands in dstroot/DEST/NAME.
    desc: Parsed .dsc project description dict.
    toolchains: Toolchain names to generate rules for.
    configs: Build configurations; defaults to ['Debug', 'Release'].
    first_toolchain: If True, use only the first toolchain matching TOOLS.

  Returns:
    Tuple (project name, DEST subdirectory).
  """
  if not configs:
    configs = ['Debug', 'Release']
  name = desc['NAME']
  out_dir = os.path.join(dstroot, desc['DEST'], name)
  buildbot_common.MakeDir(out_dir)
  srcdirs = desc.get('SEARCH', ['.', SDK_RESOURCE_DIR])
  # Copy sources to example directory
  sources = GenerateSourceCopyList(desc)
  FindAndCopyFiles(sources, srcroot, srcdirs, out_dir)
  # Copy public headers to the include directory.
  for headers_set in desc.get('HEADERS', []):
    headers = headers_set['FILES']
    header_out_dir = os.path.join(dstroot, headers_set['DEST'])
    FindAndCopyFiles(headers, srcroot, srcdirs, header_out_dir)
  make_path = os.path.join(out_dir, 'Makefile')
  outdir = os.path.dirname(os.path.abspath(make_path))
  if getos.GetPlatform() == 'win':
    AddMakeBat(pepperdir, outdir)
  # If this project has no TARGETS, then we don't need to generate anything.
  if 'TARGETS' not in desc:
    return (name, desc['DEST'])
  # Projects with a 'main' target are examples; others build as libraries.
  if IsNexe(desc):
    template = os.path.join(SDK_RESOURCE_DIR, 'Makefile.example.template')
  else:
    template = os.path.join(SDK_RESOURCE_DIR, 'Makefile.library.template')
  # Ensure the order of |tools| is the same as toolchains; that way if
  # first_toolchain is set, it will choose based on the order of |toolchains|.
  tools = [tool for tool in toolchains if tool in desc['TOOLS']]
  if first_toolchain:
    tools = [tools[0]]
  ModifyDescInPlace(desc)
  template_dict = {
    'desc': desc,
    # Path from the project dir back up to the SDK root (one '..' per DEST
    # component, plus one for the project directory itself).
    'rel_sdk': '/'.join(['..'] * (len(desc['DEST'].split('/')) + 1)),
    'pre': desc.get('PRE', ''),
    'post': desc.get('POST', ''),
    'tools': tools,
    'sel_ldr': desc.get('SEL_LDR'),
    'targets': desc['TARGETS'],
    'multi_platform': desc.get('MULTI_PLATFORM', False)
  }
  RunTemplateFileIfChanged(template, make_path, template_dict)
  if IsExample(desc):
    ProcessHTML(srcroot, dstroot, desc, toolchains, configs,
                first_toolchain)
    if not desc.get('NO_PACKAGE_FILES'):
      GenerateManifest(srcroot, dstroot, desc)
  return (name, desc['DEST'])
def GenerateMasterMakefile(pepperdir, out_path, targets, deps):
  """Generate a Master Makefile that builds all examples.

  Args:
    pepperdir: NACL_SDK_ROOT
    out_path: Root for output such that out_path+NAME = full path
    targets: List of targets names
  """
  template = os.path.join(SDK_RESOURCE_DIR, 'Makefile.index.template')
  makefile = os.path.join(out_path, 'Makefile')
  template_dict = {
      'projects': targets,
      'deps': deps,
      'rel_sdk': os.path.relpath(pepperdir, os.path.dirname(makefile)),
  }
  RunTemplateFileIfChanged(template, makefile, template_dict)
  if getos.GetPlatform() == 'win':
    AddMakeBat(pepperdir, os.path.dirname(os.path.abspath(makefile)))
| bsd-3-clause |
ThirdProject/android_external_chromium_org | tools/perf/benchmarks/endure.py | 23 | 1390 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from measurements import endure
class _EndureBenchmark(test.Test):
  """Base class for endurance benchmarks driven by the Endure measurement."""
  test = endure.Endure
  # Default options for endure benchmarks. Could be overridden in subclasses.
  options = {
      'output_format': 'csv',
      # Pages are repeated in place rather than re-navigated each cycle.
      'skip_navigate_on_repeat': True,
      # Each page runs for two hours, sampling perf stats every 100 seconds.
      'page_repeat_secs': 7200,
      'perf_stats_interval': '100s'
  }
class EndureCalendarForwardBackward(_EndureBenchmark):
  """Endurance run over the calendar forward/backward page set."""
  page_set = 'page_sets/calendar_forward_backward.json'
class EndureBrowserControl(_EndureBenchmark):
  """Endurance run over the browser_control page set."""
  page_set = 'page_sets/browser_control.json'
class EndureBrowserControlClick(_EndureBenchmark):
  """Endurance run over the browser_control_click page set."""
  page_set = 'page_sets/browser_control_click.json'
class EndureGmailAltThreadlistConversation(_EndureBenchmark):
  """Endurance run alternating Gmail threadlist and conversation views."""
  page_set = 'page_sets/gmail_alt_threadlist_conversation.json'
class EndureGmailAltTwoLabels(_EndureBenchmark):
  """Endurance run alternating between two Gmail labels."""
  page_set = 'page_sets/gmail_alt_two_labels.json'
class EndureGmailExpandCollapseConversation(_EndureBenchmark):
  """Endurance run expanding/collapsing a Gmail conversation."""
  page_set = 'page_sets/gmail_expand_collapse_conversation.json'
class EndureIndexedDBOffline(_EndureBenchmark):
  """Endurance run over the IndexedDB offline page set."""
  page_set = 'page_sets/indexeddb_offline.json'
class EndurePlusAltPostsPhotos(_EndureBenchmark):
  """Endurance run alternating Google+ posts and photos views."""
  page_set = 'page_sets/plus_alt_posts_photos.json'
| bsd-3-clause |
s3h10r/avaon | django-avaon/acore/views/visualize.py | 1 | 6406 | """
statistics - heatmaps of impacts
"""
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import loader,Context,RequestContext
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
# --- if no x-server is available, comment this out
#import matplotlib
#matplotlib.use("Cairo") # cairo-backend, so we can create figures without x-server (PDF & PNG ...)
# Cairo backend requires that pycairo is installed.
# --- end no x-server
def _calc_stats_heatmap(id=None):
    """
    create a datstructure which gives us info about the density of impacts
    per weekday & hour
    returns
    data[weekday][hour] = nr. of impacts
    """
    """
    TODO: write a test for me
    """
    import datetime as dt
    from acore.models import Impact, Thing
    # Either all impacts, or only those belonging to one Thing.
    if id:
        imps = Impact.objects.filter(thing = id)
    else:
        imps = Impact.objects.all()
    # --- init empty data[weekday][hour]
    # data = [[0] *24]*7  # data[weekday][hour] # BUG! try: data[0][1] && print data (reference?)
    # (the commented-out form shares ONE inner list across all 7 rows)
    data = []
    for i in range(7):
        data.append([0]*24)
    # ---
    # Walk each impact in one-hour steps, incrementing the bucket of every
    # (weekday, hour) cell the impact overlaps.
    delta = dt.timedelta(seconds=60*60)
    for im in imps:
        #print im.t_from, "+", im.duration, "==", im.duration.seconds, 's'
        #print " "*4, im.t_from.weekday()
        travel_t = im.t_from
        # NOTE(review): ``dur`` is accumulated but never read -- candidate
        # for removal.
        dur = 0
        while True:
            wd = travel_t.weekday()
            h = travel_t.hour
            data[wd][h] += 1
            dur += delta.seconds
            travel_t += delta
            # Stop once we have stepped past t_to AND left its hour bucket.
            # NOTE(review): assumes t_to >= t_from; confirm the model
            # guarantees that, otherwise the first condition is immediately
            # true and only the hour check bounds the loop.
            if ((im.t_to - travel_t) < delta) and (im.t_to.hour != travel_t.hour):
                break
    return data
def _plot_heatmap(fig, ax, data, y_label="y_label", x_label="x_label", title="title"):
    """Draw data[weekday][hour] as a colour-mapped heatmap onto *ax*.

    :param fig: matplotlib Figure the axes belong to (used for the colorbar
                and layout).
    :param ax: target Axes.
    :param data: nested list, 7 rows (weekdays) x 24 columns (hours).
    """
    import numpy as np
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    import matplotlib.pyplot as plt
    ax.set_title(title, fontstyle='italic')
    ax.set_ylabel(y_label)
    ax.set_xlabel(x_label)
    ticks_x = 24
    ticks_y = 7
    # pcolor needs an ndarray; plain nested lists are not accepted.
    data = np.array(data)
    # BUG FIX: pcolor used to be called twice, painting the mesh a second
    # time and discarding the first QuadMesh; draw once and keep the handle
    # for the colorbar.
    pc = ax.pcolor(data, cmap=plt.cm.Blues)
    fig.colorbar(pc, shrink=0.5)
    # Shift ticks to be at 0.5, 1.5, etc
    # http://stackoverflow.com/questions/24190858/matplotlib-move-ticklabels-between-ticks
    ax.yaxis.set(ticks=np.arange(0.5, ticks_y + 0.5), ticklabels=range(0, ticks_y))
    ax.xaxis.set(ticks=np.arange(0.5, ticks_x + 0.5), ticklabels=range(0, ticks_x))
    ax.set_xlim(0.0, ticks_x)
    ax.set_ylim(0.0, ticks_y)
    ax.xaxis.labelpad = 10
    fig.gca().set_aspect('equal')
    fig.tight_layout()  # not as good as savefig(bbox_inches="tight")?
def heatmap(request, id=None):
    """
    plot a "impact density heatmap" in format data[weekday][hour]
    for all available impacts or for Thing.id = id
    """
    """
    TODO: time-range param
    """
    import numpy as np
    import random
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    counts = _calc_stats_heatmap(id)
    fig = Figure(figsize=(10, 8), dpi=100)
    axes = fig.add_subplot(111)
    if id:
        from acore.models import Impact, Thing
        thing = Thing.objects.get(id=id)
        _plot_heatmap(fig, axes, counts, y_label="weekday", x_label="time",
                      title="'%s' - impact density #heatmap\n" % thing.name)
    else:
        _plot_heatmap(fig, axes, counts, y_label="weekday", x_label="time",
                      title="impact density #heatmap\n")
    # cool, Django's HttpResponse object supports file-like API :)
    # so we dunnot need StringIO
    response = HttpResponse(content_type="image/png")
    # bbox_inches 'tight' removes lot of unwanted whitespace around our plot
    fig.savefig(response, format="png", bbox_inches='tight')
    return response
def demo_heatmap(request):
    """
    returns matplotlib plot as selftest

    Renders a 7x24 grid of random integers as a PNG heatmap -- a smoke test
    for the matplotlib/Cairo stack, independent of any database content.
    """
    """
    http://stackoverflow.com/questions/14391959/heatmap-in-matplotlib-with-pcolor
    http://stackoverflow.com/questions/15988413/python-pylab-pcolor-options-for-publication-quality-plots
    http://stackoverflow.com/questions/24190858/matplotlib-move-ticklabels-between-ticks
    http://matplotlib.org/faq/usage_faq.html#matplotlib-pylab-and-pyplot-how-are-they-related
    """
    import django
    import numpy as np
    import random
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    import matplotlib.pyplot as plt
    fig = Figure(figsize=(10,8), dpi=100)
    ax = fig.add_subplot(111)
    ax.set_title("demo heatmap e.g. 'impact density'", fontstyle='italic')
    ax.set_ylabel("weekday")
    ax.set_xlabel("time in h")
    ticks_x = 24
    ticks_y = 7
    # Random demo data: 7 rows (weekdays) x 24 columns (hours).
    data = []
    for i in range(ticks_y):
        y = [ random.randint(0,10) for i in range(ticks_x) ]
        data.append(y)
    data = np.array(data)
    pc = ax.pcolor(data, cmap=plt.cm.Blues) # http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
    # Place ticks at cell centres (0.5, 1.5, ...) rather than cell edges.
    ax.yaxis.set_ticks(np.arange(0.5,ticks_y + 0.5),range(0,ticks_y))
    ax.xaxis.set_ticks(np.arange(0.5,ticks_x + 0.5),range(0,ticks_x))
    # http://stackoverflow.com/questions/12608788/changing-the-tick-frequency-on-x-or-y-axis-in-matplotlib
    import matplotlib.ticker as ticker
    #ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
    loc = ticker.MultipleLocator(base=1) # this locator puts ticks at regular intervals
    ax.xaxis.set_major_locator(loc)
    ax.xaxis.set_minor_locator(ticker.NullLocator())
    ax.set_xlim(0.0,ticks_x)
    ax.set_ylim(0.0,ticks_y)
    #ax.xaxis.labelpad = 20
    fig.colorbar(pc, shrink=0.5)
    #plt.gca().invert_yaxis()
    fig.gca().set_aspect('equal')
    # tight_layout() & bbox_inches 'tight' removes lot of unwanted
    # whitespace around our plot
    fig.tight_layout()# not as good as savefig(bbox_inches="tight")?
    # cool, Django's HttpResponse object supports file-like API :)
    # so we dunnot need StringIO
    response = django.http.HttpResponse(content_type='image/png')
    fig.savefig(response, format="png",bbox_inches='tight')
    return response
if __name__ == '__main__':
print _calc_stats_heatmap()
| gpl-2.0 |
BT-jmichaud/reporting-engine | report_custom_filename/model/ir_actions_report_xml.py | 3 | 1670 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class IrActionsReportXml(models.Model):
    """Extend report actions with a configurable download file name."""
    _inherit = 'ir.actions.report.xml'

    # Jinja2 expression evaluated at download time; `objects` is the browse
    # record list the report is being printed for (see module README).
    download_filename = fields.Char(
        'Download filename',
        help='Fill in this field to have a custom file name when downloading '
        'this report. This string is evaluated as a jinja2 expression.\n'
        'You can use python expressions, `objects` is a browse record list of '
        'the objects for which the report is being generated.\n'
        'Check for this list\'s length to determine if it is a report being '
        'printed for multiple records or not. You also have access to `o`, '
        'which is the first record in the list')
| agpl-3.0 |
danielvdao/TheAnimalFarm | venv/lib/python2.7/site-packages/pip/_vendor/requests/api.py | 637 | 4333 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.
    Returns :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) Float describing the timeout of the request.
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      <Response [200]>
    """
    # A throwaway Session gives each top-level call a fresh cookie jar and
    # connection pool; Session.request does all of the real work.
    return sessions.Session().request(method=method, url=url, **kwargs)
def get(url, **kwargs):
    r"""Sends a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # GET requests follow redirects unless the caller explicitly opts out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, **kwargs)
def options(url, **kwargs):
    r"""Sends a OPTIONS request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # OPTIONS requests follow redirects unless the caller opts out.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    r"""Sends a HEAD request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # Unlike GET/OPTIONS, HEAD does not follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
    r"""Sends a POST request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    return request('post', url, data=data, **kwargs)
def put(url, data=None, **kwargs):
    r"""Sends a PUT request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    r"""Sends a PATCH request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
    r"""Sends a DELETE request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    return request('delete', url, **kwargs)
| gpl-2.0 |
stevehof/location-ninja | lib/examples/auth/auth.py | 2 | 6376 | import os
from flask import Flask, url_for, redirect, render_template, request
from flask.ext.sqlalchemy import SQLAlchemy
from wtforms import form, fields, validators
from flask.ext import admin, login
from flask.ext.admin.contrib import sqla
from flask.ext.admin import helpers, expose
# Create Flask application
app = Flask(__name__)

# Create dummy secret key so we can use sessions.  NOTE: hard-coded for this
# example only; a real deployment must load a strong secret from config.
app.config['SECRET_KEY'] = '123456790'

# Configure a file-backed SQLite database for the example (created on first
# run next to this script; see build_sample_db / the __main__ guard).
app.config['DATABASE_FILE'] = 'sample_db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Create user model. For simplicity, it will store passwords in plain text.
# Obviously that's not right thing to do in real world application.
class User(db.Model):
    """Application user backed by SQLAlchemy.

    For simplicity this example stores passwords in plain text; obviously
    that is not the right thing to do in a real world application.
    """
    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String(100))
    last_name = db.Column(db.String(100))
    login = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120))
    password = db.Column(db.String(64))

    # Flask-Login integration
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        return self.id

    # Required for administrative interface
    def __unicode__(self):
        # Fix: the model defines ``login``, not ``username``; the original
        # ``return self.username`` raised AttributeError whenever the admin
        # interface rendered a User row.
        return self.login
# Define login and registration forms (for flask-login)
class LoginForm(form.Form):
    """Login form used by the admin index view (flask-login)."""
    login = fields.TextField(validators=[validators.required()])
    password = fields.PasswordField(validators=[validators.required()])

    def validate_login(self, field):
        """Reject the form when no user matches or the password differs."""
        matched_user = self.get_user()
        if matched_user is None:
            raise validators.ValidationError('Invalid user')
        if matched_user.password != self.password.data:
            raise validators.ValidationError('Invalid password')

    def get_user(self):
        """Look up the User row whose login matches the submitted value."""
        query = db.session.query(User).filter_by(login=self.login.data)
        return query.first()
class RegistrationForm(form.Form):
    """Registration form used by the admin index view."""
    login = fields.TextField(validators=[validators.required()])
    email = fields.TextField()
    password = fields.PasswordField(validators=[validators.required()])

    def validate_login(self, field):
        """Reject the form when the requested login is already taken."""
        duplicates = db.session.query(User).filter_by(login=self.login.data).count()
        if duplicates > 0:
            raise validators.ValidationError('Duplicate username')
# Initialize flask-login
def init_login():
    """Wire flask-login into the application and register the user loader."""
    manager = login.LoginManager()
    manager.init_app(app)

    # flask-login calls this on every request to rehydrate the session user.
    @manager.user_loader
    def load_user(user_id):
        return db.session.query(User).get(user_id)
# Create customized model view class
# Create customized model view class
class MyModelView(sqla.ModelView):
    """Model admin view that is only accessible to authenticated users."""

    def is_accessible(self):
        # Flask-Admin checks this per request; hide the view from anonymous
        # visitors.  NOTE: is_authenticated is a method in this (older)
        # flask-login API, a property in newer releases.
        return login.current_user.is_authenticated()
# Create customized index view class that handles login & registration
class MyAdminIndexView(admin.AdminIndexView):
    """Admin index view that also serves login, registration and logout."""

    @expose('/')
    def index(self):
        # Anonymous visitors are bounced to the login page.
        if not login.current_user.is_authenticated():
            return redirect(url_for('.login_view'))
        return super(MyAdminIndexView, self).index()

    @expose('/login/', methods=('GET', 'POST'))
    def login_view(self):
        """Authenticate the user on POST, otherwise render the login form."""
        login_form = LoginForm(request.form)
        if helpers.validate_form_on_submit(login_form):
            login.login_user(login_form.get_user())

        if login.current_user.is_authenticated():
            return redirect(url_for('.index'))
        self._template_args['form'] = login_form
        self._template_args['link'] = (
            "<p>Don't have an account? "
            '<a href="%s">Click here to register.</a></p>'
            % url_for('.register_view'))
        return super(MyAdminIndexView, self).index()

    @expose('/register/', methods=('GET', 'POST'))
    def register_view(self):
        """Create a new account on POST, otherwise render the form."""
        reg_form = RegistrationForm(request.form)
        if helpers.validate_form_on_submit(reg_form):
            new_user = User()
            reg_form.populate_obj(new_user)

            db.session.add(new_user)
            db.session.commit()

            login.login_user(new_user)
            return redirect(url_for('.index'))
        self._template_args['form'] = reg_form
        self._template_args['link'] = (
            '<p>Already have an account? '
            '<a href="%s">Click here to log in.</a></p>'
            % url_for('.login_view'))
        return super(MyAdminIndexView, self).index()

    @expose('/logout/')
    def logout_view(self):
        """End the session and return to the admin index."""
        login.logout_user()
        return redirect(url_for('.index'))
# Flask views
@app.route('/')
def index():
    """Render the public landing page."""
    return render_template('index.html')
# Initialize flask-login
init_login()

# Create the admin interface; MyAdminIndexView supplies login/registration.
# NOTE: this rebinds the imported ``admin`` module name to the Admin instance.
admin = admin.Admin(app, 'Auth', index_view=MyAdminIndexView(), base_template='my_master.html')

# Expose the User model in the admin (restricted to authenticated users).
admin.add_view(MyModelView(User, db.session))
def build_sample_db():
    """
    Populate a small db with some example entries.

    Drops and recreates all tables, adds a ``test``/``test`` user plus one
    user per (first name, last name) pair with a random plain-text password.
    """
    import string
    import random

    db.drop_all()
    db.create_all()

    test_user = User(login="test", password="test")
    db.session.add(test_user)

    first_names = [
        'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie','Sophie', 'Mia',
        'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica',
        'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy'
    ]
    last_names = [
        'Brown', 'Smith', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas',
        'Roberts', 'Khan', 'Lewis', 'Jackson', 'Clarke', 'James', 'Phillips', 'Wilson',
        'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander'
    ]

    # zip instead of indexing by range(len(...)); also avoids reusing the
    # loop variable name inside the password generator expression.
    for first, last in zip(first_names, last_names):
        user = User()
        user.first_name = first
        user.last_name = last
        user.login = user.first_name.lower()
        user.email = user.login + "@example.com"
        user.password = ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        db.session.add(user)
    db.session.commit()
    return
if __name__ == '__main__':

    # Build a sample db on the fly, if one does not exist yet.  The database
    # file lives next to this script (see DATABASE_FILE in app.config).
    app_dir = os.path.realpath(os.path.dirname(__file__))
    database_path = os.path.join(app_dir, app.config['DATABASE_FILE'])
    if not os.path.exists(database_path):
        build_sample_db()

    # Start app (debug mode: example use only).
    app.run(debug=True)
| gpl-3.0 |
Kongsea/tensorflow | tensorflow/python/summary/text_summary.py | 19 | 2872 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements text_summary in TensorFlow, with TensorBoard support.
The text_summary is a wrapper around the generic tensor_summary that takes a
string-type tensor and emits a TensorSummary op with SummaryMetadata that
notes that this summary is textual data for the TensorBoard text plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.summary_ops import tensor_summary
PLUGIN_NAME = "text"
def text_summary(name, tensor, collections=None):
  """Summarizes textual data.

  Text data summarized via this plugin will be visible in the Text Dashboard
  in TensorBoard. The standard TensorBoard Text Dashboard will render markdown
  in the strings, and will automatically organize 1d and 2d tensors into
  tables. Tensors with more than 2 dimensions are shown as a 2d subarray plus
  a warning (a behavior of the default TensorBoard text plugin, not of this
  api).

  Args:
    name: A name for the generated node. Will also serve as a series name in
      TensorBoard.
    tensor: a string-type Tensor to summarize.
    collections: Optional list of ops.GraphKeys. The collections to add the
      summary to. Defaults to [_ops.GraphKeys.SUMMARIES]

  Returns:
    A TensorSummary op that is configured so that TensorBoard will recognize
    that it contains textual data. The TensorSummary is a scalar `Tensor` of
    type `string` which contains `Summary` protobufs.

  Raises:
    ValueError: If tensor has the wrong type.
  """
  # The text plugin only understands string tensors; fail fast otherwise.
  if tensor.dtype != dtypes.string:
    raise ValueError("Expected tensor %s to have dtype string, got %s" %
                     (tensor.name, tensor.dtype))

  # Tagging the summary with the text plugin name is what makes TensorBoard
  # route it to the Text Dashboard.
  metadata = summary_pb2.SummaryMetadata(
      plugin_data=summary_pb2.SummaryMetadata.PluginData(
          plugin_name=PLUGIN_NAME))
  return tensor_summary(
      name=name,
      tensor=tensor,
      summary_metadata=metadata,
      collections=collections)
| apache-2.0 |
DinoCow/airflow | airflow/contrib/hooks/sagemaker_hook.py | 7 | 1288 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.sagemaker`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.hooks.sagemaker import ( # noqa
LogState,
Position,
SageMakerHook,
argmin,
secondary_training_status_changed,
secondary_training_status_message,
)
# Emit a DeprecationWarning at import time so callers of this legacy path are
# pointed at the supported provider module.  stacklevel=2 attributes the
# warning to the importing module rather than to this shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.sagemaker`.",
    DeprecationWarning,
    stacklevel=2,
)
| apache-2.0 |
batxes/4c2vhic | Six_mouse_models/Six_mouse_models_final_output_0.2_-0.1_11000/Six_mouse_models19402.py | 2 | 18203 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets = {}
surf_sets = {}

# ---------------------------------------------------------------------------
# Marker placement, refactored from 75 copy-pasted stanzas into a single
# table-driven loop.  Every particle uses the same radius; the colour
# presumably highlights particular particles in the generated model (green /
# orange versus the default grey) -- TODO confirm against the generator.
# ---------------------------------------------------------------------------
_RADIUS = 846
_GREY = (0.7, 0.7, 0.7)
_GREEN = (0, 1, 0)
_ORANGE = (1, 0.7, 0)

# One entry per particle, in placement order: ((x, y, z), colour).
_PARTICLES = [
    ((4618.43, 1546.44, 6472.06), _GREEN),
    ((5366.74, -551.767, 5159.39), _GREY),
    ((4204.71, -38.5966, 6512.14), _GREY),
    ((4792.49, 658.861, 6646.96), _GREY),
    ((4472.79, 1181.5, 6190.58), _GREY),
    ((4935.37, 1584.41, 7505.96), _GREY),
    ((6371.15, 388.041, 8764.51), _GREY),
    ((4457.77, 1728.72, 8347.7), _GREY),
    ((4459.09, 1989.73, 8169.55), _GREY),
    ((5135.06, 3001.11, 9121.13), _GREY),
    ((3170.9, 2363.38, 9209.58), _GREEN),
    ((2811.72, 2593.76, 7171.81), _GREY),
    ((3000.19, 2164.1, 9075.66), _GREY),
    ((4311.95, 3127.66, 8439.02), _GREY),
    ((3219.09, 2702.81, 8261.04), _GREY),
    ((1423.52, 2981.83, 7355.59), _GREY),
    ((2352.1, 3990.6, 8911.56), _GREY),
    ((2794.03, 3969.52, 8670.86), _GREY),
    ((3140.41, 3517.98, 7058.13), _GREY),
    ((2599.39, 2555, 6555.85), _GREY),
    ((3068.21, 3860.15, 7526.68), _GREEN),
    ((4663.12, 3365.38, 7492.89), _GREY),
    ((3206.08, 3032.39, 5787.5), _GREY),
    ((3349.38, 3531, 6332.44), _GREY),
    ((5086.98, 4619.8, 7357.62), _GREY),
    ((4621.12, 3889.19, 5498.42), _GREY),
    ((3093.8, 2969.16, 4825.48), _GREY),
    ((3495.45, 3593.96, 5801.34), _GREY),
    ((3468.9, 4364.7, 7393.46), _GREY),
    ((3301.88, 3386.69, 6203.72), _GREY),
    ((3510.89, 4769.22, 5980.18), _GREEN),
    ((4022.73, 5182.88, 4576.82), _GREY),
    ((3987.51, 4713.26, 4945.12), _GREY),
    ((4249.82, 3580.47, 4496.07), _GREY),
    ((5253.59, 3909.19, 5918.96), _GREY),
    ((5319.42, 4702.21, 4602.47), _GREY),
    ((6482.4, 4925.26, 7250.77), _ORANGE),
    ((6049.99, 4026.56, 4737.83), _GREY),
    ((5775.01, 5385.96, 5295.24), _GREY),
    ((5732.27, 5346.58, 2897.09), _ORANGE),
    ((5867.03, 6626.33, 4598.88), _GREY),
    ((5930.61, 7636.06, 3374.48), _GREY),
    ((5558.33, 7031.83, 3270.94), _GREY),
    ((6003.54, 7119.73, 3398.24), _GREY),
    ((5349.69, 6965.08, 2515.17), _GREY),
    ((5842.2, 6728.71, 3117.7), _GREY),
    ((5793.56, 7852.02, 2732.58), _GREY),
    ((6095.93, 7597.2, 1957.51), _GREY),
    ((6662.76, 7687.6, 2076.38), _GREY),
    ((7573.99, 7052.06, 3659.28), _GREY),
    ((5844.35, 7124.65, 3325.49), _GREY),
    ((7674.34, 6710.23, 3288.61), _GREEN),
    ((5561.84, 9325.67, 3952.64), _GREY),
    ((6268.58, 7247.05, 2888.14), _GREY),
    ((7158.75, 6858.61, 2001.06), _GREY),
    ((6032.33, 7514.06, 3080.43), _GREY),
    ((6557.07, 6817.16, 4114.53), _GREY),
    ((6633.41, 7327.62, 2082.9), _GREY),
    ((6485.03, 7839.19, 3759.78), _GREY),
    ((7383.62, 6497.36, 2778.53), _GREY),
    ((8035.94, 8024, 2097.07), _GREY),
    ((8713.09, 7293.58, 3907), _GREEN),
    ((9102.83, 7157.05, 5966.6), _GREY),
    ((8283.87, 8095.7, 4441.39), _GREY),
    ((6818.38, 9365.21, 3163.77), _GREY),
    ((8844.98, 9113.32, 3321.33), _GREY),
    ((7807.41, 9688.92, 2878.25), _GREY),
    ((6823.18, 9376.95, 4299.13), _GREY),
    ((7933.73, 10766.7, 5210.89), _GREY),
    ((7424.04, 9999.82, 4623.9), _GREY),
    ((8929.48, 10354, 3519.41), _GREY),
    ((7529.89, 9392.68, 4830.63), _GREEN),
    ((8918.55, 9585.61, 4618.44), _GREY),
    ((7614.48, 10228.8, 3700.83), _GREY),
    ((8807.94, 9405.19, 5080.94), _GREEN),
]

# Same behavior as the original unrolled code: create each named marker set
# once (in index order) and place a single marker in it.
for _index, (_position, _color) in enumerate(_PARTICLES):
    _name = "particle_%d geometry" % _index
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    s = marker_sets[_name]
    mark = s.place_marker(_position, _color, _RADIUS)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| gpl-3.0 |
puttarajubr/commcare-hq | custom/_legacy/mvp_docs/models.py | 2 | 2585 | import dateutil
from couchdbkit import ResourceNotFound
from couchdbkit.ext.django.loading import get_db
from casexml.apps.case.models import CommCareCase
from couchforms.models import XFormInstance
class IndicatorDocument(object):
    """Mixin for couch documents mirrored into the indicator database."""

    def is_update(self, doc_dict):
        """Return True when ``doc_dict`` is newer than this instance.

        Used to decide whether a document pulled from the main database
        should overwrite the copy stored in the indicator database.
        """
        raise NotImplementedError("is_update must be implemented")

    def exists_in_database(self):
        # A persisted couch document always carries a revision marker.
        return '_rev' in self._doc

    @classmethod
    def get_db(cls):
        """Resolve (and cache) the couch database for this particular app.

        A superclass may already have populated ``cls._db`` with the wrong
        database, so always look the handle up from the app label and
        overwrite the cache before returning it.
        """
        cls._db = get_db(getattr(cls._meta, "app_label"))
        return cls._db

    @classmethod
    def wrap_for_indicator_db(cls, doc_dict):
        """Wrap a doc pulled from the main db for saving to the indicator db.

        Like ``wrap``, except that ``_rev`` is rewritten to whatever the
        indicator database currently holds (so saving does not raise an
        update conflict), and ``_attachments`` stubs are dropped because
        stubs without attachment bodies would fail to save.
        """
        try:
            doc_dict['_rev'] = cls.get_db().get(doc_dict['_id'])['_rev']
        except ResourceNotFound:
            # Not in the indicator db yet: save it as a brand-new document.
            del doc_dict['_rev']
        doc_dict.pop('_attachments', None)
        return cls.wrap(doc_dict)
class IndicatorXForm(IndicatorDocument, XFormInstance):
    # XFormInstance mirrored into the indicator couch database.

    def save(self, **kwargs):
        # Tag the doc type and guarantee we never write back into the
        # source XFormInstance database.
        self.doc_type = 'IndicatorXForm'
        assert self.get_db().uri != XFormInstance.get_db().uri
        super(IndicatorXForm, self).save(**kwargs)

    def is_update(self, doc_dict):
        # Highly unlikely that an XForm will have been updated from prod.
        return False
class IndicatorCase(IndicatorDocument, CommCareCase):
    """CommCareCase mirrored into the indicator couch database."""

    def save(self, **kwargs):
        # Tag the doc type and refuse to write into the source case db.
        self.doc_type = 'IndicatorCase'
        assert self.get_db().uri != CommCareCase.get_db().uri
        super(IndicatorCase, self).save(**kwargs)

    def is_update(self, doc_dict):
        # The incoming doc is an update when its modification stamp is
        # strictly newer than the one we currently hold.
        incoming = dateutil.parser.parse(doc_dict['modified_on'])
        ours = dateutil.parser.parse(self._doc['modified_on'])
        return ours < incoming
| bsd-3-clause |
idatux/idatuxft | engine/xml/xslt/TextElement.py | 9 | 2357 | ########################################################################
#
# File Name: TextElement.py
#
#
"""
Implementation of the XSLT Spec text stylesheet element.
WWW: http://4suite.com/4XSLT e-mail: support@4suite.com
Copyright (c) 1999-2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from xml.dom import EMPTY_NAMESPACE
import xml.dom.ext
import xml.dom.Element
from xml.xpath import CoreFunctions
from xml.xslt import XsltElement, XsltException, Error
from xml.dom import Node
class TextElement(XsltElement):
    # xsl:text -- emits literal text, optionally with output escaping disabled.

    legalAttrs = ('disable-output-escaping',)

    def __init__(self, doc, uri=xml.xslt.XSL_NAMESPACE, localName='text', prefix='xsl', baseUri=''):
        XsltElement.__init__(self, doc, uri, localName, prefix, baseUri)
        return

    def setup(self):
        # Cache the escaping flag and in-scope namespaces; xsl:text may not
        # contain element children, only character data.
        self.__dict__['_disable_output_escaping'] = self.getAttributeNS(EMPTY_NAMESPACE, 'disable-output-escaping') == 'yes'
        self.__dict__['_nss'] = xml.dom.ext.GetAllNs(self)
        for child in self.childNodes:
            if child.nodeType == Node.ELEMENT_NODE:
                raise XsltException(Error.ILLEGAL_TEXT_CHILD)
        # Merge adjacent text nodes so firstChild holds all of the content.
        self.normalize()
        return

    def instantiate(self, context, processor):
        # Write this element's (already normalized) text content to the
        # processor's current output writer.
        if not self.firstChild:
            return (context,)
        if context.processorNss != self._nss:
            origState = context.copyNamespaces()
            context.setNamespaces(self._nss)
        else:
            origState = None
        value = self.firstChild and self.firstChild.data or ''
        if self._disable_output_escaping:
            processor.writers[-1].text(value, escapeOutput=0)
        else:
            processor.writers[-1].text(value)
        # Restore the caller's namespace state if we swapped it out above.
        origState and context.setNamespaces(origState)
        return (context,)

    # Pickle support: rebuild via __init__ args plus the cached setup() state.
    def __getinitargs__(self):
        return (None, self.namespaceURI, self.localName, self.prefix,
                self.baseUri)

    def __getstate__(self):
        base_state = XsltElement.__getstate__(self)
        new_state = (base_state, self._nss, self._disable_output_escaping)
        return new_state

    def __setstate__(self, state):
        XsltElement.__setstate__(self, state[0])
        self._nss = state[1]
        self._disable_output_escaping = state[2]
        return
| gpl-3.0 |
duqiao/django | django/contrib/gis/geos/prototypes/coordseq.py | 485 | 3049 | from ctypes import POINTER, c_double, c_int, c_uint
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
GEOSException, last_arg_byref,
)
# ## Error-checking routines specific to coordinate sequences. ##
def check_cs_op(result, func, cargs):
    "Checks the status code of a coordinate sequence operation."
    # GEOS reports failure for these routines with a zero status code;
    # any nonzero status is passed through unchanged.
    if not result:
        raise GEOSException('Could not set value on coordinate sequence')
    return result
def check_cs_get(result, func, cargs):
    "Checking the coordinate sequence retrieval."
    # Reuse the status-code check, then extract the actual value from the
    # by-reference output argument.
    check_cs_op(result, func, cargs)
    # Object in by reference, return its value.
    return last_arg_byref(cargs)
# ## Coordinate sequence prototype factory classes. ##
class CsInt(GEOSFuncFactory):
    "For coordinate sequence routines that return an integer."
    # Prototype: (coord seq ptr, unsigned int out-param) -> int status;
    # check_cs_get validates the status and unwraps the out-param.
    argtypes = [CS_PTR, POINTER(c_uint)]
    restype = c_int
    errcheck = staticmethod(check_cs_get)
class CsOperation(GEOSFuncFactory):
    "For coordinate sequence operations."
    restype = c_int

    def get_func(self, ordinate=False, get=False):
        # Build the ctypes prototype: getters take the double by reference
        # (and unwrap it via check_cs_get); setters take it by value.
        if get:
            # Get routines have double parameter passed-in by reference.
            self.errcheck = check_cs_get
            dbl_param = POINTER(c_double)
        else:
            self.errcheck = check_cs_op
            dbl_param = c_double
        if ordinate:
            # Get/Set ordinate routines have an extra uint parameter.
            self.argtypes = [CS_PTR, c_uint, c_uint, dbl_param]
        else:
            self.argtypes = [CS_PTR, c_uint, dbl_param]
        return super(CsOperation, self).get_func()
class CsOutput(GEOSFuncFactory):
    # For routines that return a new coordinate sequence pointer.
    restype = CS_PTR

    def get_func(self, argtypes):
        self.argtypes = argtypes
        return super(CsOutput, self).get_func()

    @staticmethod
    def errcheck(result, func, cargs):
        # A NULL pointer return signals an error inside GEOS.
        if not result:
            raise GEOSException(
                'Error encountered checking Coordinate Sequence returned from GEOS '
                'C function "%s".' % func.__name__
            )
        return result
# ## Coordinate Sequence ctypes prototypes ##
# Each factory call below binds one GEOS C function by name.

# Coordinate Sequence constructors & cloning.
cs_clone = CsOutput('GEOSCoordSeq_clone', [CS_PTR])
create_cs = CsOutput('GEOSCoordSeq_create', [c_uint, c_uint])
get_cs = CsOutput('GEOSGeom_getCoordSeq', [GEOM_PTR])

# Getting, setting ordinate
cs_getordinate = CsOperation('GEOSCoordSeq_getOrdinate', ordinate=True, get=True)
cs_setordinate = CsOperation('GEOSCoordSeq_setOrdinate', ordinate=True)

# For getting, x, y, z
cs_getx = CsOperation('GEOSCoordSeq_getX', get=True)
cs_gety = CsOperation('GEOSCoordSeq_getY', get=True)
cs_getz = CsOperation('GEOSCoordSeq_getZ', get=True)

# For setting, x, y, z
cs_setx = CsOperation('GEOSCoordSeq_setX')
cs_sety = CsOperation('GEOSCoordSeq_setY')
cs_setz = CsOperation('GEOSCoordSeq_setZ')

# These routines return size & dimensions.
cs_getsize = CsInt('GEOSCoordSeq_getSize')
cs_getdims = CsInt('GEOSCoordSeq_getDimensions')
| bsd-3-clause |
michaelpacer/dynd-python | dynd/tests/test_array_construct.py | 2 | 45514 | import sys
import unittest
from datetime import date
from dynd import nd, ndt
class TestScalarConstructor(unittest.TestCase):
    """Access-flag behavior of scalar construction via the nd.* factories."""

    def test_access_array(self):
        # Default access is readwrite; 'r' produces an immutable array.
        a = nd.array(1)
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.array(1, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.array(1, access='r')
        self.assertEqual(a.access_flags, 'immutable')

    def test_access_array_with_type(self):
        a = nd.array(1, type=ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.array(1, type=ndt.int32, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.array(1, type=ndt.int32, access='r')
        self.assertEqual(a.access_flags, 'immutable')

    def test_access_asarray(self):
        a = nd.asarray(1)
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.asarray(1, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.asarray(1, access='r')
        self.assertEqual(a.access_flags, 'immutable')

    def test_access_zeros(self):
        a = nd.zeros(ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.zeros(ndt.int32, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.zeros(ndt.int32, access='r')
        self.assertEqual(a.access_flags, 'immutable')

    def test_access_ones(self):
        a = nd.ones(ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.ones(ndt.int32, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.ones(ndt.int32, access='r')
        self.assertEqual(a.access_flags, 'immutable')

    def test_access_full(self):
        a = nd.full(ndt.int32, value=1)
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.full(ndt.int32, value=1, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.full(ndt.int32, value=1, access='r')
        self.assertEqual(a.access_flags, 'immutable')
class TestArrayConstruct(unittest.TestCase):
    """Type deduction when constructing from (possibly empty) python lists."""

    def test_empty_array(self):
        # Empty arrays default to int32
        a = nd.array([])
        self.assertEqual(nd.type_of(a), ndt.type('0 * int32'))
        self.assertEqual(a.shape, (0,))
        a = nd.array([[], [], []])
        self.assertEqual(nd.type_of(a), ndt.type('3 * 0 * int32'))
        self.assertEqual(a.shape, (3, 0))

    def test_empty_array_dtype(self):
        # An explicit dtype overrides the int32 default for empty input.
        a = nd.array([], dtype=ndt.int64)
        self.assertEqual(nd.type_of(a), ndt.type('0 * int64'))
        self.assertEqual(a.shape, (0,))
        # Todo: Need to reenable this failing test
        # a = nd.array([], dtype='Fixed * float64')
        # self.assertEqual(nd.type_of(a), ndt.type('0 * float64'))
        # self.assertEqual(a.shape, (0,))
        a = nd.array([], dtype='var * int16')
        self.assertEqual(nd.type_of(a), ndt.type('var * int16'))
        self.assertEqual(len(a), 0)
        a = nd.array([], dtype='0 * int16')
        self.assertEqual(nd.type_of(a), ndt.type('0 * int16'))
        self.assertEqual(len(a), 0)
        a = nd.array([], dtype='3 * int16')
        self.assertEqual(nd.type_of(a), ndt.type('0 * 3 * int16'))
        self.assertEqual(a.shape, (0, 3))
class TestTypedArrayConstructors(unittest.TestCase):
    """nd.empty/zeros/ones/full with types given as type objects, type
    strings, and shape arguments in various spellings."""

    def test_empty(self):
        # Constructor from scalar type
        a = nd.empty(ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.int32)
        # Constructor from type with fixed dimension
        a = nd.empty('3 * int32')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.make_fixed_dim(3, ndt.int32))
        self.assertEqual(a.shape, (3,))
        # Constructor from type with fixed dimension, accesskwarg
        a = nd.empty('3 * int32', access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.make_fixed_dim(3, ndt.int32))
        self.assertEqual(a.shape, (3,))
        # Can't create with access as immutable
        self.assertRaises(ValueError, nd.empty, '3 * int32', access='immutable')
        # Constructor from shape as single integer
        a = nd.empty(3, ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * int32'))
        self.assertEqual(a.shape, (3,))
        # Constructor from shape as tuple
        a = nd.empty((3,4), ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * 4 * int32'))
        self.assertEqual(a.shape, (3,4))
        # Constructor from shape as variadic arguments
        a = nd.empty(3, 4, ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * 4 * int32'))
        self.assertEqual(a.shape, (3,4))
        # Constructor from shape as variadic arguments, access kwarg
        a = nd.empty(3, 4, ndt.int32, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * 4 * int32'))
        self.assertEqual(a.shape, (3,4))
        # Can't create with access as immutable
        self.assertRaises(ValueError, nd.empty, 3, 4, ndt.int32, access='immutable')

    def check_constructor(self, cons, value):
        # Shared driver: `cons` is a filled-array constructor and `value`
        # the fill value it is expected to produce everywhere.
        # Constructor from scalar type
        a = cons(ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.int32)
        self.assertEqual(nd.as_py(a), value)
        # Constructor from type with fixed dimension
        a = cons('3 * int32')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.make_fixed_dim(3, ndt.int32))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a), [value]*3)
        # Constructor from shape as single integer
        a = cons(3, ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * int32'))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a), [value]*3)
        # Constructor from shape as tuple
        a = cons((3,4), ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * 4 * int32'))
        self.assertEqual(a.shape, (3,4))
        self.assertEqual(nd.as_py(a), [[value]*4]*3)
        # Constructor from shape as variadic arguments
        a = cons(3, 4, ndt.int32)
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * 4 * int32'))
        self.assertEqual(a.shape, (3,4))
        self.assertEqual(nd.as_py(a), [[value]*4]*3)
        # Constructor of a struct type
        a = cons(3, '{x: int32, y: int32}')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a),
                         ndt.type('3 * {x: int32, y: int32}'))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a),
                         [{'x': value, 'y': value}]*3)
        # Constructor of a struct type
        a = cons(3, ndt.make_struct([ndt.int32]*2, ['x', 'y']))
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a),
                         ndt.make_fixed_dim(3,
                             ndt.make_struct([ndt.int32]*2, ['x', 'y'])))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a),
                         [{'x': value, 'y': value}]*3)

    def check_constructor_readwrite(self, cons, value):
        # Same driver as check_constructor, with access='rw' made explicit.
        # Constructor from scalar type
        a = cons(ndt.int32, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.int32)
        self.assertEqual(nd.as_py(a), value)
        # Constructor from type with fixed dimension
        a = cons('3 * int32', access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.make_fixed_dim(3, ndt.int32))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a), [value]*3)
        # Constructor from shape as single integer
        a = cons(3, ndt.int32, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * int32'))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a), [value]*3)
        # Constructor from shape as tuple
        a = cons((3,4), ndt.int32, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * 4 * int32'))
        self.assertEqual(a.shape, (3,4))
        self.assertEqual(nd.as_py(a), [[value]*4]*3)
        # Constructor from shape as variadic arguments
        a = cons(3, 4, ndt.int32, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a), ndt.type('3 * 4 * int32'))
        self.assertEqual(a.shape, (3,4))
        self.assertEqual(nd.as_py(a), [[value]*4]*3)
        # Constructor of a struct type
        a = cons(3, '{x: int32, y: int32}', access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a),
                         ndt.type('3 * {x: int32, y: int32}'))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a),
                         [{'x': value, 'y': value}]*3)
        # Constructor of a struct type
        a = cons(3, ndt.make_struct([ndt.int32]*2, ['x', 'y']), access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a),
                         ndt.make_fixed_dim(3,
                             ndt.make_struct([ndt.int32]*2, ['x', 'y'])))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a),
                         [{'x': value, 'y': value}]*3)

    def test_zeros(self):
        self.check_constructor(nd.zeros, 0)
        self.check_constructor_readwrite(nd.zeros, 0)

    def test_ones(self):
        self.check_constructor(nd.ones, 1)
        self.check_constructor_readwrite(nd.ones, 1)

    def test_full(self):
        # Adapt nd.full to the driver signature by currying the fill value.
        def cons(value):
            def c(*args, **kwargs):
                kwargs['value'] = value
                return nd.full(*args, **kwargs)
            return c
        self.check_constructor(cons(1000), 1000)
        self.check_constructor_readwrite(cons(1000), 1000)
        self.check_constructor(cons(-21000), -21000)
        self.check_constructor_readwrite(cons(-21000), -21000)
        # Also check that 'value' is keyword-only
        a = nd.full(2, 3, ndt.float32, value=1.5)
        self.assertEqual(nd.as_py(a), [[1.5]*3]*2)
        self.assertRaises(TypeError, nd.full, 2, 3, ndt.float32, 1.5)

    def test_full_of_struct(self):
        # Constructor of a struct type
        a = nd.full(3, '{x: int32, y: int32}', value=[1,5], access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a),
                         ndt.type('3 * {x: int32, y: int32}'))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a),
                         [{'x': 1, 'y': 5}]*3)
        # Constructor of a struct type
        a = nd.full(3, ndt.make_struct([ndt.int32]*2, ['x', 'y']),
                    value={'x' : 3, 'y' : 10}, access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        self.assertEqual(nd.type_of(a),
                         ndt.make_fixed_dim(3,
                             ndt.make_struct([ndt.int32]*2, ['x', 'y'])))
        self.assertEqual(a.shape, (3,))
        self.assertEqual(nd.as_py(a),
                         [{'x': 3, 'y': 10}]*3)
class TestArrayConstructor(unittest.TestCase):
    # Always constructs a new array

    def test_simple(self):
        a = nd.array([1, 2, 3], access='rw')
        self.assertEqual(nd.type_of(a), ndt.type('3 * int32'))
        # Modifying 'a' shouldn't affect 'b', because it's a copy
        b = nd.array(a)
        a[1] = 10
        self.assertEqual(nd.as_py(b), [1, 2, 3])

    def test_access_from_pyobject(self):
        a = nd.array([1, 2, 3])
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.array([1, 2, 3], access='immutable')
        self.assertEqual(a.access_flags, 'immutable')
        a = nd.array([1, 2, 3], access='readonly')
        self.assertEqual(a.access_flags, 'immutable')
        a = nd.array([1, 2, 3], access='r')
        self.assertEqual(a.access_flags, 'immutable')
        a = nd.array([1, 2, 3], access='readwrite')
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.array([1, 2, 3], access='rw')
        self.assertEqual(a.access_flags, 'readwrite')

    def test_access_from_immutable_array(self):
        # `a` is an immutable array
        a = nd.array([1, 2, 3], access='r')
        self.assertEqual(a.access_flags, 'immutable')
        b = nd.array(a)
        self.assertEqual(b.access_flags, 'readwrite')
        b = nd.array(a, access='immutable')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.array(a, access='readonly')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.array(a, access='r')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.array(a, access='readwrite')
        self.assertEqual(b.access_flags, 'readwrite')
        b = nd.array(a, access='rw')
        self.assertEqual(b.access_flags, 'readwrite')

    def test_access_from_readwrite_array(self):
        # `a` is a readwrite array
        a = nd.array([1, 2, 3], access='rw')
        self.assertEqual(a.access_flags, 'readwrite')
        b = nd.array(a)
        self.assertEqual(b.access_flags, 'readwrite')
        b = nd.array(a, access='immutable')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.array(a, access='readonly')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.array(a, access='r')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.array(a, access='readwrite')
        self.assertEqual(b.access_flags, 'readwrite')
        b = nd.array(a, access='rw')
        self.assertEqual(b.access_flags, 'readwrite')
class TestViewConstructor(unittest.TestCase):
    # Always constructs a view

    def test_simple(self):
        a = nd.array([1, 2, 3], access='rw')
        # Modifying 'a' should affect 'b', because it's a view
        b = nd.view(a)
        a[1] = 10
        self.assertEqual(nd.as_py(b), [1, 10, 3])
        # Can't construct a view of a python list
        self.assertRaises(RuntimeError, nd.view, [1, 2, 3])

    def test_access_from_immutable_array(self):
        # `a` is an immutable array
        a = nd.array([1, 2, 3], access='r')
        b = nd.view(a)
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.view(a, access='immutable')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.view(a, access='readonly')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.view(a, access='r')
        self.assertEqual(b.access_flags, 'immutable')
        # Can't create a readwrite view from a readonly array
        self.assertRaises(RuntimeError, nd.view, b, access='readwrite')
        self.assertRaises(RuntimeError, nd.view, b, access='rw')

    def test_access_from_readwrite_array(self):
        # `a` is a readwrite array
        a = nd.array([1, 2, 3], access='rw')
        b = nd.view(a)
        self.assertEqual(b.access_flags, 'readwrite')
        # Can't create an immutable view of a readwrite array
        self.assertRaises(RuntimeError, nd.view, a, access='immutable')
        b = nd.view(a, access='readonly')
        self.assertEqual(b.access_flags, 'readonly')
        b = nd.view(a, access='r')
        self.assertEqual(b.access_flags, 'readonly')
        b = nd.view(a, access='readwrite')
        self.assertEqual(b.access_flags, 'readwrite')
        b = nd.view(a, access='rw')
        self.assertEqual(b.access_flags, 'readwrite')
class TestAsArrayConstructor(unittest.TestCase):
    # Constructs a view if possible, otherwise a copy

    def test_simple(self):
        a = nd.asarray([1, 2, 3], access='rw')
        self.assertEqual(nd.type_of(a), ndt.type('3 * int32'))
        # Modifying 'a' should affect 'b', because it's a view
        b = nd.asarray(a)
        self.assertEqual(nd.as_py(b), [1, 2, 3])
        a[1] = 10
        self.assertEqual(nd.as_py(b), [1, 10, 3])
        # Can take a readonly view, but still modify the original
        b = nd.asarray(a, access='r')
        self.assertEqual(nd.as_py(b), [1, 10, 3])
        a[1] = 20
        self.assertEqual(nd.as_py(b), [1, 20, 3])
        # The readonly view we took can't be written to
        def assign_at(x, i, y):
            x[i] = y
        self.assertRaises(RuntimeError, assign_at, b, 1, 30)
        # Asking for immutable makes a copy instead of a view
        b = nd.asarray(a, access='immutable')
        self.assertEqual(nd.as_py(b), [1, 20, 3])
        a[1] = 40
        self.assertEqual(nd.as_py(b), [1, 20, 3])
        # Asking for immutable from a non-immutable
        # readonly array makes a copy
        aprime = nd.asarray(a, access='r')
        b = nd.asarray(aprime, access='immutable')
        self.assertEqual(nd.as_py(aprime), [1, 40, 3])
        self.assertEqual(nd.as_py(b), [1, 40, 3])
        a[1] = 50
        self.assertEqual(nd.as_py(aprime), [1, 50, 3])
        self.assertEqual(nd.as_py(b), [1, 40, 3])

    def test_access_from_pyobject(self):
        a = nd.asarray([1, 2, 3])
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.asarray([1, 2, 3], access='immutable')
        self.assertEqual(a.access_flags, 'immutable')
        a = nd.asarray([1, 2, 3], access='readonly')
        self.assertEqual(a.access_flags, 'immutable')
        a = nd.asarray([1, 2, 3], access='r')
        self.assertEqual(a.access_flags, 'immutable')
        a = nd.asarray([1, 2, 3], access='readwrite')
        self.assertEqual(a.access_flags, 'readwrite')
        a = nd.asarray([1, 2, 3], access='rw')
        self.assertEqual(a.access_flags, 'readwrite')

    def test_access_from_immutable_array(self):
        # `a` is an immutable array
        a = nd.array([1, 2, 3], access='r')
        b = nd.asarray(a)
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.asarray(a, access='immutable')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.asarray(a, access='readonly')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.asarray(a, access='r')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.asarray(a, access='readwrite')
        self.assertEqual(b.access_flags, 'readwrite')
        b = nd.asarray(a, access='rw')
        self.assertEqual(b.access_flags, 'readwrite')

    def test_access_from_readwrite_array(self):
        # `a` is a readwrite array
        a = nd.array([1, 2, 3], access='rw')
        b = nd.asarray(a)
        self.assertEqual(b.access_flags, 'readwrite')
        b = nd.asarray(a, access='immutable')
        self.assertEqual(b.access_flags, 'immutable')
        b = nd.asarray(a, access='readonly')
        self.assertEqual(b.access_flags, 'readonly')
        b = nd.asarray(a, access='r')
        self.assertEqual(b.access_flags, 'readonly')
        b = nd.asarray(a, access='readwrite')
        self.assertEqual(b.access_flags, 'readwrite')
        b = nd.asarray(a, access='rw')
        self.assertEqual(b.access_flags, 'readwrite')
class TestStringConstruct(unittest.TestCase):
    """Construction of string/fixed_string arrays from str and unicode."""

    def test_string(self):
        a = nd.array('abc', type=ndt.string)
        self.assertEqual(nd.type_of(a), ndt.string)
        a = nd.array('abc', dtype=ndt.string)
        self.assertEqual(nd.type_of(a), ndt.string)

    def test_unicode(self):
        a = nd.array(u'abc', type=ndt.string)
        self.assertEqual(nd.type_of(a), ndt.string)
        a = nd.array(u'abc', dtype=ndt.string)
        self.assertEqual(nd.type_of(a), ndt.string)

    def test_string_array(self):
        a = nd.array(['this', 'is', 'a', 'test'],
                     dtype=ndt.string)
        self.assertEqual(nd.type_of(a), ndt.type('4 * string'))
        self.assertEqual(nd.as_py(a), ['this', 'is', 'a', 'test'])
        a = nd.array(['this', 'is', 'a', 'test'],
                     dtype='string["U16"]')
        self.assertEqual(nd.type_of(a), ndt.type('4 * string["U16"]'))
        self.assertEqual(nd.as_py(a), ['this', 'is', 'a', 'test'])

    def test_unicode_array(self):
        # Mixed str/unicode input still deduces a uniform string type.
        a = nd.array([u'this', 'is', u'a', 'test'],
                     dtype=ndt.string)
        self.assertEqual(nd.type_of(a), ndt.type('4 * string'))
        self.assertEqual(nd.as_py(a), ['this', 'is', 'a', 'test'])
        a = nd.array([u'this', 'is', u'a', 'test'],
                     dtype='string["U16"]')
        self.assertEqual(nd.type_of(a), ndt.type('4 * string["U16"]'))
        self.assertEqual(nd.as_py(a), ['this', 'is', 'a', 'test'])

    def test_fixed_string_array(self):
        a = nd.array(['a', 'b', 'c'],
                     dtype='fixed_string[1,"A"]')
        self.assertEqual(nd.type_of(a[0]).type_id, 'fixed_string')
        self.assertEqual(nd.type_of(a[0]).data_size, 1)
        self.assertEqual(nd.as_py(a), ['a', 'b', 'c'])
class TestStructConstruct(unittest.TestCase):
    """Construction of struct/tuple-typed arrays from lists, tuples and
    dicts, including nested structs and field count mismatches."""

    def test_single_struct(self):
        a = nd.array([12, 'test', True], type='{x:int32, y:string, z:bool}')
        self.assertEqual(nd.type_of(a), ndt.type('{x:int32, y:string, z:bool}'))
        self.assertEqual(nd.as_py(a[0]), 12)
        self.assertEqual(nd.as_py(a[1]), 'test')
        self.assertEqual(nd.as_py(a[2]), True)
        # With dtype= parameter instead of type=
        a = nd.array([12, 'test', True], dtype='{x:int32, y:string, z:bool}')
        self.assertEqual(nd.type_of(a), ndt.type('{x:int32, y:string, z:bool}'))
        self.assertEqual(nd.as_py(a[0]), 12)
        self.assertEqual(nd.as_py(a[1]), 'test')
        self.assertEqual(nd.as_py(a[2]), True)
        a = nd.array({'x':12, 'y':'test', 'z':True}, type='{x:int32, y:string, z:bool}')
        self.assertEqual(nd.type_of(a), ndt.type('{x:int32, y:string, z:bool}'))
        self.assertEqual(nd.as_py(a[0]), 12)
        self.assertEqual(nd.as_py(a[1]), 'test')
        self.assertEqual(nd.as_py(a[2]), True)
        # With dtype= parameter instead of type=
        a = nd.array({'x':12, 'y':'test', 'z':True}, dtype='{x:int32, y:string, z:bool}')
        self.assertEqual(nd.type_of(a), ndt.type('{x:int32, y:string, z:bool}'))
        self.assertEqual(nd.as_py(a[0]), 12)
        self.assertEqual(nd.as_py(a[1]), 'test')
        self.assertEqual(nd.as_py(a[2]), True)

    def test_nested_struct(self):
        a = nd.array([[1,2], ['test', 3.5], [3j]],
                     type='{x: 2 * int16, y: {a: string, b: float64}, z: 1 * complex[float32]}')
        self.assertEqual(nd.as_py(a.x), [1, 2])
        self.assertEqual(nd.as_py(a.y.a), 'test')
        self.assertEqual(nd.as_py(a.y.b), 3.5)
        self.assertEqual(nd.as_py(a.z), [3j])
        a = nd.array({'x':[1,2], 'y':{'a':'test', 'b':3.5}, 'z':[3j]},
                     type='{x: 2 * int16, y: {a: string, b: float64}, z: 1 * complex[float32]}')
        self.assertEqual(nd.as_py(a.x), [1, 2])
        self.assertEqual(nd.as_py(a.y.a), 'test')
        self.assertEqual(nd.as_py(a.y.b), 3.5)
        self.assertEqual(nd.as_py(a.z), [3j])

    def test_single_tuple_array(self):
        a = nd.array([(0,0), (3,5), (12,10)], dtype='(int32, int32)')
        self.assertEqual(nd.type_of(a), ndt.type('3 * (int32, int32)'))
        self.assertEqual(nd.as_py(a[:,0]), [0, 3, 12])
        self.assertEqual(nd.as_py(a[:,1]), [0, 5, 10])

    def test_single_struct_array(self):
        a = nd.array([(0,0), (3,5), (12,10)], dtype='{x:int32, y:int32}')
        self.assertEqual(nd.type_of(a), ndt.type('3 * {x:int32, y:int32}'))
        self.assertEqual(nd.as_py(a.x), [0, 3, 12])
        self.assertEqual(nd.as_py(a.y), [0, 5, 10])
        a = nd.array([{'x':0,'y':0}, {'x':3,'y':5}, {'x':12,'y':10}],
                     dtype='{x:int32, y:int32}')
        self.assertEqual(nd.type_of(a), ndt.type('3 * {x:int32, y:int32}'))
        self.assertEqual(nd.as_py(a.x), [0, 3, 12])
        self.assertEqual(nd.as_py(a.y), [0, 5, 10])
        # Ragged outer dimension deduces a var dim.
        a = nd.array([[(3, 'X')], [(10, 'L'), (12, 'M')]],
                     dtype='{count:int32, size:fixed_string[1,"A"]}')
        self.assertEqual(nd.type_of(a),
                         ndt.type('2 * var * {count:int32, size:fixed_string[1,"A"]}'))
        self.assertEqual(nd.as_py(a.count), [[3], [10, 12]])
        self.assertEqual(nd.as_py(a.size), [['X'], ['L', 'M']])
        a = nd.array([[{'count':3, 'size':'X'}],
                      [{'count':10, 'size':'L'}, {'count':12, 'size':'M'}]],
                     dtype='{count:int32, size:fixed_string[1,"A"]}')
        self.assertEqual(nd.type_of(a), ndt.type('2 * var * {count:int32, size:fixed_string[1,"A"]}'))
        self.assertEqual(nd.as_py(a.count), [[3], [10, 12]])
        self.assertEqual(nd.as_py(a.size), [['X'], ['L', 'M']])

    def test_nested_struct_array(self):
        a = nd.array([((0,1),0), ((2,2),5), ((100,10),10)],
                     dtype='{x:{a:int16, b:int16}, y:int32}')
        self.assertEqual(nd.type_of(a), ndt.type('3 * {x:{a:int16, b:int16}, y:int32}'))
        self.assertEqual(nd.as_py(a.x.a), [0, 2, 100])
        self.assertEqual(nd.as_py(a.x.b), [1, 2, 10])
        self.assertEqual(nd.as_py(a.y), [0, 5, 10])
        a = nd.array([{'x':{'a':0,'b':1},'y':0},
                      {'x':{'a':2,'b':2},'y':5},
                      {'x':{'a':100,'b':10},'y':10}],
                     dtype='{x:{a:int16, b:int16}, y:int32}')
        self.assertEqual(nd.type_of(a), ndt.type('3 * {x:{a:int16, b:int16}, y:int32}'))
        self.assertEqual(nd.as_py(a.x.a), [0, 2, 100])
        self.assertEqual(nd.as_py(a.x.b), [1, 2, 10])
        self.assertEqual(nd.as_py(a.y), [0, 5, 10])
        a = nd.array([[(3, ('X', 10))], [(10, ('L', 7)), (12, ('M', 5))]],
                     dtype='{count:int32, size:{name:fixed_string[1,"A"], id: int8}}')
        self.assertEqual(nd.type_of(a),
                         ndt.type('2 * var * {count:int32, size:{name:fixed_string[1,"A"], id: int8}}'))
        self.assertEqual(nd.as_py(a.count), [[3], [10, 12]])
        self.assertEqual(nd.as_py(a.size.name), [['X'], ['L', 'M']])
        self.assertEqual(nd.as_py(a.size.id), [[10], [7, 5]])

    def test_missing_field(self):
        # Too few values for the struct's fields broadcasts badly.
        self.assertRaises(nd.BroadcastError, nd.array,
                          [0, 1], type='{x:int32, y:int32, z:int32}')
        # With dtype= parameter instead of type=
        self.assertRaises(nd.BroadcastError, nd.array,
                          [0, 1], dtype='{x:int32, y:int32, z:int32}')
        self.assertRaises(nd.BroadcastError, nd.array,
                          {'x':0, 'z':1}, type='{x:int32, y:int32, z:int32}')
        # With dtype= parameter instead of type=
        self.assertRaises(nd.BroadcastError, nd.array,
                          {'x':0, 'z':1}, dtype='{x:int32, y:int32, z:int32}')

    def test_extra_field(self):
        # Too many values for the struct's fields broadcasts badly too.
        self.assertRaises(nd.BroadcastError, nd.array,
                          [0, 1, 2, 3], type='{x:int32, y:int32, z:int32}')
        # With dtype= parameter instead of type=
        self.assertRaises(nd.BroadcastError, nd.array,
                          [0, 1, 2, 3], dtype='{x:int32, y:int32, z:int32}')
        self.assertRaises(nd.BroadcastError, nd.array,
                          {'x':0,'y':1,'z':2,'w':3}, type='{x:int32, y:int32, z:int32}')
        # With dtype= parameter instead of type=
        self.assertRaises(nd.BroadcastError, nd.array,
                          {'x':0,'y':1,'z':2,'w':3}, dtype='{x:int32, y:int32, z:int32}')
class TestIteratorConstruct(unittest.TestCase):
# Test dynd construction from iterators
#
# NumPy's np.fromiter(x, dtype) becomes
# nd.array(x, type=ndt.make_var(dtype)')
#
# Additionally, dynd supports dynamically deducing the type as
# it processes the iterators, so nd.array(x) where x is an iterator
# should work well too.
def test_dynamic_fromiter_notype(self):
# When constructing from an empty iterator, defaults to int32
a = nd.array(x for x in [])
self.assertEqual(nd.type_of(a), ndt.type('0 * int32'))
self.assertEqual(nd.as_py(a), [])
def test_dynamic_fromiter_onetype(self):
# Constructing with an iterator like this uses a dynamic
# array construction method. In this simple case, we
# use generators that have a consistent type
# bool result
a = nd.array(iter([True, False]))
self.assertEqual(nd.type_of(a), ndt.type('2 * bool'))
self.assertEqual(nd.as_py(a), [True, False])
# int32 result
a = nd.array(iter([1, 2, True, False]))
self.assertEqual(nd.type_of(a), ndt.type('4 * int32'))
self.assertEqual(nd.as_py(a), [1, 2, 1, 0])
# int64 result
a = nd.array(iter([10000000000, 1, 2, True, False]))
self.assertEqual(nd.type_of(a), ndt.type('5 * int64'))
self.assertEqual(nd.as_py(a), [10000000000, 1, 2, 1, 0])
# float64 result
a = nd.array(iter([3.25, 10000000000, 1, 2, True, False]))
self.assertEqual(nd.type_of(a), ndt.type('6 * float64'))
self.assertEqual(nd.as_py(a), [3.25, 10000000000, 1, 2, 1, 0])
# complex[float64] result
a = nd.array(iter([3.25j, 3.25, 10000000000, 1, 2, True, False]))
self.assertEqual(nd.type_of(a), ndt.type('7 * complex[float64]'))
self.assertEqual(nd.as_py(a), [3.25j, 3.25, 10000000000, 1, 2, 1, 0])
# string result
a = nd.array(str(x) + 'test' for x in range(10))
self.assertEqual(nd.type_of(a), ndt.type('10 * string'))
self.assertEqual(nd.as_py(a), [str(x) + 'test' for x in range(10)])
# string result
a = nd.array(iter([u'test', 'test2']))
self.assertEqual(nd.type_of(a), ndt.type('2 * string'))
self.assertEqual(nd.as_py(a), [u'test', u'test2'])
# bytes result
if sys.version_info[0] >= 3:
a = nd.array(b'x'*x for x in range(10))
self.assertEqual(nd.type_of(a), ndt.type('10 * bytes'))
self.assertEqual(nd.as_py(a), [b'x'*x for x in range(10)])
    def test_dynamic_fromiter_booltypepromo(self):
        """Bool promotes to any numeric type but never to string/bytes."""
        # Test iterator construction cases promoting from a boolean
        # int32 result
        a = nd.array(iter([True, False, 3]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * int32'))
        self.assertEqual(nd.as_py(a), [1, 0, 3])
        # int64 result
        a = nd.array(iter([True, False, -10000000000]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * int64'))
        self.assertEqual(nd.as_py(a), [1, 0, -10000000000])
        # float64 result
        a = nd.array(iter([True, False, 3.25]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * float64'))
        self.assertEqual(nd.as_py(a), [1, 0, 3.25])
        # complex[float64] result
        a = nd.array(iter([True, False, 3.25j]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * complex[float64]'))
        self.assertEqual(nd.as_py(a), [1, 0, 3.25j])
        # Should raise an error mixing bool and string/bytes
        self.assertRaises(TypeError, nd.array, iter([True, False, "test"]))
        self.assertRaises(TypeError, nd.array, iter([True, False, u"test"]))
        self.assertRaises(TypeError, nd.array, iter([True, False, b"test"]))
    def test_dynamic_fromiter_int32typepromo(self):
        """int32 promotes to int64/float64/complex but never string/bytes."""
        # Test iterator construction cases promoting from an int32
        # int64 result
        a = nd.array(iter([1, 2, 10000000000]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * int64'))
        self.assertEqual(nd.as_py(a), [1, 2, 10000000000])
        # float64 result
        a = nd.array(iter([1, 2, 3.25]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * float64'))
        self.assertEqual(nd.as_py(a), [1, 2, 3.25])
        # complex[float64] result
        a = nd.array(iter([1, 2, 3.25j]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * complex[float64]'))
        self.assertEqual(nd.as_py(a), [1, 2, 3.25j])
        # Should raise an error mixing int32 and string/bytes
        self.assertRaises(TypeError, nd.array, iter([1, 2, "test"]))
        self.assertRaises(TypeError, nd.array, iter([1, 2, u"test"]))
        self.assertRaises(TypeError, nd.array, iter([1, 2, b"test"]))
    def test_dynamic_fromiter_int64typepromo(self):
        """int64 promotes to float64/complex but never string/bytes."""
        # Test iterator construction cases promoting from an int64
        # float64 result
        a = nd.array(iter([10000000000, 2, 3.25]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * float64'))
        self.assertEqual(nd.as_py(a), [10000000000, 2, 3.25])
        # complex[float64] result
        a = nd.array(iter([10000000000, 2, 3.25j]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * complex[float64]'))
        self.assertEqual(nd.as_py(a), [10000000000, 2, 3.25j])
        # Should raise an error mixing int64 and string/bytes
        self.assertRaises(TypeError, nd.array, iter([10000000000, 2, "test"]))
        self.assertRaises(TypeError, nd.array, iter([10000000000, 2, u"test"]))
        self.assertRaises(TypeError, nd.array, iter([10000000000, 2, b"test"]))
    def test_dynamic_fromiter_float64typepromo(self):
        """float64 promotes to complex[float64] but never string/bytes."""
        # Test iterator construction cases promoting from an float64
        # complex[float64] result
        a = nd.array(iter([3.25, 2, 3.25j]))
        self.assertEqual(nd.type_of(a), ndt.type('3 * complex[float64]'))
        self.assertEqual(nd.as_py(a), [3.25, 2, 3.25j])
        # Should raise an error mixing float64 and string/bytes
        self.assertRaises(TypeError, nd.array, iter([3.25, 2, "test"]))
        self.assertRaises(TypeError, nd.array, iter([3.25, 2, u"test"]))
        self.assertRaises(TypeError, nd.array, iter([3.25, 2, b"test"]))
    def test_dynamic_fromiter_complexfloat64typepromo(self):
        """complex[float64] never promotes to string/bytes."""
        # Test iterator construction cases promoting from an complex[float64]
        # Should raise an error mixing complex[float64] and string/bytes
        self.assertRaises(TypeError, nd.array, iter([3.25j, 2, "test"]))
        self.assertRaises(TypeError, nd.array, iter([3.25j, 2, u"test"]))
        self.assertRaises(TypeError, nd.array, iter([3.25j, 2, b"test"]))
    def test_simple_fromiter(self):
        """Generator input with an explicit var or fixed dimension type."""
        # Var dimension construction from a generator
        a = nd.array((2*x + 5 for x in range(10)), type='var * int32')
        self.assertEqual(nd.type_of(a), ndt.type('var * int32'))
        self.assertEqual(len(a), 10)
        self.assertEqual(nd.as_py(a), [2*x + 5 for x in range(10)])
        # Fixed dimension construction from a generator
        a = nd.array((2*x + 5 for x in range(10)), type='10 * int32')
        self.assertEqual(nd.type_of(a), ndt.type('10 * int32'))
        self.assertEqual(len(a), 10)
        self.assertEqual(nd.as_py(a), [2*x + 5 for x in range(10)])
        # Produce an error if it's a fixed dimension with too few elements
        self.assertRaises(nd.BroadcastError, nd.array,
                          (2*x + 5 for x in range(10)), type='11 * int32')
        # Produce an error if it's a fixed dimension with too many elements
        self.assertRaises(nd.BroadcastError, nd.array,
                          (2*x + 5 for x in range(10)), type='9 * int32')
        # Produce an error if it's a fixed dimension
        self.assertRaises(TypeError, nd.array,
                          (2*x + 5 for x in range(10)), type='Fixed * int32')
    def test_simple_fromiter_medsize(self):
        """A 100k-element generator exercises the dynamic buffer resizing."""
        # A bigger input to exercise the dynamic resizing a bit
        a = nd.array((2*x + 5 for x in range(100000)), type='var * int32')
        self.assertEqual(nd.type_of(a), ndt.type('var * int32'))
        self.assertEqual(len(a), 100000)
        self.assertEqual(nd.as_py(a), [2*x + 5 for x in range(100000)])
    def test_ragged_fromiter(self):
        """Nested iterators/lists build ragged (var-dimension) arrays."""
        # Strided array of var from list of iterators
        a = nd.array([(1+x for x in range(3)), (5*x - 10 for x in range(5)),
                        [2, 10]], type='Fixed * var * int32')
        self.assertEqual(nd.type_of(a), ndt.type('3 * var * int32'))
        self.assertEqual(nd.as_py(a),
                        [[1,2,3], [-10, -5, 0, 5, 10], [2, 10]])
        # Var array of var from iterator of iterators
        a = nd.array(((2*x for x in range(y)) for y in range(4)),
                        type='var * var * int32')
        self.assertEqual(nd.type_of(a), ndt.type('var * var * int32'))
        self.assertEqual(nd.as_py(a), [[], [0], [0, 2], [0, 2, 4]])
        # Range of ranges
        a = nd.array(range(i) for i in range(4))
        self.assertEqual(nd.as_py(a), [list(range(i)) for i in range(4)])
    def test_ragged_fromiter_typepromo(self):
        """Type promotion applies across rows of nested iterator input."""
        # 2D nested iterators
        vals = [[True, False],
                [False, 2, 3],
                [-10000000000],
                [True, 10, 3.125, 5.5j]]
        a = nd.array(iter(x) for x in vals)
        self.assertEqual(nd.type_of(a), ndt.type('4 * var * complex[float64]'))
        self.assertEqual(nd.as_py(a), vals)
        # 3D nested iterators
        vals = [[[True, True, True],
                 [False, False]],
                [[True, True, False],
                 [False, False, -1000, 10000000000],
                 [10, 20, 10]],
                [],
                [[],
                 [1.5],
                 []]]
        a = nd.array((iter(y) for y in x) for x in vals)
        self.assertEqual(nd.type_of(a), ndt.type('4 * var * var * float64'))
        self.assertEqual(nd.as_py(a), vals)
        # Iterator of lists (uniform lengths become a fixed dimension)
        vals = [[True, 2, 3],
                [4, 5, 6.5],
                [1, 2, 3]]
        a = nd.array(iter(vals))
        self.assertEqual(nd.type_of(a), ndt.type('3 * 3 * float64'))
        self.assertEqual(nd.as_py(a), vals)
        # Iterator starting with list, also including iterator
        vals = [[True, 2, 3],
                [4, 5, 6.5],
                [1, 2, 3]]
        a = nd.array(x for x in [vals[0], iter(vals[1]), vals[2]])
        self.assertEqual(nd.type_of(a), ndt.type('3 * 3 * float64'))
        self.assertEqual(nd.as_py(a), vals)
        # Iterator with lists, but ragged
        vals = [[1], [2, 3, 4], [5, 6]]
        a = nd.array(iter(vals))
        self.assertEqual(nd.type_of(a), ndt.type('3 * var * int32'))
        self.assertEqual(nd.as_py(a), vals)
        # Iterator starting with list, first raggedness is a short iterator
        vals = [[1, 2, 3], [4], [5, 6]]
        a = nd.array(x for x in [vals[0], iter(vals[1]), vals[2]])
        self.assertEqual(nd.type_of(a), ndt.type('3 * var * int32'))
        self.assertEqual(nd.as_py(a), vals)
        # Iterator starting with list, first raggedness is a long iterator
        vals = [[1], [2, 3, 4], [5, 6]]
        a = nd.array(x for x in [vals[0], iter(vals[1]), vals[2]])
        self.assertEqual(nd.type_of(a), ndt.type('3 * var * int32'))
        self.assertEqual(nd.as_py(a), vals)
    def test_ragged_fromlistofiter_typepromo(self):
        """Lists whose elements are iterators also promote element types."""
        # list of iterators
        vals = [[True, False],
                [False, 2, 3],
                [-10000000000],
                [True, 10, 3.125, 5.5j]]
        a = nd.array([iter(x) for x in vals])
        self.assertEqual(nd.type_of(a), ndt.type('4 * var * complex[float64]'))
        self.assertEqual(nd.as_py(a), vals)
        # list of list/iterator
        a = nd.array([[1,2,3], (1.5*x for x in range(4)), iter([-1, 1])])
        self.assertEqual(nd.type_of(a), ndt.type('3 * var * float64'))
        self.assertEqual(nd.as_py(a),
                        [[1,2,3], [1.5*x for x in range(4)], [-1,1]])
    def test_ragged_initial_empty_typepromo(self):
        """An empty first row doesn't prevent later rows from fixing the dtype."""
        # iterator of lists, first one is empty
        vals = [[],
                [False, 2, 3]]
        a = nd.array(iter(x) for x in vals)
        self.assertEqual(nd.type_of(a), ndt.type('2 * var * int32'))
        self.assertEqual(nd.as_py(a), vals)
    def test_dtype_fromiter(self):
        """Giving only a dtype lets the dimension be deduced as var."""
        # Specify dtype instead of full type
        a = nd.array((2*x + 1 for x in range(7)), dtype=ndt.int32)
        self.assertEqual(nd.type_of(a), ndt.type('var * int32'))
        self.assertEqual(nd.as_py(a), [2*x + 1 for x in range(7)])
class TestDeduceDims(unittest.TestCase):
    """Tests for deducing leading dimensions when only a dtype is given."""
    def test_simplearr(self):
        """Leading dims of a nested list are deduced around the given dtype."""
        val = [[[1, 2], [3, 4]], [[5, 6], [7, 8]],
               [[11, 12], [13, 14]], [[15, 16], [17, 18]]]
        # Deduce all the dims
        a = nd.array(val, dtype=ndt.int16)
        self.assertEqual(nd.type_of(a), ndt.type('4 * 2 * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
        # Specify some dims as fixed
        a = nd.array(val, dtype='Fixed * int16')
        self.assertEqual(nd.type_of(a), ndt.type('4 * 2 * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
        a = nd.array(val, dtype='Fixed * Fixed * int16')
        self.assertEqual(nd.type_of(a), ndt.type('4 * 2 * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
        a = nd.array(val, dtype='Fixed * Fixed * Fixed * int16')
        self.assertEqual(nd.type_of(a), ndt.type('4 * 2 * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
        # Specify some dims as fixed
        a = nd.array(val, dtype='2 * int16')
        self.assertEqual(nd.type_of(a), ndt.type('4 * 2 * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
        a = nd.array(val, dtype='2 * 2 * int16')
        self.assertEqual(nd.type_of(a), ndt.type('4 * 2 * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
        a = nd.array(val, dtype='4 * 2 * 2 * int16')
        self.assertEqual(nd.type_of(a), ndt.type('4 * 2 * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
        # Mix fixed, symbolic fixed, and var
        a = nd.array(val, dtype='4 * var * Fixed * int16')
        self.assertEqual(nd.type_of(a), ndt.type('4 * var * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
        a = nd.array(val, dtype='var * 2 * int16')
        self.assertEqual(nd.type_of(a), ndt.type('4 * var * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
        a = nd.array(val, dtype='Fixed * 2 * int16')
        self.assertEqual(nd.type_of(a), ndt.type('4 * 2 * 2 * int16'))
        self.assertEqual(nd.as_py(a), val)
    def test_empty(self):
        """Dimension handling for an empty list input."""
        # A fixed dimension of non-zero size gets pushed down
        a = nd.array([], dtype='3 * int32')
        self.assertEqual(nd.type_of(a), ndt.type('0 * 3 * int32'))
        self.assertEqual(nd.as_py(a), [])
        # A fixed dimension of zero size gets absorbed
        a = nd.array([], dtype='0 * int32')
        self.assertEqual(nd.type_of(a), ndt.type('0 * int32'))
        self.assertEqual(nd.as_py(a), [])
        # A symbolic fixed dimension gets absorbed
        # Todo: Need to reenable this failing test
        # a = nd.array([], dtype='Fixed * int32')
        # self.assertEqual(nd.type_of(a), ndt.type('0 * int32'))
        # self.assertEqual(nd.as_py(a), [])
        # A var dimension gets absorbed
        a = nd.array([], dtype='var * int32')
        self.assertEqual(nd.type_of(a), ndt.type('var * int32'))
        self.assertEqual(nd.as_py(a), [])
class TestConstructErrors(unittest.TestCase):
    """Tests for error conditions in nd.array construction."""
    def test_bad_params(self):
        """Type/dtype/access parameters alone (no data) are invalid."""
        self.assertRaises(ValueError, nd.array, dtype='int32')
        self.assertRaises(ValueError, nd.array, type='2 * 2 * int32')
        self.assertRaises(ValueError, nd.array, access='readwrite')
    def test_dict_auto_detect(self):
        """Dicts are not auto-detectable as array values."""
        # Trigger failure in initial auto detect pass
        self.assertRaises(ValueError, nd.array, {'x' : 1})
        self.assertRaises(ValueError, nd.array, [{'x' : 1}])
        # Trigger failure in later type promotion
        self.assertRaises(ValueError, nd.array, [['a'], {'x' : 1}])
class TestOptionArrayConstruct(unittest.TestCase):
    """Tests for constructing scalars of option (nullable) types."""
    def check_scalars(self, dtype, input_expected):
        """Construct each input as a scalar of `dtype` and compare results.

        `dtype` may be a type string or ndt.type; `input_expected` is a
        sequence of (input, expected) pairs, where the expected value is the
        Python-converted result (None for missing values).
        """
        dtype = ndt.type(dtype)
        # Renamed loop variable so it doesn't shadow the `input` builtin.
        for value, expected in input_expected:
            a = nd.array(value, dtype=dtype)
            self.assertEqual(nd.type_of(a), dtype)
            self.assertEqual(nd.as_py(a), expected)
    def test_scalar_option(self):
        """None/''/'NA' parse as missing for non-string option types."""
        self.check_scalars('?bool', [(None, None),
                                     ('', None),
                                     ('NA', None),
                                     (False, False),
                                     ('true', True)])
        self.check_scalars('?int', [(None, None),
                                    ('', None),
                                    ('NA', None),
                                    (-10, -10),
                                    ('12354', 12354)])
        self.check_scalars('?real', [(None, None),
                                     ('', None),
                                     ('NA', None),
                                     (-10, -10),
                                     ('12354', 12354),
                                     (1.25, 1.25),
                                     ('125e20', 125e20)])
        # For ?string, only None is missing; '' and 'NA' are real strings.
        self.check_scalars('?string', [(None, None),
                                       ('', ''),
                                       ('NA', 'NA'),
                                       (u'\uc548\ub155', u'\uc548\ub155')])
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| bsd-2-clause |
joequery/django | tests/auth_tests/test_forms.py | 228 | 26909 | from __future__ import unicode_literals
import datetime
import re
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field
from django.test import SimpleTestCase, TestCase, mock, override_settings
from django.utils import translation
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from .settings import AUTH_TEMPLATES
class TestDataMixin(object):
    """Mixin that creates a fixed set of User fixtures for form tests.

    All users with a usable password share the same SHA1 hash (the
    password is 'password'); the remaining users cover the empty,
    unmanageable ('$'), and unknown-algorithm ('foo$bar') hash cases.
    """
    @classmethod
    def setUpTestData(cls):
        cls.u1 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
            first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u2 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
            first_name='Inactive', last_name='User', email='testclient2@example.com', is_staff=False, is_active=False,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u3 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
            first_name='Staff', last_name='Member', email='staffmember@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u4 = User.objects.create(
            password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='empty_password', first_name='Empty', last_name='Password', email='empty_password@example.com',
            is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u5 = User.objects.create(
            password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unmanageable_password', first_name='Unmanageable', last_name='Password',
            email='unmanageable_password@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        cls.u6 = User.objects.create(
            password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unknown_password', first_name='Unknown', last_name='Password',
            email='unknown_password@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserCreationFormTest(TestDataMixin, TestCase):
    """Tests for UserCreationForm validation and saving."""
    def test_user_already_exists(self):
        """A duplicate username surfaces the model's 'unique' error."""
        data = {
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [force_text(User._meta.get_field('username').error_messages['unique'])])
    def test_invalid_data(self):
        """An invalid username surfaces the field's 'invalid' validator message."""
        data = {
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [force_text(validator.message)])
    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])
    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {'username': 'jsmith'}
        form = UserCreationForm(data)
        required_error = [force_text(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)
        data['password2'] = 'test123'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, [])
    @mock.patch('django.contrib.auth.password_validation.password_changed')
    def test_success(self, password_changed):
        # The success case. password_changed must fire only on commit=True.
        data = {
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        u = form.save()
        self.assertEqual(password_changed.call_count, 1)
        self.assertEqual(repr(u), '<User: jsmith@example.com>')
    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
        {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
            'min_length': 12,
        }},
    ])
    def test_validates_password(self):
        """Configured password validators run and attach errors to password2."""
        data = {
            'username': 'testclient',
            'password1': 'testclient',
            'password2': 'testclient',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form['password2'].errors), 2)
        self.assertIn('The password is too similar to the username.', form['password2'].errors)
        self.assertIn(
            'This password is too short. It must contain at least 12 characters.',
            form['password2'].errors
        )
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class AuthenticationFormTest(TestDataMixin, TestCase):
    """Tests for AuthenticationForm login validation and customization hooks."""
    def test_invalid_username(self):
        # The user submits an invalid username.
        data = {
            'username': 'jsmith_does_not_exist',
            'password': 'test123',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['invalid_login'] % {
                             'username': User._meta.get_field('username').verbose_name
                         })])
    def test_inactive_user(self):
        # The user is inactive.
        data = {
            'username': 'inactive',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['inactive'])])
    def test_inactive_user_i18n(self):
        # Same as above, but under an active translation (pt-br).
        with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
            # The user is inactive.
            data = {
                'username': 'inactive',
                'password': 'password',
            }
            form = AuthenticationForm(None, data)
            self.assertFalse(form.is_valid())
            self.assertEqual(form.non_field_errors(),
                             [force_text(form.error_messages['inactive'])])
    def test_custom_login_allowed_policy(self):
        # The user is inactive, but our custom form policy allows them to log in.
        data = {
            'username': 'inactive',
            'password': 'password',
        }
        class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
            def confirm_login_allowed(self, user):
                pass
        form = AuthenticationFormWithInactiveUsersOkay(None, data)
        self.assertTrue(form.is_valid())
        # If we want to disallow some logins according to custom logic,
        # we should raise a django.forms.ValidationError in the form.
        class PickyAuthenticationForm(AuthenticationForm):
            def confirm_login_allowed(self, user):
                if user.username == "inactive":
                    raise forms.ValidationError("This user is disallowed.")
                raise forms.ValidationError("Sorry, nobody's allowed in.")
        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
    def test_success(self):
        # The success case
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])
    def test_username_field_label(self):
        """An explicit label on a custom username field is preserved."""
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label="Name", max_length=75)
        form = CustomAuthenticationForm()
        self.assertEqual(form['username'].label, "Name")
    def test_username_field_label_not_set(self):
        """Without a label, it defaults to the model field's verbose_name."""
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField()
        form = CustomAuthenticationForm()
        username_field = User._meta.get_field(User.USERNAME_FIELD)
        self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
    def test_username_field_label_empty_string(self):
        """An empty-string label is kept as-is, not replaced by the default."""
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label='')
        form = CustomAuthenticationForm()
        self.assertEqual(form.fields['username'].label, "")
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class SetPasswordFormTest(TestDataMixin, TestCase):
    """Tests for SetPasswordForm (set a new password without the old one)."""
    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])
    @mock.patch('django.contrib.auth.password_validation.password_changed')
    def test_success(self, password_changed):
        # password_changed must fire only when the form is committed.
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = SetPasswordForm(user, data)
        self.assertTrue(form.is_valid())
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        form.save()
        self.assertEqual(password_changed.call_count, 1)
    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
        {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
            'min_length': 12,
        }},
    ])
    def test_validates_password(self):
        """Configured password validators run and attach errors to new_password2."""
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'testclient',
            'new_password2': 'testclient',
        }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form["new_password2"].errors), 2)
        self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
        self.assertIn(
            'This password is too short. It must contain at least 12 characters.',
            form["new_password2"].errors
        )
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class PasswordChangeFormTest(TestDataMixin, TestCase):
    """Tests for PasswordChangeForm (requires the old password)."""
    def test_incorrect_password(self):
        """A wrong old password produces the 'password_incorrect' error."""
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'test',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["old_password"].errors,
                         [force_text(form.error_messages['password_incorrect'])])
    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])
    @mock.patch('django.contrib.auth.password_validation.password_changed')
    def test_success(self, password_changed):
        # The success case. password_changed must fire only on commit=True.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        form.save()
        self.assertEqual(password_changed.call_count, 1)
    def test_field_order(self):
        # Regression test - check the order of fields:
        user = User.objects.get(username='testclient')
        self.assertEqual(list(PasswordChangeForm(user, {}).fields),
                         ['old_password', 'new_password1', 'new_password2'])
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserChangeFormTest(TestDataMixin, TestCase):
    """Tests for UserChangeForm, including the read-only password display."""
    def test_username_validity(self):
        """An invalid username surfaces the field's 'invalid' validator message."""
        user = User.objects.get(username='testclient')
        data = {'username': 'not valid'}
        form = UserChangeForm(data, instance=user)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [force_text(validator.message)])
    def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.
        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super(MyUserForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'
            class Meta(UserChangeForm.Meta):
                fields = ('groups',)
        # Just check we can create it
        MyUserForm({})
    def test_unusable_password(self):
        """A user with an unusable password renders the 'No password set.' note."""
        # Renamed from test_unsuable_password (typo); still discovered by
        # the test_ prefix, so no external behavior changes.
        user = User.objects.get(username='empty_password')
        user.set_unusable_password()
        user.save()
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())
    def test_bug_17944_empty_password(self):
        user = User.objects.get(username='empty_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())
    def test_bug_17944_unmanageable_password(self):
        user = User.objects.get(username='unmanageable_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())
    def test_bug_17944_unknown_password_algorithm(self):
        user = User.objects.get(username='unknown_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())
    def test_bug_19133(self):
        "The change form does not return the password value"
        # Use the form to construct the POST data
        user = User.objects.get(username='testclient')
        form_for_data = UserChangeForm(instance=user)
        post_data = form_for_data.initial
        # The password field should be readonly, so anything
        # posted here should be ignored; the form will be
        # valid, and give back the 'initial' value for the
        # password field.
        post_data['password'] = 'new password'
        form = UserChangeForm(instance=user, data=post_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
    def test_bug_19349_bound_password_field(self):
        user = User.objects.get(username='testclient')
        form = UserChangeForm(data={}, instance=user)
        # When rendering the bound password field,
        # ReadOnlyPasswordHashWidget needs the initial
        # value to render correctly
        self.assertEqual(form.initial['password'], form['password'].value())
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
)
class PasswordResetFormTest(TestDataMixin, TestCase):
@classmethod
def setUpClass(cls):
super(PasswordResetFormTest, cls).setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': 'foo@bar.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': 'testclient@example.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': 'testclient@example.com'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['site_monitor@example.com'],
headers={'Reply-To': 'webmaster@example.com'},
alternatives=[("Really sorry to hear you forgot your password.",
"text/html")]).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
self.assertEqual(user.email, 'tesT@example.com')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
Test that inactive user cannot receive password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', 'test@example.com', 'test')
data = {"email": "test@example.com"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
    def test_save_html_email_template_name(self):
        """
        Test the PasswordResetForm.save() method with html_email_template_name
        parameter specified.
        Test to ensure that a multipart email is sent with both text/plain
        and text/html parts.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save(html_email_template_name='registration/html_password_reset_email.html')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(len(mail.outbox[0].alternatives), 1)
        message = mail.outbox[0].message()
        self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
        # Multipart message: payload 0 is text/plain, payload 1 is text/html.
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
        self.assertTrue(
            re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
                     message.get_payload(1).get_payload())
        )
class ReadOnlyPasswordHashTest(SimpleTestCase):
    """Tests for the read-only password-hash widget and field."""
    def test_bug_19349_render_with_none_value(self):
        # Rendering the widget with value set to None
        # mustn't raise an exception.
        widget = ReadOnlyPasswordHashWidget()
        html = widget.render(name='password', value=None, attrs={})
        self.assertIn(_("No password set."), html)
    def test_readonly_field_has_changed(self):
        # A read-only field never reports a change, regardless of the values.
        field = ReadOnlyPasswordHashField()
        self.assertFalse(field.has_changed('aaa', 'bbb'))
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
    """Tests for the admin's password-change form."""
    @mock.patch('django.contrib.auth.password_validation.password_changed')
    def test_success(self, password_changed):
        user = User.objects.get(username='testclient')
        data = {
            'password1': 'test123',
            'password2': 'test123',
        }
        form = AdminPasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        # save(commit=False) must not trigger the password_changed hook...
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        # ...but a committed save() triggers it exactly once.
        form.save()
        self.assertEqual(password_changed.call_count, 1)
| bsd-3-clause |
venmo/ansible | lib/ansible/plugins/action/assemble.py | 39 | 6003 | # (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import codecs
import os
import os.path
import pipes
import re
import shutil
import tempfile

from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum_s
class ActionModule(ActionBase):
    '''Action side of the ``assemble`` module: builds a file on the
    controller from a directory of fragments and copies it to the target.'''

    TRANSFERS_FILES = True

    def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False):
        ''' assemble a file from a directory of fragments

        :arg src_path: directory containing the fragment files
        :arg delimiter: optional text inserted between fragments
            (backslash escapes such as ``\\n`` are un-escaped once)
        :arg compiled_regexp: if given, only fragments whose names match are used
        :arg ignore_hidden: skip dot-files when True
        :returns: path of the assembled temporary file (caller must remove it)
        '''
        tmpfd, temp_path = tempfile.mkstemp()
        tmp = os.fdopen(tmpfd, 'w')
        delimit_me = False
        add_newline = False

        if delimiter is not None:
            # Un-escape things like "\n" exactly once, before the loop.  The
            # old code re-decoded the delimiter on every iteration (mangling
            # literal backslashes) and used bytes-only .decode(), which raises
            # AttributeError on a python3 str.
            if hasattr(delimiter, 'decode'):
                delimiter = delimiter.decode('unicode-escape')
            else:
                delimiter = codecs.decode(delimiter, 'unicode_escape')

        for f in sorted(os.listdir(src_path)):
            if compiled_regexp and not compiled_regexp.search(f):
                continue
            fragment = os.path.join(src_path, f)
            if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
                continue
            # open() instead of the python2-only file() builtin; close promptly.
            with open(fragment) as fragment_fh:
                fragment_content = fragment_fh.read()

            # always put a newline between fragments if the previous fragment didn't end with a newline.
            if add_newline:
                tmp.write('\n')

            # delimiters should only appear between fragments
            if delimit_me and delimiter:
                tmp.write(delimiter)
                # always make sure there's a newline after the
                # delimiter, so lines don't run together
                if delimiter[-1] != '\n':
                    tmp.write('\n')

            tmp.write(fragment_content)
            delimit_me = True
            add_newline = not fragment_content.endswith('\n')

        tmp.close()
        return temp_path

    def run(self, tmp=None, task_vars=None):
        '''Handler for the assemble action; returns the module result dict.'''
        # Avoid the mutable-default-argument trap of the original
        # ``task_vars=dict()`` signature; behavior for callers is unchanged.
        if task_vars is None:
            task_vars = dict()

        if self._play_context.check_mode:
            return dict(skipped=True, msg=("skipped, this module does not support check_mode."))

        src = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        delimiter = self._task.args.get('delimiter', None)
        remote_src = self._task.args.get('remote_src', 'yes')
        regexp = self._task.args.get('regexp', None)
        ignore_hidden = self._task.args.get('ignore_hidden', False)

        if src is None or dest is None:
            return dict(failed=True, msg="src and dest are required")

        if boolean(remote_src):
            # fragments already live on the remote node; let the module do it all
            return self._execute_module(tmp=tmp, task_vars=task_vars)
        elif self._task._role is not None:
            src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
        else:
            # the source is local, so expand it here
            src = self._loader.path_dwim(os.path.expanduser(src))

        _re = None
        if regexp is not None:
            _re = re.compile(regexp)

        # Does all work assembling the file
        path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden)

        path_checksum = checksum_s(path)
        dest = self._remote_expand_user(dest, tmp)
        remote_checksum = self._remote_checksum(tmp, dest)

        if path_checksum != remote_checksum:
            # Content differs: transfer the assembled file and run 'copy'.
            with open(path) as assembled_fh:  # file() is python2-only
                resultant = assembled_fh.read()
            # FIXME: diff needs to be moved somewhere else
            #if self.runner.diff:
            #    dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), task_vars=task_vars, tmp=tmp, persist_files=True)
            #    if 'content' in dest_result:
            #        dest_contents = dest_result['content']
            #        if dest_result['encoding'] == 'base64':
            #            dest_contents = base64.b64decode(dest_contents)
            #        else:
            #            raise Exception("unknown encoding, failed: %s" % dest_result)
            xfered = self._transfer_data('src', resultant)

            # fix file permissions when the copy is done as a different user
            if self._play_context.become and self._play_context.become_user != 'root':
                self._remote_chmod('a+r', xfered, tmp)

            # run the copy module
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=xfered,
                    dest=dest,
                    original_basename=os.path.basename(src),
                )
            )
            res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp)
            # FIXME: diff stuff
            #res.diff = dict(after=resultant)
            return res
        else:
            # Content already matches: only (re)apply file attributes via the
            # 'file' module.  The previous code passed ``src=xfered`` here, but
            # ``xfered`` is never bound on this branch (NameError); the 'file'
            # module needs no src at all.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    dest=dest,
                    original_basename=os.path.basename(src),
                )
            )
            new_module_args.pop('src', None)
            return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp)
| gpl-3.0 |
Haibo-Wang-ORG/pytest | testing/python/fixture.py | 16 | 87137 | import pytest, py, sys
from _pytest import python as funcargs
from _pytest.python import FixtureLookupError
from _pytest.pytester import get_public_names
from textwrap import dedent
def test_getfuncargnames():
    # Plain functions: no-arg, single-arg, and trailing defaulted args
    # (parameters with defaults are excluded from the reported names).
    def f(): pass
    assert not funcargs.getfuncargnames(f)
    def g(arg): pass
    assert funcargs.getfuncargnames(g) == ('arg',)
    def h(arg1, arg2="hello"): pass
    assert funcargs.getfuncargnames(h) == ('arg1',)
    def h(arg1, arg2, arg3="hello"): pass
    assert funcargs.getfuncargnames(h) == ('arg1', 'arg2')
    # Bound methods: the implicit `self` must be skipped as well.
    class A:
        def f(self, arg1, arg2="hello"):
            pass
    assert funcargs.getfuncargnames(A().f) == ('arg1',)
    # On python2 an unbound method also carries `self`; python3 has no
    # unbound methods, so this case only applies there.
    if sys.version_info < (3,0):
        assert funcargs.getfuncargnames(A.f) == ('arg1',)
class TestFillFixtures:
    """Tests for fixture/funcarg resolution: lookup, extension/override
    across module/class/conftest/plugin levels, and error reporting."""
    def test_fillfuncargs_exposed(self):
        # used by oejskit, kept for compatibility
        assert pytest._fillfuncargs == funcargs.fillfixtures
    def test_funcarg_lookupfails(self, testdir):
        # A missing fixture fails with a message naming the available ones.
        testdir.makepyfile("""
            def pytest_funcarg__xyzsomething(request):
                return 42
            def test_func(some):
                pass
        """)
        result = testdir.runpytest() # "--collect-only")
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*def test_func(some)*",
            "*fixture*some*not found*",
            "*xyzsomething*",
        ])
    def test_funcarg_basic(self, testdir):
        item = testdir.getitem("""
            def pytest_funcarg__some(request):
                return request.function.__name__
            def pytest_funcarg__other(request):
                return 42
            def test_func(some, other):
                pass
        """)
        funcargs.fillfixtures(item)
        del item.funcargs["request"]
        assert len(get_public_names(item.funcargs)) == 2
        assert item.funcargs['some'] == "test_func"
        assert item.funcargs['other'] == 42
    def test_funcarg_lookup_modulelevel(self, testdir):
        testdir.makepyfile("""
            def pytest_funcarg__something(request):
                return request.function.__name__
            class TestClass:
                def test_method(self, something):
                    assert something == "test_method"
            def test_func(something):
                assert something == "test_func"
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)
    def test_funcarg_lookup_classlevel(self, testdir):
        p = testdir.makepyfile("""
            class TestClass:
                def pytest_funcarg__something(self, request):
                    return request.instance
                def test_method(self, something):
                    assert something is self
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*1 passed*"
        ])
    def test_conftest_funcargs_only_available_in_subdir(self, testdir):
        # Fixtures defined in a sub-directory conftest must not leak into
        # the sibling directory.
        sub1 = testdir.mkpydir("sub1")
        sub2 = testdir.mkpydir("sub2")
        sub1.join("conftest.py").write(py.code.Source("""
            import pytest
            def pytest_funcarg__arg1(request):
                pytest.raises(Exception, "request.getfuncargvalue('arg2')")
        """))
        sub2.join("conftest.py").write(py.code.Source("""
            import pytest
            def pytest_funcarg__arg2(request):
                pytest.raises(Exception, "request.getfuncargvalue('arg1')")
        """))
        sub1.join("test_in_sub1.py").write("def test_1(arg1): pass")
        sub2.join("test_in_sub2.py").write("def test_2(arg2): pass")
        result = testdir.runpytest("-v")
        result.assert_outcomes(passed=2)
    def test_extend_fixture_module_class(self, testdir):
        testfile = testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def spam():
                return 'spam'
            class TestSpam:
                 @pytest.fixture
                 def spam(self, spam):
                     return spam * 2
                 def test_spam(self, spam):
                     assert spam == 'spamspam'
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*1 passed*"])
        result = testdir.runpytest(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_extend_fixture_conftest_module(self, testdir):
        testdir.makeconftest("""
            import pytest
            @pytest.fixture
            def spam():
                return 'spam'
        """)
        testfile = testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def spam(spam):
                return spam * 2
            def test_spam(spam):
                assert spam == 'spamspam'
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*1 passed*"])
        result = testdir.runpytest(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_extend_fixture_conftest_conftest(self, testdir):
        testdir.makeconftest("""
            import pytest
            @pytest.fixture
            def spam():
                return 'spam'
        """)
        pkg = testdir.mkpydir("pkg")
        pkg.join("conftest.py").write(py.code.Source("""
            import pytest
            @pytest.fixture
            def spam(spam):
                return spam * 2
        """))
        testfile = pkg.join("test_spam.py")
        testfile.write(py.code.Source("""
            def test_spam(spam):
                assert spam == "spamspam"
        """))
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*1 passed*"])
        result = testdir.runpytest(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_extend_fixture_conftest_plugin(self, testdir):
        testdir.makepyfile(testplugin="""
            import pytest
            @pytest.fixture
            def foo():
                return 7
        """)
        testdir.syspathinsert()
        testdir.makeconftest("""
            import pytest
            pytest_plugins = 'testplugin'
            @pytest.fixture
            def foo(foo):
                return foo + 7
        """)
        testdir.makepyfile("""
            def test_foo(foo):
                assert foo == 14
        """)
        result = testdir.runpytest('-s')
        assert result.ret == 0
    def test_extend_fixture_plugin_plugin(self, testdir):
        # Two plugins should extend each order in loading order
        testdir.makepyfile(testplugin0="""
            import pytest
            @pytest.fixture
            def foo():
                return 7
        """)
        testdir.makepyfile(testplugin1="""
            import pytest
            @pytest.fixture
            def foo(foo):
                return foo + 7
        """)
        testdir.syspathinsert()
        testdir.makepyfile("""
            pytest_plugins = ['testplugin0', 'testplugin1']
            def test_foo(foo):
                assert foo == 14
        """)
        result = testdir.runpytest()
        assert result.ret == 0
    def test_override_parametrized_fixture_conftest_module(self, testdir):
        """Test override of the parametrized fixture with non-parametrized one on the test module level."""
        testdir.makeconftest("""
            import pytest
            @pytest.fixture(params=[1, 2, 3])
            def spam(request):
                return request.param
        """)
        testfile = testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def spam():
                return 'spam'
            def test_spam(spam):
                assert spam == 'spam'
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*1 passed*"])
        result = testdir.runpytest(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_override_parametrized_fixture_conftest_conftest(self, testdir):
        """Test override of the parametrized fixture with non-parametrized one on the conftest level."""
        testdir.makeconftest("""
            import pytest
            @pytest.fixture(params=[1, 2, 3])
            def spam(request):
                return request.param
        """)
        subdir = testdir.mkpydir('subdir')
        subdir.join("conftest.py").write(py.code.Source("""
            import pytest
            @pytest.fixture
            def spam():
                return 'spam'
        """))
        testfile = subdir.join("test_spam.py")
        testfile.write(py.code.Source("""
            def test_spam(spam):
                assert spam == "spam"
        """))
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*1 passed*"])
        result = testdir.runpytest(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_override_non_parametrized_fixture_conftest_module(self, testdir):
        """Test override of the non-parametrized fixture with parametrized one on the test module level."""
        testdir.makeconftest("""
            import pytest
            @pytest.fixture
            def spam():
                return 'spam'
        """)
        testfile = testdir.makepyfile("""
            import pytest
            @pytest.fixture(params=[1, 2, 3])
            def spam(request):
                return request.param
            params = {'spam': 1}
            def test_spam(spam):
                assert spam == params['spam']
                params['spam'] += 1
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*3 passed*"])
        result = testdir.runpytest(testfile)
        result.stdout.fnmatch_lines(["*3 passed*"])
    def test_override_non_parametrized_fixture_conftest_conftest(self, testdir):
        """Test override of the non-parametrized fixture with parametrized one on the conftest level."""
        testdir.makeconftest("""
            import pytest
            @pytest.fixture
            def spam():
                return 'spam'
        """)
        subdir = testdir.mkpydir('subdir')
        subdir.join("conftest.py").write(py.code.Source("""
            import pytest
            @pytest.fixture(params=[1, 2, 3])
            def spam(request):
                return request.param
        """))
        testfile = subdir.join("test_spam.py")
        testfile.write(py.code.Source("""
            params = {'spam': 1}
            def test_spam(spam):
                assert spam == params['spam']
                params['spam'] += 1
        """))
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*3 passed*"])
        result = testdir.runpytest(testfile)
        result.stdout.fnmatch_lines(["*3 passed*"])
    def test_autouse_fixture_plugin(self, testdir):
        # A fixture from a plugin has no baseid set, which screwed up
        # the autouse fixture handling.
        testdir.makepyfile(testplugin="""
            import pytest
            @pytest.fixture(autouse=True)
            def foo(request):
                request.function.foo = 7
        """)
        testdir.syspathinsert()
        testdir.makepyfile("""
            pytest_plugins = 'testplugin'
            def test_foo(request):
                assert request.function.foo == 7
        """)
        result = testdir.runpytest()
        assert result.ret == 0
    def test_funcarg_lookup_error(self, testdir):
        testdir.makepyfile("""
            def test_lookup_error(unknown):
                pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*ERROR*test_lookup_error*",
            "*def test_lookup_error(unknown):*",
            "*fixture*unknown*not found*",
            "*available fixtures*",
            "*1 error*",
        ])
        # A fixture lookup error must be reported as such, never as an
        # internal pytest error.
        assert "INTERNAL" not in result.stdout.str()
    def test_fixture_excinfo_leak(self, testdir):
        # on python2 sys.excinfo would leak into fixture executions
        testdir.makepyfile("""
            import sys
            import traceback
            import pytest
            @pytest.fixture
            def leak():
                if sys.exc_info()[0]:  # python3 bug :)
                    traceback.print_exc()
                #fails
                assert sys.exc_info() == (None, None, None)
            def test_leak(leak):
                if sys.exc_info()[0]:  # python3 bug :)
                    traceback.print_exc()
                assert sys.exc_info() == (None, None, None)
        """)
        result = testdir.runpytest()
        assert result.ret == 0
class TestRequestBasic:
    """Tests for the FixtureRequest object: its attributes,
    getfuncargvalue, addfinalizer, and fixturenames behavior."""
    def test_request_attributes(self, testdir):
        item = testdir.getitem("""
            def pytest_funcarg__something(request): pass
            def test_func(something): pass
        """)
        req = funcargs.FixtureRequest(item)
        assert req.function == item.obj
        assert req.keywords == item.keywords
        assert hasattr(req.module, 'test_func')
        assert req.cls is None
        assert req.function.__name__ == "test_func"
        assert req.config == item.config
        assert repr(req).find(req.function.__name__) != -1
    def test_request_attributes_method(self, testdir):
        item, = testdir.getitems("""
            class TestB:
                def pytest_funcarg__something(self, request):
                    return 1
                def test_func(self, something):
                    pass
        """)
        req = item._request
        assert req.cls.__name__ == "TestB"
        assert req.instance.__class__ == req.cls
    def XXXtest_request_contains_funcarg_arg2fixturedefs(self, testdir):
        # NOTE: disabled test (XXX prefix keeps it from being collected).
        modcol = testdir.getmodulecol("""
            def pytest_funcarg__something(request):
                pass
            class TestClass:
                def test_method(self, something):
                    pass
        """)
        item1, = testdir.genitems([modcol])
        assert item1.name == "test_method"
        arg2fixturedefs = funcargs.FixtureRequest(item1)._arg2fixturedefs
        assert len(arg2fixturedefs) == 1
        assert arg2fixturedefs[0].__name__ == "pytest_funcarg__something"
    def test_getfuncargvalue_recursive(self, testdir):
        testdir.makeconftest("""
            def pytest_funcarg__something(request):
                return 1
        """)
        testdir.makepyfile("""
            def pytest_funcarg__something(request):
                return request.getfuncargvalue("something") + 1
            def test_func(something):
                assert something == 2
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
    def test_getfuncargvalue(self, testdir):
        item = testdir.getitem("""
            l = [2]
            def pytest_funcarg__something(request): return 1
            def pytest_funcarg__other(request):
                return l.pop()
            def test_func(something): pass
        """)
        req = item._request
        pytest.raises(FixtureLookupError, req.getfuncargvalue, "notexists")
        val = req.getfuncargvalue("something")
        assert val == 1
        val = req.getfuncargvalue("something")
        assert val == 1
        val2 = req.getfuncargvalue("other")
        assert val2 == 2
        val2 = req.getfuncargvalue("other")  # see about caching
        assert val2 == 2
        pytest._fillfuncargs(item)
        assert item.funcargs["something"] == 1
        assert len(get_public_names(item.funcargs)) == 2
        assert "request" in item.funcargs
        #assert item.funcargs == {'something': 1, "other": 2}
    def test_request_addfinalizer(self, testdir):
        item = testdir.getitem("""
            teardownlist = []
            def pytest_funcarg__something(request):
                request.addfinalizer(lambda: teardownlist.append(1))
            def test_func(something): pass
        """)
        item.session._setupstate.prepare(item)
        pytest._fillfuncargs(item)
        # successively check finalization calls
        teardownlist = item.getparent(pytest.Module).obj.teardownlist
        ss = item.session._setupstate
        assert not teardownlist
        ss.teardown_exact(item, None)
        print(ss.stack)
        assert teardownlist == [1]
    def test_request_addfinalizer_failing_setup(self, testdir):
        testdir.makepyfile("""
            import pytest
            l = [1]
            @pytest.fixture
            def myfix(request):
                request.addfinalizer(l.pop)
                assert 0
            def test_fix(myfix):
                pass
            def test_finalizer_ran():
                assert not l
        """)
        reprec = testdir.inline_run("-s")
        reprec.assertoutcome(failed=1, passed=1)
    def test_request_addfinalizer_failing_setup_module(self, testdir):
        testdir.makepyfile("""
            import pytest
            l = [1, 2]
            @pytest.fixture(scope="module")
            def myfix(request):
                request.addfinalizer(l.pop)
                request.addfinalizer(l.pop)
                assert 0
            def test_fix(myfix):
                pass
        """)
        reprec = testdir.inline_run("-s")
        mod = reprec.getcalls("pytest_runtest_setup")[0].item.module
        assert not mod.l
    def test_request_addfinalizer_partial_setup_failure(self, testdir):
        p = testdir.makepyfile("""
            l = []
            def pytest_funcarg__something(request):
                request.addfinalizer(lambda: l.append(None))
            def test_func(something, missingarg):
                pass
            def test_second():
                assert len(l) == 1
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*1 error*"  # XXX the whole module collection fails
        ])
    def test_request_getmodulepath(self, testdir):
        modcol = testdir.getmodulecol("def test_somefunc(): pass")
        item, = testdir.genitems([modcol])
        req = funcargs.FixtureRequest(item)
        assert req.fspath == modcol.fspath
    def test_request_fixturenames(self, testdir):
        testdir.makepyfile("""
            import pytest
            from _pytest.pytester import get_public_names
            @pytest.fixture()
            def arg1():
                pass
            @pytest.fixture()
            def farg(arg1):
                pass
            @pytest.fixture(autouse=True)
            def sarg(tmpdir):
                pass
            def test_function(request, farg):
                assert set(get_public_names(request.fixturenames)) == \
                       set(["tmpdir", "sarg", "arg1", "request", "farg",
                            "tmpdir_factory"])
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
    def test_funcargnames_compatattr(self, testdir):
        # `funcargnames` is the legacy alias for `fixturenames`.
        testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                assert metafunc.funcargnames == metafunc.fixturenames
            def pytest_funcarg__fn(request):
                assert request._pyfuncitem.funcargnames == \
                       request._pyfuncitem.fixturenames
                return request.funcargnames, request.fixturenames
            def test_hello(fn):
                assert fn[0] == fn[1]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
    def test_setupdecorator_and_xunit(self, testdir):
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(scope='module', autouse=True)
            def setup_module():
                l.append("module")
            @pytest.fixture(autouse=True)
            def setup_function():
                l.append("function")
            def test_func():
                pass
            class TestClass:
                @pytest.fixture(scope="class", autouse=True)
                def setup_class(self):
                    l.append("class")
                @pytest.fixture(autouse=True)
                def setup_method(self):
                    l.append("method")
                def test_method(self):
                    pass
            def test_all():
                assert l == ["module", "function", "class",
                             "function", "method", "function"]
        """)
        reprec = testdir.inline_run("-v")
        reprec.assertoutcome(passed=3)
    def test_fixtures_sub_subdir_normalize_sep(self, testdir):
        # this tests that normalization of nodeids takes place
        b = testdir.mkdir("tests").mkdir("unit")
        b.join("conftest.py").write(py.code.Source("""
            def pytest_funcarg__arg1():
                pass
        """))
        p = b.join("test_module.py")
        p.write("def test_func(arg1): pass")
        result = testdir.runpytest(p, "--fixtures")
        assert result.ret == 0
        result.stdout.fnmatch_lines("""
            *fixtures defined*conftest*
            *arg1*
        """)
    def test_show_fixtures_color_yes(self, testdir):
        testdir.makepyfile("def test_this(): assert 1")
        result = testdir.runpytest('--color=yes', '--fixtures')
        # \x1b[32m is the ANSI green escape that --color=yes should emit.
        assert '\x1b[32mtmpdir' in result.stdout.str()
    def test_newstyle_with_request(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture()
            def arg(request):
                pass
            def test_1(arg):
                pass
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
    def test_setupcontext_no_param(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(params=[1,2])
            def arg(request):
                return request.param
            @pytest.fixture(autouse=True)
            def mysetup(request, arg):
                assert not hasattr(request, "param")
            def test_1(arg):
                assert arg in (1,2)
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)
class TestRequestMarking:
    """Tests for request.applymarker() and keyword access from fixtures."""
    def test_applymarker(self, testdir):
        item1,item2 = testdir.getitems("""
            def pytest_funcarg__something(request):
                pass
            class TestClass:
                def test_func1(self, something):
                    pass
                def test_func2(self, something):
                    pass
        """)
        req1 = funcargs.FixtureRequest(item1)
        assert 'xfail' not in item1.keywords
        req1.applymarker(pytest.mark.xfail)
        assert 'xfail' in item1.keywords
        assert 'skipif' not in item1.keywords
        req1.applymarker(pytest.mark.skipif)
        assert 'skipif' in item1.keywords
        # applymarker only accepts marker objects.
        pytest.raises(ValueError, "req1.applymarker(42)")
    def test_accesskeywords(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture()
            def keywords(request):
                return request.keywords
            @pytest.mark.XYZ
            def test_function(keywords):
                assert keywords["XYZ"]
                assert "abc" not in keywords
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
    def test_accessmarker_dynamic(self, testdir):
        # Markers applied dynamically from an autouse fixture must be
        # visible through request.keywords in other fixtures.
        testdir.makeconftest("""
            import pytest
            @pytest.fixture()
            def keywords(request):
                return request.keywords
            @pytest.fixture(scope="class", autouse=True)
            def marking(request):
                request.applymarker(pytest.mark.XYZ("hello"))
        """)
        testdir.makepyfile("""
            import pytest
            def test_fun1(keywords):
                assert keywords["XYZ"] is not None
                assert "abc" not in keywords
            def test_fun2(keywords):
                assert keywords["XYZ"] is not None
                assert "abc" not in keywords
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)
class TestRequestCachedSetup:
    """Tests for the legacy request.cached_setup() API: scoping,
    extra keys, teardown ordering, and interaction with funcargs."""
    def test_request_cachedsetup_defaultmodule(self, testdir):
        reprec = testdir.inline_runsource("""
            mysetup = ["hello",].pop
            def pytest_funcarg__something(request):
                return request.cached_setup(mysetup, scope="module")
            def test_func1(something):
                assert something == "hello"
            class TestClass:
                def test_func1a(self, something):
                    assert something == "hello"
        """)
        reprec.assertoutcome(passed=2)
    def test_request_cachedsetup_class(self, testdir):
        # class scope: each function outside a class gets its own value,
        # methods within one class share a single cached value.
        reprec = testdir.inline_runsource("""
            mysetup = ["hello", "hello2", "hello3"].pop
            def pytest_funcarg__something(request):
                return request.cached_setup(mysetup, scope="class")
            def test_func1(something):
                assert something == "hello3"
            def test_func2(something):
                assert something == "hello2"
            class TestClass:
                def test_func1a(self, something):
                    assert something == "hello"
                def test_func2b(self, something):
                    assert something == "hello"
        """)
        reprec.assertoutcome(passed=4)
    def test_request_cachedsetup_extrakey(self, testdir):
        # Different extrakey values yield independent cache slots.
        item1 = testdir.getitem("def test_func(): pass")
        req1 = funcargs.FixtureRequest(item1)
        l = ["hello", "world"]
        def setup():
            return l.pop()
        ret1 = req1.cached_setup(setup, extrakey=1)
        ret2 = req1.cached_setup(setup, extrakey=2)
        assert ret2 == "hello"
        assert ret1 == "world"
        ret1b = req1.cached_setup(setup, extrakey=1)
        ret2b = req1.cached_setup(setup, extrakey=2)
        assert ret1 == ret1b
        assert ret2 == ret2b
    def test_request_cachedsetup_cache_deletion(self, testdir):
        item1 = testdir.getitem("def test_func(): pass")
        req1 = funcargs.FixtureRequest(item1)
        l = []
        def setup():
            l.append("setup")
        def teardown(val):
            l.append("teardown")
        req1.cached_setup(setup, teardown, scope="function")
        assert l == ['setup']
        # artificial call of finalizer
        setupstate = req1._pyfuncitem.session._setupstate
        setupstate._callfinalizers(item1)
        assert l == ["setup", "teardown"]
        req1.cached_setup(setup, teardown, scope="function")
        assert l == ["setup", "teardown", "setup"]
        setupstate._callfinalizers(item1)
        assert l == ["setup", "teardown", "setup", "teardown"]
    def test_request_cached_setup_two_args(self, testdir):
        testdir.makepyfile("""
            def pytest_funcarg__arg1(request):
                return request.cached_setup(lambda: 42)
            def pytest_funcarg__arg2(request):
                return request.cached_setup(lambda: 17)
            def test_two_different_setups(arg1, arg2):
                assert arg1 != arg2
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines([
            "*1 passed*"
        ])
    def test_request_cached_setup_getfuncargvalue(self, testdir):
        testdir.makepyfile("""
            def pytest_funcarg__arg1(request):
                arg1 = request.getfuncargvalue("arg2")
                return request.cached_setup(lambda: arg1 + 1)
            def pytest_funcarg__arg2(request):
                return request.cached_setup(lambda: 10)
            def test_two_funcarg(arg1):
                assert arg1 == 11
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines([
            "*1 passed*"
        ])
    def test_request_cached_setup_functional(self, testdir):
        testdir.makepyfile(test_0="""
            l = []
            def pytest_funcarg__something(request):
                val = request.cached_setup(fsetup, fteardown)
                return val
            def fsetup(mycache=[1]):
                l.append(mycache.pop())
                return l
            def fteardown(something):
                l.remove(something[0])
                l.append(2)
            def test_list_once(something):
                assert something == [1]
            def test_list_twice(something):
                assert something == [1]
        """)
        testdir.makepyfile(test_1="""
            import test_0 # should have run already
            def test_check_test0_has_teardown_correct():
                assert test_0.l == [2]
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines([
            "*3 passed*"
        ])
    def test_issue117_sessionscopeteardown(self, testdir):
        # A failing session-scope teardown must be reported with traceback.
        testdir.makepyfile("""
            def pytest_funcarg__app(request):
                app = request.cached_setup(
                    scope='session',
                    setup=lambda: 0,
                    teardown=lambda x: 3/x)
                return app
            def test_func(app):
                pass
        """)
        result = testdir.runpytest()
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*3/x*",
            "*ZeroDivisionError*",
        ])
class TestFixtureUsages:
def test_noargfixturedec(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture
def arg1():
return 1
def test_func(arg1):
assert arg1 == 1
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_receives_funcargs(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture()
def arg1():
return 1
@pytest.fixture()
def arg2(arg1):
return arg1 + 1
def test_add(arg2):
assert arg2 == 2
def test_all(arg1, arg2):
assert arg1 == 1
assert arg2 == 2
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_receives_funcargs_scope_mismatch(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(scope="function")
def arg1():
return 1
@pytest.fixture(scope="module")
def arg2(arg1):
return arg1 + 1
def test_add(arg2):
assert arg2 == 2
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*ScopeMismatch*involved factories*",
"* def arg2*",
"* def arg1*",
"*1 error*"
])
def test_receives_funcargs_scope_mismatch_issue660(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture(scope="function")
def arg1():
return 1
@pytest.fixture(scope="module")
def arg2(arg1):
return arg1 + 1
def test_add(arg1, arg2):
assert arg2 == 2
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*ScopeMismatch*involved factories*",
"* def arg2*",
"*1 error*"
])
def test_funcarg_parametrized_and_used_twice(self, testdir):
testdir.makepyfile("""
import pytest
l = []
@pytest.fixture(params=[1,2])
def arg1(request):
l.append(1)
return request.param
@pytest.fixture()
def arg2(arg1):
return arg1 + 1
def test_add(arg1, arg2):
assert arg2 == arg1 + 1
assert len(l) == arg1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 passed*"
])
def test_factory_uses_unknown_funcarg_as_dependency_error(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture()
def fail(missing):
return
@pytest.fixture()
def call_fail(fail):
return
def test_missing(call_fail):
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*pytest.fixture()*
*def call_fail(fail)*
*pytest.fixture()*
*def fail*
*fixture*'missing'*not found*
""")
def test_factory_setup_as_classes_fails(self, testdir):
testdir.makepyfile("""
import pytest
class arg1:
def __init__(self, request):
self.x = 1
arg1 = pytest.fixture()(arg1)
""")
reprec = testdir.inline_run()
l = reprec.getfailedcollections()
assert len(l) == 1
def test_request_can_be_overridden(self, testdir):
testdir.makepyfile("""
import pytest
@pytest.fixture()
def request(request):
request.a = 1
return request
def test_request(request):
assert request.a == 1
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_usefixtures_marker(self, testdir):
testdir.makepyfile("""
import pytest
l = []
@pytest.fixture(scope="class")
def myfix(request):
request.cls.hello = "world"
l.append(1)
class TestClass:
def test_one(self):
assert self.hello == "world"
assert len(l) == 1
def test_two(self):
assert self.hello == "world"
assert len(l) == 1
pytest.mark.usefixtures("myfix")(TestClass)
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
def test_usefixtures_ini(self, testdir):
testdir.makeini("""
[pytest]
usefixtures = myfix
""")
testdir.makeconftest("""
import pytest
@pytest.fixture(scope="class")
def myfix(request):
request.cls.hello = "world"
""")
testdir.makepyfile("""
class TestClass:
def test_one(self):
assert self.hello == "world"
def test_two(self):
assert self.hello == "world"
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=2)
    def test_usefixtures_seen_in_showmarkers(self, testdir):
        """--markers output documents the usefixtures marker."""
        result = testdir.runpytest("--markers")
        result.stdout.fnmatch_lines("""
            *usefixtures(fixturename1*mark tests*fixtures*
        """)
    def test_request_instance_issue203(self, testdir):
        """Inside a method fixture, request.instance is the test class instance
        (issue #203)."""
        testdir.makepyfile("""
            import pytest
            class TestClass:
                @pytest.fixture
                def setup1(self, request):
                    assert self == request.instance
                    self.arg1 = 1
                def test_hello(self, setup1):
                    assert self.arg1 == 1
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
    def test_fixture_parametrized_with_iterator(self, testdir):
        """params may be a generator; the same decorator object can parametrize
        two fixtures independently."""
        testdir.makepyfile("""
            import pytest
            l = []
            def f():
                yield 1
                yield 2
            dec = pytest.fixture(scope="module", params=f())
            @dec
            def arg(request):
                return request.param
            @dec
            def arg2(request):
                return request.param
            def test_1(arg):
                l.append(arg)
            def test_2(arg2):
                l.append(arg2*10)
        """)
        reprec = testdir.inline_run("-v")
        reprec.assertoutcome(passed=4)
        # Both fixtures saw both params: arg produced 1,2 and arg2 produced 10,20.
        l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
        assert l == [1,2, 10,20]
class TestFixtureManagerParseFactories:
    """Tests for how the FixtureManager discovers and orders fixture factories."""

    def pytest_funcarg__testdir(self, request):
        """Override testdir with a conftest exposing the fixture manager ('fm'),
        the current item ('item'), and a sample 'hello' fixture."""
        testdir = request.getfuncargvalue("testdir")
        testdir.makeconftest("""
            def pytest_funcarg__hello(request):
                return "conftest"
            def pytest_funcarg__fm(request):
                return request._fixturemanager
            def pytest_funcarg__item(request):
                return request._pyfuncitem
        """)
        return testdir

    def test_parsefactories_evil_objects_issue214(self, testdir):
        """Collection must survive module attributes whose __getattr__ raises
        (issue #214)."""
        testdir.makepyfile("""
            class A:
                def __call__(self):
                    pass
                def __getattr__(self, name):
                    raise RuntimeError()
            a = A()
            def test_hello():
                pass
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1, failed=0)

    def test_parsefactories_conftest(self, testdir):
        """Each conftest-declared factory is registered exactly once under its
        pytest_funcarg__-prefixed function."""
        testdir.makepyfile("""
            def test_hello(item, fm):
                for name in ("fm", "hello", "item"):
                    faclist = fm.getfixturedefs(name, item.nodeid)
                    assert len(faclist) == 1
                    fac = faclist[0]
                    assert fac.func.__name__ == "pytest_funcarg__" + name
        """)
        reprec = testdir.inline_run("-s")
        reprec.assertoutcome(passed=1)

    def test_parsefactories_conftest_and_module_and_class(self, testdir):
        """Factories with the same name stack in conftest -> module -> class order."""
        testdir.makepyfile("""
            def pytest_funcarg__hello(request):
                return "module"
            class TestClass:
                def pytest_funcarg__hello(self, request):
                    return "class"
                def test_hello(self, item, fm):
                    faclist = fm.getfixturedefs("hello", item.nodeid)
                    print (faclist)
                    assert len(faclist) == 3
                    assert faclist[0].func(item._request) == "conftest"
                    assert faclist[1].func(item._request) == "module"
                    assert faclist[2].func(item._request) == "class"
        """)
        reprec = testdir.inline_run("-s")
        reprec.assertoutcome(passed=1)

    def test_parsefactories_relative_node_ids(self, testdir):
        """Fixture lookup works with node ids relative to a different cwd."""
        # example mostly taken from:
        # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
        runner = testdir.mkdir("runner")
        package = testdir.mkdir("package")
        package.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def one():
                return 1
        """))
        package.join("test_x.py").write(dedent("""\
            def test_x(one):
                assert one == 1
        """))
        sub = package.mkdir("sub")
        sub.join("__init__.py").ensure()
        sub.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def one():
                return 2
        """))
        sub.join("test_y.py").write(dedent("""\
            def test_x(one):
                assert one == 2
        """))
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)
        # Re-run from a sibling directory using a relative path; the nearest
        # conftest must still win for each test file.
        with runner.as_cwd():
            reprec = testdir.inline_run("..")
            reprec.assertoutcome(passed=2)
class TestAutouseDiscovery:
    """Tests for discovery and applicability of autouse fixtures."""

    def pytest_funcarg__testdir(self, testdir):
        """Override testdir with a conftest defining two autouse fixtures plus
        helpers exposing the fixture manager and current item."""
        testdir.makeconftest("""
            import pytest
            @pytest.fixture(autouse=True)
            def perfunction(request, tmpdir):
                pass
            @pytest.fixture()
            def arg1(tmpdir):
                pass
            @pytest.fixture(autouse=True)
            def perfunction2(arg1):
                pass
            def pytest_funcarg__fm(request):
                return request._fixturemanager
            def pytest_funcarg__item(request):
                return request._pyfuncitem
        """)
        return testdir

    def test_parsefactories_conftest(self, testdir):
        """_getautousenames reports exactly the two public autouse fixtures."""
        testdir.makepyfile("""
            from _pytest.pytester import get_public_names
            def test_check_setup(item, fm):
                autousenames = fm._getautousenames(item.nodeid)
                assert len(get_public_names(autousenames)) == 2
                assert "perfunction2" in autousenames
                assert "perfunction" in autousenames
        """)
        reprec = testdir.inline_run("-s")
        reprec.assertoutcome(passed=1)

    def test_two_classes_separated_autouse(self, testdir):
        """An autouse fixture defined in one class does not leak into another."""
        testdir.makepyfile("""
            import pytest
            class TestA:
                l = []
                @pytest.fixture(autouse=True)
                def setup1(self):
                    self.l.append(1)
                def test_setup1(self):
                    assert self.l == [1]
            class TestB:
                l = []
                @pytest.fixture(autouse=True)
                def setup2(self):
                    self.l.append(1)
                def test_setup2(self):
                    assert self.l == [1]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)

    def test_setup_at_classlevel(self, testdir):
        """A class-level autouse fixture runs before every test method."""
        testdir.makepyfile("""
            import pytest
            class TestClass:
                @pytest.fixture(autouse=True)
                def permethod(self, request):
                    request.instance.funcname = request.function.__name__
                def test_method1(self):
                    assert self.funcname == "test_method1"
                def test_method2(self):
                    assert self.funcname == "test_method2"
        """)
        reprec = testdir.inline_run("-s")
        reprec.assertoutcome(passed=2)

    @pytest.mark.xfail(reason="'enabled' feature not implemented")
    def test_setup_enabled_functionnode(self, testdir):
        """Speculative 'enabled=' fixture keyword gating autouse on markers
        (feature not implemented; expected to fail)."""
        testdir.makepyfile("""
            import pytest
            def enabled(parentnode, markers):
                return "needsdb" in markers
            @pytest.fixture(params=[1,2])
            def db(request):
                return request.param
            @pytest.fixture(enabled=enabled, autouse=True)
            def createdb(db):
                pass
            def test_func1(request):
                assert "db" not in request.fixturenames
            @pytest.mark.needsdb
            def test_func2(request):
                assert "db" in request.fixturenames
        """)
        reprec = testdir.inline_run("-s")
        reprec.assertoutcome(passed=2)

    def test_callables_nocode(self, testdir):
        """
        a imported mock.call would break setup/factory discovery
        due to it being callable and __code__ not being a code object
        """
        testdir.makepyfile("""
           class _call(tuple):
               def __call__(self, *k, **kw):
                   pass
               def __getattr__(self, k):
                   return self
           call = _call()
        """)
        reprec = testdir.inline_run("-s")
        # Nothing is collected and nothing crashes.
        reprec.assertoutcome(failed=0, passed=0)

    def test_autouse_in_conftests(self, testdir):
        """A failing autouse fixture in one directory's conftest only affects
        tests under that directory."""
        a = testdir.mkdir("a")
        b = testdir.mkdir("a1")
        conftest = testdir.makeconftest("""
            import pytest
            @pytest.fixture(autouse=True)
            def hello():
                xxx
        """)
        conftest.move(a.join(conftest.basename))
        a.join("test_something.py").write("def test_func(): pass")
        b.join("test_otherthing.py").write("def test_func(): pass")
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("""
            *1 passed*1 error*
        """)

    def test_autouse_in_module_and_two_classes(self, testdir):
        """Module-level autouse applies everywhere; class-level autouse only
        within its class (note TestA's test re-runs the module fixture)."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(autouse=True)
            def append1():
                l.append("module")
            def test_x():
                assert l == ["module"]
            class TestA:
                @pytest.fixture(autouse=True)
                def append2(self):
                    l.append("A")
                def test_hello(self):
                    assert l == ["module", "module", "A"], l
            class TestA2:
                def test_world(self):
                    assert l == ["module", "module", "A", "module"], l
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=3)
class TestAutouseManagement:
    """Tests for setup/teardown ordering and scoping of autouse fixtures."""

    def test_autouse_conftest_mid_directory(self, testdir):
        """An autouse fixture from a package conftest applies to tests in a
        nested sub-directory."""
        pkgdir = testdir.mkpydir("xyz123")
        pkgdir.join("conftest.py").write(py.code.Source("""
            import pytest
            @pytest.fixture(autouse=True)
            def app():
                import sys
                sys._myapp = "hello"
        """))
        t = pkgdir.ensure("tests", "test_app.py")
        t.write(py.code.Source("""
            import sys
            def test_app():
                assert sys._myapp == "hello"
        """))
        reprec = testdir.inline_run("-s")
        reprec.assertoutcome(passed=1)

    def test_autouse_honored_for_yield(self, testdir):
        """Autouse fixtures also run for (legacy) yield-generated tests."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(autouse=True)
            def tst():
                global x
                x = 3
            def test_gen():
                def f(hello):
                    assert x == abs(hello)
                yield f, 3
                yield f, -3
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)

    def test_funcarg_and_setup(self, testdir):
        """A module-scoped autouse fixture depending on a module-scoped funcarg
        triggers it once; both run a single time across tests."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(scope="module")
            def arg():
                l.append(1)
                return 0
            @pytest.fixture(scope="module", autouse=True)
            def something(arg):
                l.append(2)
            def test_hello(arg):
                assert len(l) == 2
                assert l == [1,2]
                assert arg == 0
            def test_hello2(arg):
                assert len(l) == 2
                assert l == [1,2]
                assert arg == 0
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)

    def test_uses_parametrized_resource(self, testdir):
        """An autouse fixture depending on a parametrized fixture causes the
        test to run once per param."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(params=[1,2])
            def arg(request):
                return request.param
            @pytest.fixture(autouse=True)
            def something(arg):
                l.append(arg)
            def test_hello():
                if len(l) == 1:
                    assert l == [1]
                elif len(l) == 2:
                    assert l == [1, 2]
                else:
                    0/0
        """)
        reprec = testdir.inline_run("-s")
        reprec.assertoutcome(passed=2)

    def test_session_parametrized_function(self, testdir):
        """A function-scoped autouse fixture can depend on a session-scoped
        parametrized fixture; params are observed in order."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(scope="session", params=[1,2])
            def arg(request):
               return request.param
            @pytest.fixture(scope="function", autouse=True)
            def append(request, arg):
                if request.function.__name__ == "test_some":
                    l.append(arg)
            def test_some():
                pass
            def test_result(arg):
                assert len(l) == arg
                assert l[:arg] == [1,2][:arg]
        """)
        reprec = testdir.inline_run("-v", "-s")
        reprec.assertoutcome(passed=4)

    def test_class_function_parametrization_finalization(self, testdir):
        """Finalizers fire once per (class param x function param) combination,
        in class-major order, for each of the two test classes."""
        p = testdir.makeconftest("""
            import pytest
            import pprint
            l = []
            @pytest.fixture(scope="function", params=[1,2])
            def farg(request):
                return request.param
            @pytest.fixture(scope="class", params=list("ab"))
            def carg(request):
                return request.param
            @pytest.fixture(scope="function", autouse=True)
            def append(request, farg, carg):
                def fin():
                    l.append("fin_%s%s" % (carg, farg))
                request.addfinalizer(fin)
        """)
        testdir.makepyfile("""
            import pytest
            class TestClass:
                def test_1(self):
                    pass
            class TestClass2:
                def test_2(self):
                    pass
        """)
        reprec = testdir.inline_run("-v","-s")
        reprec.assertoutcome(passed=8)
        config = reprec.getcalls("pytest_unconfigure")[0].config
        l = config.pluginmanager._getconftestmodules(p)[0].l
        assert l == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2

    def test_scope_ordering(self, testdir):
        """Autouse fixtures run by descending scope (module, class, function)
        regardless of definition order."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(scope="function", autouse=True)
            def fappend2():
                l.append(2)
            @pytest.fixture(scope="class", autouse=True)
            def classappend3():
                l.append(3)
            @pytest.fixture(scope="module", autouse=True)
            def mappend():
                l.append(1)
            class TestHallo:
                def test_method(self):
                    assert l == [1,3,2]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_parametrization_setup_teardown_ordering(self, testdir):
        """Class-scoped parametrization tears down fully for one param before
        setting up the next."""
        testdir.makepyfile("""
            import pytest
            l = []
            def pytest_generate_tests(metafunc):
                if metafunc.cls is not None:
                    metafunc.parametrize("item", [1,2], scope="class")
            class TestClass:
                @pytest.fixture(scope="class", autouse=True)
                def addteardown(self, item, request):
                    l.append("setup-%d" % item)
                    request.addfinalizer(lambda: l.append("teardown-%d" % item))
                def test_step1(self, item):
                    l.append("step1-%d" % item)
                def test_step2(self, item):
                    l.append("step2-%d" % item)
            def test_finish():
                print (l)
                assert l == ["setup-1", "step1-1", "step2-1", "teardown-1",
                             "setup-2", "step1-2", "step2-2", "teardown-2",]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=5)

    def test_ordering_autouse_before_explicit(self, testdir):
        """Autouse fixtures execute before explicitly requested fixtures."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(autouse=True)
            def fix1():
                l.append(1)
            @pytest.fixture()
            def arg1():
                l.append(2)
            def test_hello(arg1):
                assert l == [1,2]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    @pytest.mark.issue226
    @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00","p01"])
    @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10","p11"])
    def test_ordering_dependencies_torndown_first(self, testdir, param1, param2):
        """A dependent fixture is finalized before its dependency (issue #226),
        with and without parametrization on either fixture."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(%(param1)s)
            def arg1(request):
                request.addfinalizer(lambda: l.append("fin1"))
                l.append("new1")
            @pytest.fixture(%(param2)s)
            def arg2(request, arg1):
                request.addfinalizer(lambda: l.append("fin2"))
                l.append("new2")
            def test_arg(arg2):
                pass
            def test_check():
                assert l == ["new1", "new2", "fin2", "fin1"]
        """ % locals())
        reprec = testdir.inline_run("-s")
        reprec.assertoutcome(passed=2)
class TestFixtureMarker:
    """Tests for @pytest.fixture behavior: params, scopes, finalizer ordering,
    and interaction with @pytest.mark.parametrize."""

    def test_parametrize(self, testdir):
        """A parametrized fixture runs the requesting test once per param."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(params=["a", "b", "c"])
            def arg(request):
                return request.param
            l = []
            def test_param(arg):
                l.append(arg)
            def test_result():
                assert l == list("abc")
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=4)

    def test_multiple_parametrization_issue_736(self, testdir):
        """Fixture params and mark.parametrize combine into a full cross
        product (issue #736): 3 x 3 = 9 runs."""
        testdir.makepyfile("""
            import pytest

            @pytest.fixture(params=[1,2,3])
            def foo(request):
                return request.param

            @pytest.mark.parametrize('foobar', [4,5,6])
            def test_issue(foo, foobar):
                assert foo in [1,2,3]
                assert foobar in [4,5,6]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=9)

    @pytest.mark.parametrize('param_args', ["'fixt, val'", "'fixt,val'", "['fixt', 'val']", "('fixt', 'val')"])
    def test_override_parametrized_fixture_issue_979(self, testdir, param_args):
        """Make sure a parametrized argument can override a parametrized fixture.

        This was a regression introduced in the fix for #736.
        """
        testdir.makepyfile("""
            import pytest

            @pytest.fixture(params=[1, 2])
            def fixt(request):
                return request.param

            @pytest.mark.parametrize(%s, [(3, 'x'), (4, 'x')])
            def test_foo(fixt, val):
                pass
        """ % param_args)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2)

    def test_scope_session(self, testdir):
        """A module-scoped fixture is created once and shared by all tests in
        the module, including class methods."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(scope="module")
            def arg():
                l.append(1)
                return 1

            def test_1(arg):
                assert arg == 1
            def test_2(arg):
                assert arg == 1
                assert len(l) == 1
            class TestClass:
                def test3(self, arg):
                    assert arg == 1
                    assert len(l) == 1
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=3)

    def test_scope_session_exc(self, testdir):
        """A session fixture calling pytest.skip() skips all dependents but
        only executes once."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(scope="session")
            def fix():
               l.append(1)
               pytest.skip('skipping')

            def test_1(fix):
                pass
            def test_2(fix):
                pass
            def test_last():
                assert l == [1]
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(skipped=2, passed=1)

    def test_scope_session_exc_two_fix(self, testdir):
        """A skip inside a session fixture prevents dependent fixtures from
        ever running."""
        testdir.makepyfile("""
            import pytest
            l = []
            m = []
            @pytest.fixture(scope="session")
            def a():
                l.append(1)
                pytest.skip('skipping')
            @pytest.fixture(scope="session")
            def b(a):
                m.append(1)

            def test_1(b):
                pass
            def test_2(b):
                pass
            def test_last():
                assert l == [1]
                assert m == []
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(skipped=2, passed=1)

    def test_scope_exc(self, testdir):
        """A session fixture that skips runs only once even when requested from
        multiple test modules."""
        testdir.makepyfile(
            test_foo="""
                def test_foo(fix):
                    pass
            """,
            test_bar="""
                def test_bar(fix):
                    pass
            """,
            conftest="""
                import pytest
                reqs = []
                @pytest.fixture(scope="session")
                def fix(request):
                    reqs.append(1)
                    pytest.skip()
                @pytest.fixture
                def req_list():
                    return reqs
            """,
            test_real="""
                def test_last(req_list):
                    assert req_list == [1]
            """
        )
        reprec = testdir.inline_run()
        reprec.assertoutcome(skipped=2, passed=1)

    def test_scope_module_uses_session(self, testdir):
        """A module-scoped fixture is cached across every test in the module."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(scope="module")
            def arg():
                l.append(1)
                return 1

            def test_1(arg):
                assert arg == 1
            def test_2(arg):
                assert arg == 1
                assert len(l) == 1
            class TestClass:
                def test3(self, arg):
                    assert arg == 1
                    assert len(l) == 1
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=3)

    def test_scope_module_and_finalizer(self, testdir):
        """A module-scoped fixture is recreated per module and its finalizer
        fires when the module is left."""
        testdir.makeconftest("""
            import pytest
            finalized = []
            created = []
            @pytest.fixture(scope="module")
            def arg(request):
                created.append(1)
                assert request.scope == "module"
                request.addfinalizer(lambda: finalized.append(1))
            def pytest_funcarg__created(request):
                return len(created)
            def pytest_funcarg__finalized(request):
                return len(finalized)
        """)
        testdir.makepyfile(
            test_mod1="""
                def test_1(arg, created, finalized):
                    assert created == 1
                    assert finalized == 0
                def test_2(arg, created, finalized):
                    assert created == 1
                    assert finalized == 0""",
            test_mod2="""
                def test_3(arg, created, finalized):
                    assert created == 2
                    assert finalized == 1""",
            test_mode3="""
                def test_4(arg, created, finalized):
                    assert created == 3
                    assert finalized == 2
            """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=4)

    @pytest.mark.parametrize("method", [
        'request.getfuncargvalue("arg")',
        'request.cached_setup(lambda: None, scope="function")',
    ], ids=["getfuncargvalue", "cached_setup"])
    def test_scope_mismatch_various(self, testdir, method):
        """Requesting function-scoped setup from a session-scoped fixture is a
        ScopeMismatch error, via getfuncargvalue or cached_setup."""
        testdir.makeconftest("""
            import pytest
            finalized = []
            created = []
            @pytest.fixture(scope="function")
            def arg(request):
                pass
        """)
        testdir.makepyfile(
            test_mod1="""
                import pytest
                @pytest.fixture(scope="session")
                def arg(request):
                    %s
                def test_1(arg):
                    pass
            """ % method)
        result = testdir.runpytest()
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*ScopeMismatch*You tried*function*session*request*",
        ])

    def test_register_only_with_mark(self, testdir):
        """A module fixture can extend a same-named conftest fixture by
        requesting it as an argument."""
        testdir.makeconftest("""
            import pytest
            @pytest.fixture()
            def arg():
                return 1
        """)
        testdir.makepyfile(
            test_mod1="""
                import pytest
                @pytest.fixture()
                def arg(arg):
                    return arg + 1
                def test_1(arg):
                    assert arg == 2
            """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_parametrize_and_scope(self, testdir):
        """A module-scoped parametrized fixture yields one test run per param."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope="module", params=["a", "b", "c"])
            def arg(request):
                return request.param
            l = []
            def test_param(arg):
                l.append(arg)
        """)
        reprec = testdir.inline_run("-v")
        reprec.assertoutcome(passed=3)
        l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
        assert len(l) == 3
        assert "a" in l
        assert "b" in l
        assert "c" in l

    def test_scope_mismatch(self, testdir):
        """A session fixture depending on a function-scoped fixture of the same
        name errors with ScopeMismatch."""
        testdir.makeconftest("""
            import pytest
            @pytest.fixture(scope="function")
            def arg(request):
                pass
        """)
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope="session")
            def arg(arg):
                pass
            def test_mismatch(arg):
                pass
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*ScopeMismatch*",
            "*1 error*",
        ])

    def test_parametrize_separated_order(self, testdir):
        """With module scope, all tests for one param run before the next
        param's tests."""
        testdir.makepyfile("""
            import pytest

            @pytest.fixture(scope="module", params=[1, 2])
            def arg(request):
                return request.param

            l = []
            def test_1(arg):
                l.append(arg)
            def test_2(arg):
                l.append(arg)
        """)
        reprec = testdir.inline_run("-v")
        reprec.assertoutcome(passed=4)
        l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
        assert l == [1,1,2,2]

    def test_module_parametrized_ordering(self, testdir):
        """Tests are grouped to minimize setups: by session param first, then
        module param, across two modules (exact -v ordering pinned below)."""
        testdir.makeconftest("""
            import pytest

            @pytest.fixture(scope="session", params="s1 s2".split())
            def sarg():
                pass
            @pytest.fixture(scope="module", params="m1 m2".split())
            def marg():
                pass
        """)
        testdir.makepyfile(test_mod1="""
            def test_func(sarg):
                pass
            def test_func1(marg):
                pass
        """, test_mod2="""
            def test_func2(sarg):
                pass
            def test_func3(sarg, marg):
                pass
            def test_func3b(sarg, marg):
                pass
            def test_func4(marg):
                pass
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines("""
            test_mod1.py::test_func[s1] PASSED
            test_mod2.py::test_func2[s1] PASSED
            test_mod2.py::test_func3[s1-m1] PASSED
            test_mod2.py::test_func3b[s1-m1] PASSED
            test_mod2.py::test_func3[s1-m2] PASSED
            test_mod2.py::test_func3b[s1-m2] PASSED
            test_mod1.py::test_func[s2] PASSED
            test_mod2.py::test_func2[s2] PASSED
            test_mod2.py::test_func3[s2-m1] PASSED
            test_mod2.py::test_func3b[s2-m1] PASSED
            test_mod2.py::test_func4[m1] PASSED
            test_mod2.py::test_func3[s2-m2] PASSED
            test_mod2.py::test_func3b[s2-m2] PASSED
            test_mod2.py::test_func4[m2] PASSED
            test_mod1.py::test_func1[m1] PASSED
            test_mod1.py::test_func1[m2] PASSED
        """)

    def test_class_ordering(self, testdir):
        """Class-scoped params group test runs per class before moving to the
        next class param (exact -v ordering pinned below)."""
        testdir.makeconftest("""
            import pytest

            l = []

            @pytest.fixture(scope="function", params=[1,2])
            def farg(request):
                return request.param

            @pytest.fixture(scope="class", params=list("ab"))
            def carg(request):
                return request.param

            @pytest.fixture(scope="function", autouse=True)
            def append(request, farg, carg):
                def fin():
                    l.append("fin_%s%s" % (carg, farg))
                request.addfinalizer(fin)
        """)
        testdir.makepyfile("""
            import pytest

            class TestClass2:
                def test_1(self):
                    pass
                def test_2(self):
                    pass
            class TestClass:
                def test_3(self):
                    pass
        """)
        result = testdir.runpytest("-vs")
        result.stdout.fnmatch_lines("""
            test_class_ordering.py::TestClass2::test_1[1-a] PASSED
            test_class_ordering.py::TestClass2::test_1[2-a] PASSED
            test_class_ordering.py::TestClass2::test_2[1-a] PASSED
            test_class_ordering.py::TestClass2::test_2[2-a] PASSED
            test_class_ordering.py::TestClass2::test_1[1-b] PASSED
            test_class_ordering.py::TestClass2::test_1[2-b] PASSED
            test_class_ordering.py::TestClass2::test_2[1-b] PASSED
            test_class_ordering.py::TestClass2::test_2[2-b] PASSED
            test_class_ordering.py::TestClass::test_3[1-a] PASSED
            test_class_ordering.py::TestClass::test_3[2-a] PASSED
            test_class_ordering.py::TestClass::test_3[1-b] PASSED
            test_class_ordering.py::TestClass::test_3[2-b] PASSED
        """)

    def test_parametrize_separated_order_higher_scope_first(self, testdir):
        """Function-scoped params cycle within each module-scoped param; the
        full create/test/fin trace is pinned in `expected`."""
        testdir.makepyfile("""
            import pytest

            @pytest.fixture(scope="function", params=[1, 2])
            def arg(request):
                param = request.param
                request.addfinalizer(lambda: l.append("fin:%s" % param))
                l.append("create:%s" % param)
                return request.param

            @pytest.fixture(scope="module", params=["mod1", "mod2"])
            def modarg(request):
                param = request.param
                request.addfinalizer(lambda: l.append("fin:%s" % param))
                l.append("create:%s" % param)
                return request.param

            l = []
            def test_1(arg):
                l.append("test1")
            def test_2(modarg):
                l.append("test2")
            def test_3(arg, modarg):
                l.append("test3")
            def test_4(modarg, arg):
                l.append("test4")
        """)
        reprec = testdir.inline_run("-v")
        reprec.assertoutcome(passed=12)
        l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
        expected = [
            'create:1', 'test1', 'fin:1', 'create:2', 'test1',
            'fin:2', 'create:mod1', 'test2', 'create:1', 'test3',
            'fin:1', 'create:2', 'test3', 'fin:2', 'create:1',
            'test4', 'fin:1', 'create:2', 'test4', 'fin:2',
            'fin:mod1', 'create:mod2', 'test2', 'create:1', 'test3',
            'fin:1', 'create:2', 'test3', 'fin:2', 'create:1',
            'test4', 'fin:1', 'create:2', 'test4', 'fin:2',
            'fin:mod2']
        import pprint
        pprint.pprint(list(zip(l, expected)))
        assert l == expected

    def test_parametrized_fixture_teardown_order(self, testdir):
        """Class-scoped finalizers run in reverse registration order for each
        param, leaving the list empty at the end."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(params=[1,2], scope="class")
            def param1(request):
                return request.param

            l = []

            class TestClass:
                @classmethod
                @pytest.fixture(scope="class", autouse=True)
                def setup1(self, request, param1):
                    l.append(1)
                    request.addfinalizer(self.teardown1)
                @classmethod
                def teardown1(self):
                    assert l.pop() == 1
                @pytest.fixture(scope="class", autouse=True)
                def setup2(self, request, param1):
                    l.append(2)
                    request.addfinalizer(self.teardown2)
                @classmethod
                def teardown2(self):
                    assert l.pop() == 2
                def test(self):
                    pass

            def test_finish():
                assert not l
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines("""
            *3 passed*
        """)
        assert "error" not in result.stdout.str()

    def test_fixture_finalizer(self, testdir):
        """A finalizer registered by a conftest fixture still runs when the
        fixture is overridden/extended in a subdirectory test module."""
        testdir.makeconftest("""
            import pytest
            import sys

            @pytest.fixture
            def browser(request):

                def finalize():
                    sys.stdout.write('Finalized')
                request.addfinalizer(finalize)
                return {}
        """)
        b = testdir.mkdir("subdir")
        b.join("test_overriden_fixture_finalizer.py").write(dedent("""
            import pytest
            @pytest.fixture
            def browser(browser):
                browser['visited'] = True
                return browser

            def test_browser(browser):
                assert browser['visited'] is True
        """))
        reprec = testdir.runpytest("-s")
        for test in ['test_browser']:
            reprec.stdout.fnmatch_lines('*Finalized*')

    def test_class_scope_with_normal_tests(self, testdir):
        """A class-scoped fixture is created once per class (and once for the
        module-level test), giving values 1, 2, 3."""
        testpath = testdir.makepyfile("""
            import pytest

            class Box:
                value = 0

            @pytest.fixture(scope='class')
            def a(request):
                Box.value += 1
                return Box.value

            def test_a(a):
                assert a == 1

            class Test1:
                def test_b(self, a):
                    assert a == 2

            class Test2:
                def test_c(self, a):
                    assert a == 3""")
        reprec = testdir.inline_run(testpath)
        for test in ['test_a', 'test_b', 'test_c']:
            assert reprec.matchreport(test).passed

    def test_request_is_clean(self, testdir):
        """Finalizers registered per param do not leak between param runs."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(params=[1, 2])
            def fix(request):
                request.addfinalizer(lambda: l.append(request.param))
            def test_fix(fix):
                pass
        """)
        reprec = testdir.inline_run("-s")
        l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
        assert l == [1,2]

    def test_parametrize_separated_lifecycle(self, testdir):
        """A module-scoped parametrized fixture is finalized after all of one
        param's tests and before the next param's tests."""
        testdir.makepyfile("""
            import pytest

            l = []
            @pytest.fixture(scope="module", params=[1, 2])
            def arg(request):
                x = request.param
                request.addfinalizer(lambda: l.append("fin%s" % x))
                return request.param
            def test_1(arg):
                l.append(arg)
            def test_2(arg):
                l.append(arg)
        """)
        reprec = testdir.inline_run("-vs")
        reprec.assertoutcome(passed=4)
        l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
        import pprint
        pprint.pprint(l)
        #assert len(l) == 6
        assert l[0] == l[1] == 1
        assert l[2] == "fin1"
        assert l[3] == l[4] == 2
        assert l[5] == "fin2"

    def test_parametrize_function_scoped_finalizers_called(self, testdir):
        """Function-scoped parametrized fixtures are finalized after every
        single test run."""
        testdir.makepyfile("""
            import pytest

            @pytest.fixture(scope="function", params=[1, 2])
            def arg(request):
                x = request.param
                request.addfinalizer(lambda: l.append("fin%s" % x))
                return request.param

            l = []
            def test_1(arg):
                l.append(arg)
            def test_2(arg):
                l.append(arg)
            def test_3():
                assert len(l) == 8
                assert l == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"]
        """)
        reprec = testdir.inline_run("-v")
        reprec.assertoutcome(passed=5)

    @pytest.mark.issue246
    @pytest.mark.parametrize("scope", ["session", "function", "module"])
    def test_finalizer_order_on_parametrization(self, scope, testdir):
        """Dependent fixtures are finalized before their dependencies across
        all scopes (issue #246)."""
        testdir.makepyfile("""
            import pytest
            l = []

            @pytest.fixture(scope=%(scope)r, params=["1"])
            def fix1(request):
                return request.param

            @pytest.fixture(scope=%(scope)r)
            def fix2(request, base):
                def cleanup_fix2():
                    assert not l, "base should not have been finalized"
                request.addfinalizer(cleanup_fix2)

            @pytest.fixture(scope=%(scope)r)
            def base(request, fix1):
                def cleanup_base():
                    l.append("fin_base")
                    print ("finalizing base")
                request.addfinalizer(cleanup_base)

            def test_begin():
                pass
            def test_baz(base, fix2):
                pass
            def test_other():
                pass
        """ % {"scope": scope})
        reprec = testdir.inline_run("-lvs")
        reprec.assertoutcome(passed=3)

    @pytest.mark.issue396
    def test_class_scope_parametrization_ordering(self, testdir):
        """Class-scoped param fixtures are torn down per class, yielding the
        exact interleaving asserted below (issue #396)."""
        testdir.makepyfile("""
            import pytest
            l = []
            @pytest.fixture(params=["John", "Doe"], scope="class")
            def human(request):
                request.addfinalizer(lambda: l.append("fin %s" % request.param))
                return request.param

            class TestGreetings:
                def test_hello(self, human):
                    l.append("test_hello")

            class TestMetrics:
                def test_name(self, human):
                    l.append("test_name")

                def test_population(self, human):
                    l.append("test_population")
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=6)
        l = reprec.getcalls("pytest_runtest_call")[0].item.module.l
        assert l == ["test_hello", "fin John", "test_hello", "fin Doe",
                     "test_name", "test_population", "fin John",
                     "test_name", "test_population", "fin Doe"]

    def test_parametrize_setup_function(self, testdir):
        """A module-scoped autouse fixture depending on a module-scoped
        parametrized fixture sets up/finalizes once per param."""
        testdir.makepyfile("""
            import pytest

            @pytest.fixture(scope="module", params=[1, 2])
            def arg(request):
                return request.param

            @pytest.fixture(scope="module", autouse=True)
            def mysetup(request, arg):
                request.addfinalizer(lambda: l.append("fin%s" % arg))
                l.append("setup%s" % arg)

            l = []
            def test_1(arg):
                l.append(arg)
            def test_2(arg):
                l.append(arg)
            def test_3():
                import pprint
                pprint.pprint(l)
                if arg == 1:
                    assert l == ["setup1", 1, 1, ]
                elif arg == 2:
                    assert l == ["setup1", 1, 1, "fin1",
                                 "setup2", 2, 2, ]

        """)
        reprec = testdir.inline_run("-v")
        reprec.assertoutcome(passed=6)

    def test_fixture_marked_function_not_collected_as_test(self, testdir):
        """A function named test_* decorated as a fixture is not collected as
        a test item."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def test_app():
                return 1

            def test_something(test_app):
                assert test_app == 1
        """)
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_params_and_ids(self, testdir):
        """Explicit ids= override the default object repr in test ids."""
        testdir.makepyfile("""
            import pytest

            @pytest.fixture(params=[object(), object()],
                            ids=['alpha', 'beta'])
            def fix(request):
                return request.param

            def test_foo(fix):
                assert 1
        """)
        res = testdir.runpytest('-v')
        res.stdout.fnmatch_lines([
            '*test_foo*alpha*',
            '*test_foo*beta*'])

    def test_params_and_ids_yieldfixture(self, testdir):
        """ids= works the same for yield_fixture as for plain fixtures."""
        testdir.makepyfile("""
            import pytest

            @pytest.yield_fixture(params=[object(), object()],
                                  ids=['alpha', 'beta'])
            def fix(request):
                 yield request.param

            def test_foo(fix):
                assert 1
        """)
        res = testdir.runpytest('-v')
        res.stdout.fnmatch_lines([
            '*test_foo*alpha*',
            '*test_foo*beta*'])
class TestRequestScopeAccess:
    """Verify which request attributes are accessible per fixture scope.

    Each row is (scope, attributes that must exist, attributes whose access
    must raise AttributeError)."""
    pytestmark = pytest.mark.parametrize(("scope", "ok", "error"),[
        ["session", "", "fspath class function module"],
        ["module", "module fspath", "cls function"],
        ["class", "module fspath cls", "function"],
        ["function", "module fspath cls function", ""]
    ])

    def test_setup(self, testdir, scope, ok, error):
        """Attribute availability for an autouse fixture at the given scope."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope=%r, autouse=True)
            def myscoped(request):
                for x in %r:
                    assert hasattr(request, x)
                for x in %r:
                    pytest.raises(AttributeError, lambda:
                        getattr(request, x))
                assert request.session
                assert request.config
            def test_func():
                pass
        """ %(scope, ok.split(), error.split()))
        reprec = testdir.inline_run("-l")
        reprec.assertoutcome(passed=1)

    def test_funcarg(self, testdir, scope, ok, error):
        """Attribute availability for an explicitly requested fixture at the
        given scope."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope=%r)
            def arg(request):
                for x in %r:
                    assert hasattr(request, x)
                for x in %r:
                    pytest.raises(AttributeError, lambda:
                        getattr(request, x))
                assert request.session
                assert request.config
            def test_func(arg):
                pass
        """ %(scope, ok.split(), error.split()))
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
class TestErrors:
    """Error reporting for broken fixtures."""

    def test_subfactory_missing_funcarg(self, testdir):
        """A fixture requesting an unknown name produces a clear error."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture()
            def gen(qwe123):
                return 1
            def test_something(gen):
                pass
        """)
        result = testdir.runpytest()
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*def gen(qwe123):*",
            "*fixture*qwe123*not found*",
            "*1 error*",
        ])

    def test_issue498_fixture_finalizer_failing(self, testdir):
        """A raising finalizer reports a teardown error for each test but does
        not break fixture re-creation (issue #498)."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def fix1(request):
                def f():
                    raise KeyError
                request.addfinalizer(f)
                return object()
            l = []
            def test_1(fix1):
                l.append(fix1)
            def test_2(fix1):
                l.append(fix1)
            def test_3():
                assert l[0] != l[1]
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("""
            *ERROR*teardown*test_1*
            *KeyError*
            *ERROR*teardown*test_2*
            *KeyError*
            *3 pass*2 error*
        """)

    def test_setupfunc_missing_funcarg(self, testdir):
        """An autouse fixture requesting an unknown name also errors clearly."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(autouse=True)
            def gen(qwe123):
                return 1
            def test_something():
                pass
        """)
        result = testdir.runpytest()
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*def gen(qwe123):*",
            "*fixture*qwe123*not found*",
            "*1 error*",
        ])
class TestShowFixtures:
    """Tests for the --fixtures / --funcargs listing output."""

    def test_funcarg_compat(self, testdir):
        """Legacy --funcargs flag maps to the showfixtures option."""
        config = testdir.parseconfigure("--funcargs")
        assert config.option.showfixtures

    def test_show_fixtures(self, testdir):
        """--fixtures lists builtin fixtures such as tmpdir with their docs."""
        result = testdir.runpytest("--fixtures")
        result.stdout.fnmatch_lines([
                "*tmpdir*",
                "*temporary directory*",
            ]
        )

    def test_show_fixtures_verbose(self, testdir):
        """-v adds the defining file/location to each fixture entry."""
        result = testdir.runpytest("--fixtures", "-v")
        result.stdout.fnmatch_lines([
                "*tmpdir*--*tmpdir.py*",
                "*temporary directory*",
            ]
        )

    def test_show_fixtures_testmodule(self, testdir):
        """Fixtures with a leading-underscore name are hidden from the listing."""
        p = testdir.makepyfile('''
            import pytest
            @pytest.fixture
            def _arg0():
                """ hidden """
            @pytest.fixture
            def arg1():
                """  hello world """
        ''')
        result = testdir.runpytest("--fixtures", p)
        result.stdout.fnmatch_lines("""
            *tmpdir
            *fixtures defined from*
            *arg1*
            *hello world*
        """)
        assert "arg0" not in result.stdout.str()

    @pytest.mark.parametrize("testmod", [True, False])
    def test_show_fixtures_conftest(self, testdir, testmod):
        """Conftest fixtures show up whether or not a test module exists."""
        testdir.makeconftest('''
            import pytest
            @pytest.fixture
            def arg1():
                """  hello world """
        ''')
        if testmod:
            testdir.makepyfile("""
                def test_hello():
                    pass
            """)
        result = testdir.runpytest("--fixtures")
        result.stdout.fnmatch_lines("""
            *tmpdir*
            *fixtures defined from*conftest*
            *arg1*
            *hello world*
        """)

    def test_show_fixtures_trimmed_doc(self, testdir):
        """Docstring leading/trailing blank lines are trimmed in the listing."""
        p = testdir.makepyfile('''
            import pytest
            @pytest.fixture
            def arg1():
                """
                line1
                line2

                """
            @pytest.fixture
            def arg2():
                """
                line1
                line2

                """
        ''')
        result = testdir.runpytest("--fixtures", p)
        result.stdout.fnmatch_lines("""
            * fixtures defined from test_show_fixtures_trimmed_doc *
            arg2
                line1
                line2
            arg1
                line1
                line2

        """)

    def test_show_fixtures_different_files(self, testdir):
        """
        #833: --fixtures only shows fixtures from first file
        """
        testdir.makepyfile(test_a='''
            import pytest

            @pytest.fixture
            def fix_a():
                """Fixture A"""
                pass

            def test_a(fix_a):
                pass
        ''')
        testdir.makepyfile(test_b='''
            import pytest

            @pytest.fixture
            def fix_b():
                """Fixture B"""
                pass

            def test_b(fix_b):
                pass
        ''')
        result = testdir.runpytest("--fixtures")
        result.stdout.fnmatch_lines("""
            * fixtures defined from test_a *
            fix_a
                Fixture A

            * fixtures defined from test_b *
            fix_b
                Fixture B
        """)
class TestContextManagerFixtureFuncs:
    """Tests for generator ("yield") fixtures and their error reporting."""
    def test_simple(self, testdir):
        """Default scope: setup/teardown run around every test."""
        testdir.makepyfile("""
            import pytest
            @pytest.yield_fixture
            def arg1():
                print ("setup")
                yield 1
                print ("teardown")
            def test_1(arg1):
                print ("test1 %s" % arg1)
            def test_2(arg1):
                print ("test2 %s" % arg1)
                assert 0
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *setup*
            *test1 1*
            *teardown*
            *setup*
            *test2 1*
            *teardown*
        """)
    def test_scoped(self, testdir):
        """Module scope: one setup/teardown brackets all tests."""
        testdir.makepyfile("""
            import pytest
            @pytest.yield_fixture(scope="module")
            def arg1():
                print ("setup")
                yield 1
                print ("teardown")
            def test_1(arg1):
                print ("test1 %s" % arg1)
            def test_2(arg1):
                print ("test2 %s" % arg1)
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *setup*
            *test1 1*
            *test2 1*
            *teardown*
        """)
    def test_setup_exception(self, testdir):
        """A failure before the yield is reported as a setup error."""
        testdir.makepyfile("""
            import pytest
            @pytest.yield_fixture(scope="module")
            def arg1():
                pytest.fail("setup")
                yield 1
            def test_1(arg1):
                pass
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *pytest.fail*setup*
            *1 error*
        """)
    def test_teardown_exception(self, testdir):
        """A failure after the yield is a teardown error; the test still passes."""
        testdir.makepyfile("""
            import pytest
            @pytest.yield_fixture(scope="module")
            def arg1():
                yield 1
                pytest.fail("teardown")
            def test_1(arg1):
                pass
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *pytest.fail*teardown*
            *1 passed*1 error*
        """)
    def test_yields_more_than_one(self, testdir):
        """A yield fixture must yield exactly once."""
        testdir.makepyfile("""
            import pytest
            @pytest.yield_fixture(scope="module")
            def arg1():
                yield 1
                yield 2
            def test_1(arg1):
                pass
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *fixture function*
            *test_yields*:2*
        """)
    def test_no_yield(self, testdir):
        """A yield_fixture that merely returns is rejected with a hint."""
        testdir.makepyfile("""
            import pytest
            @pytest.yield_fixture(scope="module")
            def arg1():
                return 1
            def test_1(arg1):
                pass
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *yield_fixture*requires*yield*
            *yield_fixture*
            *def arg1*
        """)
    def test_yield_not_allowed_in_non_yield(self, testdir):
        """A plain @pytest.fixture may not contain a yield here."""
        testdir.makepyfile("""
            import pytest
            @pytest.fixture(scope="module")
            def arg1():
                yield 1
            def test_1(arg1):
                pass
        """)
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("""
            *fixture*cannot use*yield*
            *def arg1*
        """)
| mit |
andrewgailey/robogen | robogen/rgkit/backup bots/kamikaze112213.py | 1 | 6184 | # kamikaze112213 by hephaestus
# http://robotgame.org/viewrobot/5830
import rg
import operator
class Robot:
    """Robot Game bot: presses toward the weakest enemy, attacks adjacent
    targets, and suicides when surrounded or about to die anyway."""
    def act(self, game):
        """Decide this turn's action; returns a robotgame action list
        (['attack', loc], ['move', loc], ['suicide'] or ['guard'])."""
        # NOTE(review): adjacent_robots and adjacent_friendlies are computed
        # but never used below.
        adjacent_robots = self.get_adjacent_robots(game)
        adjacent_friendlies = self.get_adjacent_robots(game, operator.__eq__)
        adjacent_enemies = self.get_adjacent_robots(game, operator.__ne__)
        all_enemies = self.get_all_robots(game, operator.__ne__)
        # "The value of the key parameter should be a function that takes
        # a single argument and returns a key to use for sorting purposes."
        def query(bot_dict, sorting_function, offset=0):
            # sort a {location: robot} dict; `offset` is unused here, the
            # callers index into the returned list themselves
            organized = sorted(bot_dict.items(), key=sorting_function)
            # returns a list of tuples, [(key, value),... ]
            return organized
        def get_weakest_enemy(offset=0):
            return query(all_enemies, lambda t: t[1].hp)[offset][1]
        def get_weakest_adjacent_enemy(offset=0):
            return query(adjacent_enemies, lambda t: t[1].hp)[offset][1]
        # first_enemy_location = get_first_enemy_location()
        weakest_enemy = get_weakest_enemy()
        target_enemy = weakest_enemy
        if len(adjacent_enemies) > 0:
            weakest_adjacent_enemy = get_weakest_adjacent_enemy()
            target_enemy = weakest_adjacent_enemy
        # move toward the center, if moving there would not put you in range of 2 robots
        target_pos = rg.toward(self.location, weakest_enemy.location)
        # figure out if any friendly robots would also want to move to our target
        adjacent_to_target_friendlies = self.get_adjacent_robots_to(target_pos, game, operator.__eq__)
        # if there are enemies around, attack them
        # also consider suiciding when it will guarantee a kill, meaning enemy < 15 hp
        suicide_threshold = 3 # 3 is better than 4 with 83% confidence, 7-42, 10-34 vs 3-43, 7-38
        # 4 is [55, 30, 15] against 3
        def has_suicide_priority():
            # NOTE(review): currently unused — its only call site is in the
            # commented-out suicide heuristic below.
            adjacent_allies_to_target_enemy = self.get_adjacent_robots(game, operator.__eq__)
            weakest_allies_next_to_adjacent_target_enemy = query(adjacent_allies_to_target_enemy, lambda t: t[1].hp)
            return self.location == weakest_allies_next_to_adjacent_target_enemy[0][0]
        if len(adjacent_enemies) > 0 and len(adjacent_enemies) < suicide_threshold:
            # following line is better by 102-20-17 over just self.hp < 10
            # inspired by peterm's stupid 2.6 bot
            # assuming all adjacent enemies attacked me, if I would die
            # i should instead suicide
            if self.hp < (10*len(adjacent_enemies)):
                return ['suicide']
            # IDEA: if i could kill the enemy with 1 suicide instead of two attacks
            # NOTE: if multiple allies are going for this target, i'll actually lose too many bots
            # bad idea, 0-20 against self
            # if weakest_adjacent_enemy.hp < 15 and weakest_adjacent_enemy.hp > 8 and has_suicide_priority():
            # return ['suicide']
            # if you could kill 2+ bots by suidiciding, do it
            # should also avoid over-killing robots
            return ['attack', weakest_adjacent_enemy.location]
        elif len(adjacent_enemies) >= suicide_threshold:
            return ['suicide']
        #not using this priority method because it breaks on the server for some reason
        def byroboidhas_priority(): # if i'm a newer bot, I have priority
            for loc,bot in adjacent_to_target_friendlies.items():
                their_target_pos = rg.toward(loc, weakest_enemy.location)
                # check if bots would collide
                if their_target_pos == target_pos:
                    if self.robot_id > bot.robot_id:
                        return False
            return True
        def has_priority(): # if i'm more bottom or more to the right, i'll take priority
            for loc,bot in adjacent_to_target_friendlies.items():
                their_target_pos = rg.toward(loc, weakest_enemy.location)
                # check if bots would collide
                if their_target_pos == target_pos:
                    if self.location[0] < loc[0] or self.location[1] < loc[1]:
                        #don't move then, do something else
                        return False
            return True
        if self.location != target_pos and has_priority():
            if 'obstacle' not in rg.loc_types(target_pos):
                adjacent_to_target_enemies = self.get_adjacent_robots_to(target_pos, game, operator.__ne__)
                # if len(adjacent_to_target_enemies) <= 1 or len(adjacent_to_target_enemies) >= 3:
                return ['move', target_pos]
        #if we couldn't decide to do anything else, just guard
        return self.guard()
    def toward(curr, dest):
        # NOTE(review): shadows rg.toward but lacks `self`, so it cannot be
        # called as a method; the class always uses rg.toward instead, making
        # this dead code. The diagonal case is stubbed — see BROKEN FIX below.
        if curr == dest:
            return curr
        x0, y0 = curr
        x, y = dest
        x_diff, y_diff = x - x0, y - y0
        if abs(x_diff) < abs(y_diff):
            return (x0, y0 + y_diff / abs(y_diff))
        elif abs(x_diff) == abs(y_diff):
            # BROKEN FIX
            return (0, 0)
        else:
            return (x0 + x_diff / abs(x_diff), y0)
    def guard(self):
        """Fallback action when no attack/move/suicide was chosen."""
        return ['guard']
    def get_all_robots(self, game, player_comparator=None):
        """Return {location: robot} for every robot whose player_id satisfies
        player_comparator(self.player_id, bot.player_id); all bots if None."""
        def generate():
            for loc,bot in game.get('robots').items():
                if player_comparator == None or player_comparator(self.player_id, bot.player_id):
                    yield (loc, bot)
        return dict(generate())
    def get_adjacent_robots_to(self, some_location, game, player_comparator=None):
        """Like get_all_robots, restricted to robots within walk distance 1
        of some_location (distance 0, i.e. standing on it, included)."""
        def generate():
            for loc,bot in game.get('robots').items():
                if rg.wdist(loc, some_location) <= 1:
                    if player_comparator == None or player_comparator(self.player_id, bot.player_id):
                        yield (loc, bot)
        return dict(generate())
    def get_adjacent_robots(self, game, player_comparator=None):
        """Robots adjacent to this robot's own location."""
        return self.get_adjacent_robots_to(self.location, game, player_comparator)
| unlicense |
tjcsl/cslbot | cslbot/commands/issue.py | 1 | 4125 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-2018 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Tris Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from random import choice
from requests import get
from ..helpers import arguments
from ..helpers.command import Command
from ..helpers.orm import Issues
from ..helpers.web import create_issue
@Command(['issue', 'bug'], ['source', 'db', 'config', 'type', 'is_admin', 'nick'])
def cmd(send, msg, args):
    """Files a github issue or gets a open one.
    Syntax: {command} <title [--desc description]|--get <number>>
    """
    # NOTE: the docstring above doubles as the bot's help text; keep it terse.
    # Dispatch: privmsg -> refuse; number/--get -> show issue; no title ->
    # random open issue; title from admin -> create on GitHub; title from
    # anyone else -> queue locally for approval.
    repo = args['config']['api']['githubrepo']
    apikey = args['config']['api']['githubapikey']
    if not repo:
        send("GitHub repository undefined in config.cfg!")
        return
    parser = arguments.ArgParser(args['config'])
    # positional words form the title; --description collects the body text
    parser.add_argument('title', nargs='*', default='')
    parser.add_argument('--get', '--show', action='store_true')
    parser.add_argument('--description', nargs='+', default="No description given.")
    cmdargs, remainder = parser.parse_known_args(msg)
    # nargs='*'/'+' yield lists; flatten them back into plain strings
    if isinstance(cmdargs.title, list):
        cmdargs.title = ' '.join(cmdargs.title)
    if isinstance(cmdargs.description, list):
        cmdargs.description = ' '.join(cmdargs.description)
    if remainder:
        # unrecognized tokens are treated as extra title words
        cmdargs.title = "%s %s" % (cmdargs.title, ' '.join(remainder))
    if args['type'] == 'privmsg':
        send('You want to let everybody know about your problems, right?')
    elif cmdargs.get or cmdargs.title.isdigit():
        # look up one existing issue by number (unauthenticated API call)
        issue = get('https://api.github.com/repos/%s/issues/%d' % (repo, int(cmdargs.title))).json()
        if 'message' in issue:
            # GitHub returns a 'message' field on API errors (e.g. 404)
            send("Invalid Issue Number")
        else:
            send("%s (%s) -- %s" % (issue['title'], issue['state'], issue['html_url']))
    elif not cmdargs.title:
        # no arguments: fetch all open issues (100 per page) and pick one
        issues = []
        n = 1
        while True:
            headers = {'Authorization': 'token %s' % apikey}
            page = get('https://api.github.com/repos/%s/issues' % repo, params={'per_page': '100', 'page': n}, headers=headers).json()
            n += 1
            if page:
                issues += page
            else:
                break
        if len(issues) == 0:
            send("No open issues to choose from!")
        else:
            issue = choice(issues)
            # the issues endpoint also lists pull requests; exclude them
            # from the count reported to the user
            num_issues = len([x for x in issues if 'pull_request' not in x])
            send("There are %d open issues, here's one." % num_issues)
            send("#%d -- %s -- %s" % (issue['number'], issue['title'], issue['html_url']))
    elif cmdargs.title and args['is_admin'](args['nick']):
        # admins create the GitHub issue immediately
        url, success = create_issue(cmdargs.title, cmdargs.description, args['source'], repo, apikey)
        if success:
            send("Issue created -- %s -- %s -- %s" % (url, cmdargs.title, cmdargs.description))
        else:
            send("Error creating issue: %s" % url)
    elif cmdargs.title:
        # non-admins: store the issue locally and notify the control channel
        row = Issues(title=cmdargs.title, description=cmdargs.description, source=str(args['source'])) # str needed to make mysqlconnector happy
        args['db'].add(row)
        args['db'].flush()
        send("New Issue: #%d -- %s -- %s, Submitted by %s" % (row.id, cmdargs.title, cmdargs.description, args['nick']),
             target=args['config']['core']['ctrlchan'])
        send("Issue submitted for approval.", target=args['nick'])
    else:
        send("Invalid arguments.")
| gpl-2.0 |
wmvanvliet/mne-python | mne/io/tests/test_compensator.py | 8 | 4250 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.testing import assert_allclose
import pytest
from mne import Epochs, read_evokeds, pick_types
from mne.io.compensator import make_compensator, get_current_comp
from mne.io import read_raw_fif
from mne.utils import requires_mne, run_subprocess, run_tests_if_main
base_dir = op.join(op.dirname(__file__), 'data')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
def test_compensation_identity():
    """Test compensation identity."""
    raw = read_raw_fif(ctf_comp_fname)
    # the test file is stored with CTF compensation grade 3 applied
    assert get_current_comp(raw.info) == 3
    comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False)
    assert comp1.shape == (340, 340)
    comp2 = make_compensator(raw.info, 3, 1, exclude_comp_chs=True)
    # excluding the compensation channels leaves 311 of the 340 rows
    assert comp2.shape == (311, 340)
    # round-trip: compensating A->B then B->A must reduce to the identity
    desired = np.eye(340)
    for from_ in range(3):
        for to in range(3):
            if from_ == to:
                continue
            comp1 = make_compensator(raw.info, from_, to)
            comp2 = make_compensator(raw.info, to, from_)
            # To get 1e-12 here (instead of 1e-6) we must use the linalg.inv
            # method mentioned in compensator.py
            assert_allclose(np.dot(comp1, comp2), desired, atol=1e-12)
            assert_allclose(np.dot(comp2, comp1), desired, atol=1e-12)
@pytest.mark.parametrize('preload', (True, False))
@pytest.mark.parametrize('pick', (False, True))
def test_compensation_apply(tmpdir, preload, pick):
    """Test applying compensation."""
    # make sure that changing the comp doesn't modify the original data
    raw = read_raw_fif(ctf_comp_fname, preload=preload)
    assert raw._comp is None
    raw2 = raw.copy()
    raw2.apply_gradient_compensation(2)
    if pick:
        # drop one channel to exercise compensation on a channel subset
        raw2.pick([0] + list(range(2, len(raw.ch_names))))
        raw.pick([0] + list(range(2, len(raw.ch_names))))
    assert get_current_comp(raw2.info) == 2
    if preload:
        # with preloaded data no compensator matrix is retained
        assert raw2._comp is None
    else:
        # lazy loading keeps a square compensator to apply on read
        assert raw2._comp.shape == (len(raw2.ch_names),) * 2
    # round-trip through disk, then bring both objects to grade 3
    fname = op.join(tmpdir, 'ctf-raw.fif')
    raw2.save(fname)
    raw2 = read_raw_fif(fname)
    assert raw2.compensation_grade == 2
    raw2.apply_gradient_compensation(3)
    assert raw2.compensation_grade == 3
    data, _ = raw[:, :]
    data2, _ = raw2[:, :]
    # channels have norm ~1e-12
    assert_allclose(data, data2, rtol=1e-9, atol=1e-18)
    for ch1, ch2 in zip(raw.info['chs'], raw2.info['chs']):
        assert ch1['coil_type'] == ch2['coil_type']
@requires_mne
def test_compensation_mne(tmpdir):
    """Test compensation by comparing with the MNE-C command line tool."""
    def make_evoked(fname, comp):
        """Make evoked data with the given compensation grade (None = as-is)."""
        raw = read_raw_fif(fname)
        if comp is not None:
            raw.apply_gradient_compensation(comp)
        picks = pick_types(raw.info, meg=True, ref_meg=True)
        events = np.array([[0, 0, 1]], dtype=np.int64)
        evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks,
                        baseline=None).average()
        return evoked
    def compensate_mne(fname, comp):
        """Compensate using MNE-C (mne_compensate_data)."""
        tmp_fname = '%s-%d-ave.fif' % (fname[:-4], comp)
        cmd = ['mne_compensate_data', '--in', fname,
               '--out', tmp_fname, '--grad', str(comp)]
        run_subprocess(cmd)
        return read_evokeds(tmp_fname)[0]
    # save evoked response with default compensation
    fname_default = op.join(tmpdir, 'ctf_default-ave.fif')
    make_evoked(ctf_comp_fname, None).save(fname_default)
    for comp in [0, 1, 2, 3]:
        # our compensation must match MNE-C's output for every grade
        evoked_py = make_evoked(ctf_comp_fname, comp)
        evoked_c = compensate_mne(fname_default, comp)
        picks_py = pick_types(evoked_py.info, meg=True, ref_meg=True)
        picks_c = pick_types(evoked_c.info, meg=True, ref_meg=True)
        assert_allclose(evoked_py.data[picks_py], evoked_c.data[picks_c],
                        rtol=1e-3, atol=1e-17)
        chs_py = [evoked_py.info['chs'][ii] for ii in picks_py]
        chs_c = [evoked_c.info['chs'][ii] for ii in picks_c]
        for ch_py, ch_c in zip(chs_py, chs_c):
            assert ch_py['coil_type'] == ch_c['coil_type']
run_tests_if_main()
| bsd-3-clause |
weolar/miniblink49 | third_party/skia/tools/skp/page_sets/skia_gamedeksiam_nexus10.py | 32 | 1251 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class SkiaBuildbotDesktopPage(page_module.Page):
  """A telemetry page replayed against the shared 10-inch-tablet state.

  NOTE(review): the class name says "Desktop" but the shared state is a
  tablet one — confirm which is intended.
  """
  def __init__(self, url, page_set):
    super(SkiaBuildbotDesktopPage, self).__init__(
        url=url,
        page_set=page_set,
        credentials_path='data/credentials.json',
        shared_page_state_class=shared_page_state.Shared10InchTabletPageState)
    self.archive_data_file = 'data/skia_gamedeksiam_nexus10.json'
  def RunNavigateSteps(self, action_runner):
    """Navigate to the URL and wait a fixed 5 seconds for it to settle."""
    action_runner.Navigate(self.url)
    action_runner.Wait(5)
class SkiaGamedeksiamNexus10PageSet(story.StorySet):
  """ Pages designed to represent the median, not highly optimized web """
  def __init__(self):
    super(SkiaGamedeksiamNexus10PageSet, self).__init__(
        archive_data_file='data/skia_gamedeksiam_nexus10.json')
    urls_list = [
      # Why: from Tom W's list.
      'http://game.deksiam.in.th/',
    ]
    # one story per URL, all sharing the same WPR archive above
    for url in urls_list:
      self.AddStory(SkiaBuildbotDesktopPage(url, self))
| apache-2.0 |
srimai/odoo | addons/portal_gamification/__openerp__.py | 381 | 1571 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Portal Gamification',
    'version': '1',
    # NOTE: a duplicate 'category' key ('Tools') used to appear here; Python
    # dict literals keep only the *last* duplicate, so 'Hidden' (below) was
    # the effective value all along. The dead key has been removed.
    'complexity': 'easy',
    'description': """
This module adds security rules for gamification to allow portal users to participate to challenges
===================================================================================================
    """,
    'author': 'OpenERP SA',
    'depends': ['gamification','portal'],
    'data': [
        'security/ir.model.access.csv',
        'security/portal_security.xml',
    ],
    'installable': True,
    # auto-installed glue module, hence hidden from the apps list
    'auto_install': True,
    'category': 'Hidden',
}
| agpl-3.0 |
black-knight/EtikTak | etiktak/model/stores/admin.py | 1 | 1966 | # Copyright (c) 2012, Daniel Andersen (dani_ande@yahoo.dk)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from etiktak.model.stores import models as stores
from django.contrib import admin
from django_google_maps import widgets as map_widgets
from django_google_maps import fields as map_fields
class StoreLocationAdmin(admin.ModelAdmin):
    """Admin that renders AddressField with a Google Maps widget so a store
    location can be picked on a map."""
    formfield_overrides = {
        map_fields.AddressField: {'widget': map_widgets.GoogleMapsAddressWidget},
    }
# Store uses the default admin; StoreInstance gets the map-enabled one.
admin.site.register(stores.Store)
admin.site.register(stores.StoreInstance, StoreLocationAdmin)
| bsd-3-clause |
lagooned/emacs | elpa/treemacs-20200901.1550/treemacs-single-file-git-status.py | 3 | 2696 | from subprocess import Popen, PIPE, DEVNULL
import sys
# There are 3+ command line arguments:
# 1) the file to update
# 2) the file's previous state, to check if things changed at all
# 3) the file's parents that need to be updated as well
FILE = sys.argv[1]
OLD_STATE = sys.argv[2]
PARENTS = [p for p in sys.argv[3:]]
FILE_STATE_CMD = "git status --porcelain --ignored "
IS_IGNORED_CMD = "git check-ignore "
IS_TRACKED_CMD = "git ls-files --error-unmatch "
IS_CHANGED_CMD = "git diff-index --quiet HEAD "
def main():
    """Print an elisp alist of (path . git-state) for FILE and its PARENTS.

    Exits with status 2 when FILE's state equals OLD_STATE (nothing to do).
    """
    new_state = determine_file_git_state()
    # nothing to do
    if OLD_STATE == new_state:
        sys.exit(2)
    proc_list = []
    # for every parent file start all necessary git processes immediately
    # even if we don't need them later
    for p in PARENTS:
        add_git_processes(proc_list, p)
    result_list = [(FILE, new_state)]
    # iterate through the parents and propagate ignored and untracked states downwards
    # the following states are possible for *directories*:
    # 0 -> clean
    # ! -> ignored
    # ? -> untracked
    # M -> modified
    i = 0
    l = len(proc_list)
    propagate_state = None
    while i < l:
        path, ignore_proc, tracked_proc, changed_proc = proc_list[i]
        if ignore_proc.wait() == 0:
            # git check-ignore exits 0 when the path is ignored
            propagate_state = "!"
            result_list.append((path, propagate_state))
            break
        elif tracked_proc.wait() == 1:
            # git ls-files --error-unmatch exits 1 for untracked paths
            propagate_state = "?"
            result_list.append((path, propagate_state))
            break
        elif changed_proc.wait() == 1:
            # git diff-index --quiet exits 1 when there are changes
            result_list.append((path, "M"))
        else:
            result_list.append((path, "0"))
        i += 1
    if propagate_state:
        # all remaining parents inherit the ignored/untracked state
        i += 1
        while i < l:
            result_list.append((proc_list[i][0], propagate_state))
            i += 1
    elisp_conses = "".join(['("{}" . "{}")'.format(path, state) for path,state in result_list])
    elisp_alist = "({})".format(elisp_conses)
    print(elisp_alist)
def add_git_processes(status_listings, path):
    """Start the three git queries for *path* concurrently and append
    (path, ignored_proc, tracked_proc, changed_proc) to *status_listings*.

    Callers wait() on the processes later.
    NOTE(review): *path* is concatenated into a shell=True command unquoted,
    so paths with spaces or shell metacharacters will misbehave — consider
    shell=False with argument lists.
    """
    ignored_proc = Popen(IS_IGNORED_CMD + path, shell=True, stdout=DEVNULL, stderr=DEVNULL)
    tracked_proc = Popen(IS_TRACKED_CMD + path, shell=True, stdout=DEVNULL, stderr=DEVNULL)
    changed_proc = Popen(IS_CHANGED_CMD + path, shell=True, stdout=DEVNULL, stderr=DEVNULL)
    status_listings.append((path, ignored_proc, tracked_proc, changed_proc))
def determine_file_git_state():
    """Run `git status --porcelain --ignored` on FILE and return the first
    character of its status code, or "0" when git reports nothing."""
    status = Popen(FILE_STATE_CMD + FILE, shell=True, stdout=PIPE, stderr=DEVNULL)
    first_line = status.stdout.readline()
    if not first_line:
        return "0"
    code = first_line.lstrip().split(b" ")[0]
    return code.decode('utf-8').strip()[0]
main()
| apache-2.0 |
ohsu-computational-biology/euler | services/api/manifest.py | 1 | 1651 | #!/usr/bin/env python
"""
Formulate manifest for exacloud
"""
from flask import send_file, abort
from json import dumps
import urllib
import requests
import StringIO
from flask import current_app as app
def create(params, PROXY_TARGET, _check_projects):
    """ create an exacloud manifest """
    # if 'filters' not in params:
    #     abort(400, {'message': 'no filters on manifest request'})
    # call the files api to get details
    files_params = {}
    # NOTE(review): raises KeyError when 'filters' is absent since the guard
    # above is commented out — TODO confirm callers always supply it.
    filters = params['filters']
    files_params['filters'] = dumps(filters)
    query = urllib.urlencode(files_params)
    url = "{}{}?{}".format(PROXY_TARGET,
                           '/api/v1/repository/files',
                           query)
    # call the remote
    files_response = requests.get(url)
    files = files_response.json()
    if 'hits' not in files or len(files['hits']) == 0:
        abort(400, {'message': 'no files found on manifest request'})
    # collect info about the file
    project_codes = []
    paths = []
    for hit in files['hits']:
        for donor in hit['donors']:
            project_codes.append(donor['projectCode'])
        for fileCopy in hit['fileCopies']:
            # if fileCopy['repoCode'] == 'exacloud':
            paths.append(fileCopy['fileName'])
    # ensure project authorization
    _check_projects(set(project_codes))
    # render the manifest template and stream it back as a file download
    template = app.jinja_env.get_template('exacloud_manifest.txt')
    strIO = StringIO.StringIO()
    strIO.write(str(template.render(paths=set(paths))))
    strIO.seek(0)
    return send_file(strIO,
                     attachment_filename="exacloud_manifest.sh",
                     as_attachment=True)
| mit |
agconti/Shopify-Django | venv/lib/python2.7/site-packages/django/contrib/gis/tests/geoapp/feeds.py | 308 | 1841 | from __future__ import absolute_import
from django.contrib.gis import feeds
from .models import City
class TestGeoRSS1(feeds.Feed):
    """Baseline GeoRSS feed: one item per City with its point geometry."""
    link = '/city/'
    title = 'Test GeoDjango Cities'
    def items(self):
        return City.objects.all()
    def item_link(self, item):
        return '/city/%s/' % item.pk
    def item_geometry(self, item):
        # each item's geometry is the city's point
        return item.point
class TestGeoRSS2(TestGeoRSS1):
    """Variant with a feed-level bounding box and tuple item geometries."""
    def geometry(self, obj):
        # This should attach a <georss:box> element for the extent of
        # of the cities in the database. This tuple came from
        # calling `City.objects.extent()` -- we can't do that call here
        # because `extent` is not implemented for MySQL/Oracle.
        return (-123.30, -41.32, 174.78, 48.46)
    def item_geometry(self, item):
        # Returning a simple tuple for the geometry.
        return item.point.x, item.point.y
class TestGeoAtom1(TestGeoRSS1):
    # same content as TestGeoRSS1, rendered as a Geo-enabled Atom 1.0 feed
    feed_type = feeds.GeoAtom1Feed
class TestGeoAtom2(TestGeoRSS2):
    # Atom 1.0 rendering of TestGeoRSS2's content
    feed_type = feeds.GeoAtom1Feed
    def geometry(self, obj):
        # This time we'll use a 2-tuple of coordinates for the box.
        return ((-123.30, -41.32), (174.78, 48.46))
class TestW3CGeo1(TestGeoRSS1):
    # valid: W3C Geo feeds support the point geometries this feed provides
    feed_type = feeds.W3CGeoFeed
# The following feeds are invalid, and will raise exceptions.
class TestW3CGeo2(TestGeoRSS2):
    # invalid: inherits a feed-level box geometry, not expressible in W3C Geo
    feed_type = feeds.W3CGeoFeed
class TestW3CGeo3(TestGeoRSS1):
    # invalid: polygon item geometry is not expressible in W3C Geo
    feed_type = feeds.W3CGeoFeed
    def item_geometry(self, item):
        from django.contrib.gis.geos import Polygon
        return Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# The feed dictionary to use for URLs: maps URL slug -> feed class.
feed_dict = {
    'rss1' : TestGeoRSS1,
    'rss2' : TestGeoRSS2,
    'atom1' : TestGeoAtom1,
    'atom2' : TestGeoAtom2,
    'w3cgeo1' : TestW3CGeo1,
    'w3cgeo2' : TestW3CGeo2,
    'w3cgeo3' : TestW3CGeo3,
}
| mit |
sloanyang/depends | third_party/logilab/astng/scoped_nodes.py | 14 | 34565 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:thenault@gmail.com
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the classes for "scoped" node, i.e. which are opening a
new local scope in the language definition : Module, Class, Function (and
Lambda, GenExpr, DictComp and SetComp to some extent).
"""
from __future__ import with_statement
__doctype__ = "restructuredtext en"
import sys
from itertools import chain
from logilab.common.compat import builtins
from logilab.common.decorators import cached
from logilab.astng import BUILTINS_MODULE
from logilab.astng.exceptions import NotFoundError, NoDefault, \
ASTNGBuildingException, InferenceError
from logilab.astng.node_classes import Const, DelName, DelAttr, \
Dict, From, List, Name, Pass, Raise, Return, Tuple, Yield, \
are_exclusive, LookupMixIn, const_factory as cf, unpack_infer
from logilab.astng.bases import NodeNG, InferenceContext, Instance,\
YES, Generator, UnboundMethod, BoundMethod, _infer_stmts, copy_context, \
BUILTINS_NAME
from logilab.astng.mixins import FilterStmtsMixin
from logilab.astng.bases import Statement
from logilab.astng.manager import ASTNGManager
def remove_nodes(func, cls):
    """Decorator factory: wrap *func* so instances of *cls* are filtered
    out of its result list; raise NotFoundError if nothing remains."""
    def wrapper(*args, **kwargs):
        kept = [node for node in func(*args, **kwargs)
                if not isinstance(node, cls)]
        if kept:
            return kept
        raise NotFoundError()
    return wrapper
def function_to_method(n, klass):
    """Turn the Function node *n* into a method of class *klass*.

    classmethods become BoundMethod, staticmethods stay as-is, any other
    function becomes UnboundMethod; non-Function nodes pass through.
    """
    if not isinstance(n, Function):
        return n
    if n.type == 'classmethod':
        return BoundMethod(n, klass)
    if n.type == 'staticmethod':
        return n
    return UnboundMethod(n)
def std_special_attributes(self, name, add_locals=True):
    """Return nodes for the python special attributes __name__, __doc__
    and __dict__ of *self*; raise NotFoundError for any other name.

    When *add_locals* is true, locally defined nodes for *name* are
    appended after the synthesized constant.
    """
    local_defs = self.locals if add_locals else {}
    if name == '__name__':
        return [cf(self.name)] + local_defs.get(name, [])
    if name == '__doc__':
        return [cf(self.doc)] + local_defs.get(name, [])
    if name == '__dict__':
        return [Dict()] + local_defs.get(name, [])
    raise NotFoundError(name)
MANAGER = ASTNGManager()
def builtin_lookup(name):
    """Look *name* up in the builtin module.

    Return a (builtin_astng, statements) pair; statements is empty for
    '__dict__' and for names not defined by the builtins.
    """
    builtin_astng = MANAGER.astng_from_module(builtins)
    if name == '__dict__':
        return builtin_astng, ()
    matching = builtin_astng.locals.get(name, ())
    return builtin_astng, matching
# TODO move this Mixin to mixins.py; problem: 'Function' in _scope_lookup
class LocalsDictNodeNG(LookupMixIn, NodeNG):
""" this class provides locals handling common to Module, Function
and Class nodes, including a dict like interface for direct access
to locals information
"""
# attributes below are set by the builder module or by raw factories
# dictionary of locals with name as key and node defining the local as
# value
def qname(self):
"""return the 'qualified' name of the node, eg module.name,
module.class.name ...
"""
if self.parent is None:
return self.name
return '%s.%s' % (self.parent.frame().qname(), self.name)
def frame(self):
"""return the first parent frame node (i.e. Module, Function or Class)
"""
return self
def scope(self):
"""return the first node defining a new scope (i.e. Module,
Function, Class, Lambda but also GenExpr, DictComp and SetComp)
"""
return self
def _scope_lookup(self, node, name, offset=0):
"""XXX method for interfacing the scope lookup"""
try:
stmts = node._filter_stmts(self.locals[name], self, offset)
except KeyError:
stmts = ()
if stmts:
return self, stmts
if self.parent: # i.e. not Module
# nested scope: if parent scope is a function, that's fine
# else jump to the module
pscope = self.parent.scope()
if not pscope.is_function:
pscope = pscope.root()
return pscope.scope_lookup(node, name)
return builtin_lookup(name) # Module
def set_local(self, name, stmt):
"""define <name> in locals (<stmt> is the node defining the name)
if the node is a Module node (i.e. has globals), add the name to
globals
if the name is already defined, ignore it
"""
#assert not stmt in self.locals.get(name, ()), (self, stmt)
self.locals.setdefault(name, []).append(stmt)
__setitem__ = set_local
def _append_node(self, child):
"""append a child, linking it in the tree"""
self.body.append(child)
child.parent = self
def add_local_node(self, child_node, name=None):
"""append a child which should alter locals to the given node"""
if name != '__class__':
# add __class__ node as a child will cause infinite recursion later!
self._append_node(child_node)
self.set_local(name or child_node.name, child_node)
def __getitem__(self, item):
"""method from the `dict` interface returning the first node
associated with the given name in the locals dictionary
:type item: str
:param item: the name of the locally defined object
:raises KeyError: if the name is not defined
"""
return self.locals[item][0]
def __iter__(self):
"""method from the `dict` interface returning an iterator on
`self.keys()`
"""
return iter(self.keys())
def keys(self):
"""method from the `dict` interface returning a tuple containing
locally defined names
"""
return self.locals.keys()
def values(self):
"""method from the `dict` interface returning a tuple containing
locally defined nodes which are instance of `Function` or `Class`
"""
return [self[key] for key in self.keys()]
def items(self):
"""method from the `dict` interface returning a list of tuple
containing each locally defined name with its associated node,
which is an instance of `Function` or `Class`
"""
return zip(self.keys(), self.values())
    def __contains__(self, name):
        # membership test over the locally defined names
        return name in self.locals
    has_key = __contains__  # Python 2 dict-interface alias
# Module #####################################################################
class Module(LocalsDictNodeNG):
    """ASTNG node for a python module: the root of a parsed tree."""
    _astng_fields = ('body',)
    fromlineno = 0
    lineno = 0
    # attributes below are set by the builder module or by raw factories
    # the file from which as been extracted the astng representation. It may
    # be None if the representation has been built from a built-in module
    file = None
    # the module name
    name = None
    # boolean for astng built from source (i.e. ast)
    pure_python = None
    # boolean for package module
    package = None
    # dictionary of globals with name as key and node defining the global
    # as value
    globals = None
    # names of python special attributes (handled by getattr impl.)
    special_attributes = set(('__name__', '__doc__', '__file__', '__path__',
                              '__dict__'))
    # names of module attributes available through the global scope
    scope_attrs = set(('__name__', '__doc__', '__file__', '__path__'))
    def __init__(self, name, doc, pure_python=True):
        self.name = name
        self.doc = doc
        self.pure_python = pure_python
        # a module's locals *are* its globals: one shared dictionary
        self.locals = self.globals = {}
        self.body = []
    @property
    def file_stream(self):
        # NOTE(review): `file()` is a Python 2 builtin removed in Python 3;
        # `open()` would be the portable spelling -- confirm target version.
        if self.file is not None:
            return file(self.file)
        return None
    def block_range(self, lineno):
        """return block line numbers.
        start from the beginning whatever the given lineno
        """
        return self.fromlineno, self.tolineno
    def scope_lookup(self, node, name, offset=0):
        # module special attributes take precedence only when the name is
        # not also locally defined
        if name in self.scope_attrs and not name in self.locals:
            try:
                return self, self.getattr(name)
            except NotFoundError:
                return self, ()
        return self._scope_lookup(node, name, offset)
    def pytype(self):
        return '%s.module' % BUILTINS_MODULE
    def display_type(self):
        return 'Module'
    def getattr(self, name, context=None, ignore_locals=False):
        """Return the list of nodes bound to `name` in this module,
        handling special attributes and, for packages, submodule imports;
        raises NotFoundError when nothing matches.
        """
        if name in self.special_attributes:
            if name == '__file__':
                return [cf(self.file)] + self.locals.get(name, [])
            if name == '__path__' and self.package:
                return [List()] + self.locals.get(name, [])
            return std_special_attributes(self, name)
        if not ignore_locals and name in self.locals:
            return self.locals[name]
        if self.package:
            try:
                # maybe `name` denotes a submodule of this package
                return [self.import_module(name, relative_only=True)]
            except ASTNGBuildingException:
                raise NotFoundError(name)
            except Exception:# XXX pylint tests never pass here; do we need it?
                import traceback
                traceback.print_exc()
        raise NotFoundError(name)
    getattr = remove_nodes(getattr, DelName)
    def igetattr(self, name, context=None):
        """inferred getattr"""
        # set lookup name since this is necessary to infer on import nodes for
        # instance
        context = copy_context(context)
        context.lookupname = name
        try:
            return _infer_stmts(self.getattr(name, context), context, frame=self)
        except NotFoundError:
            raise InferenceError(name)
    def fully_defined(self):
        """return True if this module has been built from a .py file
        and so contains a complete representation including the code
        """
        return self.file is not None and self.file.endswith('.py')
    def statement(self):
        """return the first parent node marked as statement node
        consider a module as a statement...
        """
        return self
    def previous_sibling(self):
        """module has no sibling"""
        return
    def next_sibling(self):
        """module has no sibling"""
        return
    # Python 2.8 never existed, so `< (2, 8)` reads as "any Python 2
    # interpreter"; on Python 3 absolute imports are always active.
    if sys.version_info < (2, 8):
        def absolute_import_activated(self):
            for stmt in self.locals.get('absolute_import', ()):
                if isinstance(stmt, From) and stmt.modname == '__future__':
                    return True
            return False
    else:
        absolute_import_activated = lambda self: True
    def import_module(self, modname, relative_only=False, level=None):
        """import the given module considering self as context"""
        if relative_only and level is None:
            level = 0
        absmodname = self.relative_to_absolute_name(modname, level)
        try:
            return MANAGER.astng_from_module_name(absmodname)
        except ASTNGBuildingException:
            # we only want to import a sub module or package of this module,
            # skip here
            if relative_only:
                raise
        return MANAGER.astng_from_module_name(modname)
    def relative_to_absolute_name(self, modname, level):
        """return the absolute module name for a relative import.
        The relative import can be implicit or explicit.
        """
        # XXX this returns non sens when called on an absolute import
        # like 'pylint.checkers.logilab.astng.utils'
        # XXX doesn't return absolute name if self.name isn't absolute name
        if self.absolute_import_activated() and level is None:
            return modname
        if level:
            if self.package:
                level = level - 1
            package_name = self.name.rsplit('.', level)[0]
        elif self.package:
            package_name = self.name
        else:
            package_name = self.name.rsplit('.', 1)[0]
        if package_name:
            if not modname:
                return package_name
            return '%s.%s' % (package_name, modname)
        return modname
    def wildcard_import_names(self):
        """return the list of imported names when this module is 'wildcard
        imported'
        It doesn't include the '__builtins__' name which is added by the
        current CPython implementation of wildcard imports.
        """
        # take advantage of a living module if it exists
        try:
            living = sys.modules[self.name]
        except KeyError:
            pass
        else:
            try:
                return living.__all__
            except AttributeError:
                return [name for name in living.__dict__.keys()
                        if not name.startswith('_')]
        # else lookup the astng
        #
        # We separate the different steps of lookup in try/excepts
        # to avoid catching too many Exceptions
        # However, we can not analyse dynamically constructed __all__
        try:
            all = self['__all__']
        except KeyError:
            return [name for name in self.keys() if not name.startswith('_')]
        try:
            # NOTE(review): `.next()` is Python 2 only; py3 would need next()
            explicit = all.assigned_stmts().next()
        except InferenceError:
            return [name for name in self.keys() if not name.startswith('_')]
        except AttributeError:
            # not an assignment node
            # XXX infer?
            return [name for name in self.keys() if not name.startswith('_')]
        try:
            # should be a Tuple/List of constant string / 1 string not allowed
            return [const.value for const in explicit.elts]
        except AttributeError:
            return [name for name in self.keys() if not name.startswith('_')]
class ComprehensionScope(LocalsDictNodeNG):
    """Base class for nodes introducing a comprehension scope."""
    def frame(self):
        # a comprehension is not a frame itself: delegate to the parent
        return self.parent.frame()
    scope_lookup = LocalsDictNodeNG._scope_lookup
class GenExpr(ComprehensionScope):
    """Scoped node representing a generator expression."""
    _astng_fields = ('elt', 'generators')
    def __init__(self):
        # the yielded expression and the for/if clauses, filled by the builder
        self.generators = []
        self.elt = None
        self.locals = {}
class DictComp(ComprehensionScope):
    """Scoped node representing a dict comprehension."""
    _astng_fields = ('key', 'value', 'generators')
    def __init__(self):
        # key/value expressions and the for/if clauses, filled by the builder
        self.generators = []
        self.value = None
        self.key = None
        self.locals = {}
class SetComp(ComprehensionScope):
    """Scoped node representing a set comprehension."""
    _astng_fields = ('elt', 'generators')
    def __init__(self):
        # the element expression and the for/if clauses, filled by the builder
        self.generators = []
        self.elt = None
        self.locals = {}
class _ListComp(NodeNG):
    """class representing a ListComp node"""
    # shared implementation; the concrete ListComp below differs between
    # Python 2 (leaks its variable) and Python 3 (own scope)
    _astng_fields = ('elt', 'generators')
    elt = None
    generators = None
# list comprehensions got their own scope in Python 3, hence the
# version-dependent base classes
if sys.version_info >= (3, 0):
    class ListComp(_ListComp, ComprehensionScope):
        """class representing a ListComp node"""
        def __init__(self):
            self.locals = {}
else:
    class ListComp(_ListComp):
        """class representing a ListComp node"""
# Function ###################################################################
class Lambda(LocalsDictNodeNG, FilterStmtsMixin):
    """ASTNG node for a lambda expression; also the base class of Function."""
    _astng_fields = ('args', 'body',)
    name = '<lambda>'
    # function's type, 'function' | 'method' | 'staticmethod' | 'classmethod'
    type = 'function'
    def __init__(self):
        self.locals = {}
        self.args = []
        self.body = []
    def pytype(self):
        # 'method' matches both 'method' and 'classmethod'/'staticmethod'
        # never reach here with type == 'method'-less bound values
        if 'method' in self.type:
            return '%s.instancemethod' % BUILTINS_MODULE
        return '%s.function' % BUILTINS_MODULE
    def display_type(self):
        if 'method' in self.type:
            return 'Method'
        return 'Function'
    def callable(self):
        return True
    def argnames(self):
        """return a list of argument names"""
        if self.args.args: # maybe None with builtin functions
            names = _rec_get_names(self.args.args)
        else:
            names = []
        if self.args.vararg:
            names.append(self.args.vararg)
        if self.args.kwarg:
            names.append(self.args.kwarg)
        return names
    def infer_call_result(self, caller, context=None):
        """infer what a function is returning when called"""
        # for a lambda, `self.body` is a single expression node
        return self.body.infer(context)
    def scope_lookup(self, node, name, offset=0):
        if node in self.args.defaults:
            frame = self.parent.frame()
            # line offset to avoid that def func(f=func) resolve the default
            # value to the defined function
            offset = -1
        else:
            # check this is not used in function decorators
            frame = self
        return frame._scope_lookup(node, name, offset)
class Function(Statement, Lambda):
    """ASTNG node for a function or method definition."""
    _astng_fields = ('decorators', 'args', 'body')
    special_attributes = set(('__name__', '__doc__', '__dict__'))
    is_function = True
    # attributes below are set by the builder module or by raw factories
    blockstart_tolineno = None
    decorators = None
    def __init__(self, name, doc):
        self.locals = {}
        self.args = []
        self.body = []
        self.decorators = None
        self.name = name
        self.doc = doc
        self.extra_decorators = []
        self.instance_attrs = {}
    def set_line_info(self, lastchild):
        self.fromlineno = self.lineno
        # lineno is the line number of the first decorator, we want the def statement lineno
        if self.decorators is not None:
            self.fromlineno += len(self.decorators.nodes)
        self.tolineno = lastchild.tolineno
        self.blockstart_tolineno = self.args.tolineno
    def block_range(self, lineno):
        """return block line numbers.
        start from the "def" position whatever the given lineno
        """
        return self.fromlineno, self.tolineno
    def getattr(self, name, context=None):
        """this method doesn't look in the instance_attrs dictionary since it's
        done by an Instance proxy at inference time.
        """
        if name == '__module__':
            return [cf(self.root().qname())]
        if name in self.instance_attrs:
            return self.instance_attrs[name]
        return std_special_attributes(self, name, False)
    def is_method(self):
        """return true if the function node should be considered as a method"""
        # check we are defined in a Class, because this is usually expected
        # (e.g. pylint...) when is_method() return True
        return self.type != 'function' and isinstance(self.parent.frame(), Class)
    def decoratornames(self):
        """return a list of decorator qualified names"""
        result = set()
        decoratornodes = []
        if self.decorators is not None:
            decoratornodes += self.decorators.nodes
        decoratornodes += self.extra_decorators
        for decnode in decoratornodes:
            for infnode in decnode.infer():
                result.add(infnode.qname())
        return result
    decoratornames = cached(decoratornames)
    def is_bound(self):
        """return true if the function is bound to an Instance or a class"""
        return self.type == 'classmethod'
    def is_abstract(self, pass_is_abstract=True):
        """return true if the method is abstract
        It's considered as abstract if the only statement is a raise of
        NotImplementError, or, if pass_is_abstract, a pass statement
        """
        # note: the unconditional `return False` makes only the *first*
        # body statement matter, matching the "only statement" contract
        for child_node in self.body:
            if isinstance(child_node, Raise):
                if child_node.raises_not_implemented():
                    return True
            if pass_is_abstract and isinstance(child_node, Pass):
                return True
            return False
        # empty function is the same as function with a single "pass" statement
        if pass_is_abstract:
            return True
    def is_generator(self):
        """return true if this is a generator function"""
        # XXX should be flagged, not computed
        # NOTE(review): `.next()` is Python 2 only; py3 would need next(...)
        try:
            return self.nodes_of_class(Yield, skip_klass=Function).next()
        except StopIteration:
            return False
    def infer_call_result(self, caller, context=None):
        """infer what a function is returning when called"""
        if self.is_generator():
            yield Generator(self)
            return
        # yield one inferred value per return statement in the body
        returns = self.nodes_of_class(Return, skip_klass=Function)
        for returnnode in returns:
            if returnnode.value is None:
                yield Const(None)
            else:
                try:
                    for infered in returnnode.value.infer(context):
                        yield infered
                except InferenceError:
                    yield YES
def _rec_get_names(args, names=None):
    """Collect every argument name from `args` into a flat list,
    recursing into tuple arguments (py2 tuple unpacking in signatures).
    """
    collected = [] if names is None else names
    for argument in args:
        if isinstance(argument, Tuple):
            _rec_get_names(argument.elts, collected)
        else:
            collected.append(argument.name)
    return collected
# Class ######################################################################
def _class_type(klass, ancestors=None):
    """return a Class node type to differ metaclass, interface and exception
    from 'regular' classes

    The result is cached on ``klass._type``; `ancestors` guards against
    inheritance cycles during the recursive walk.
    """
    # XXX we have to store ancestors in case we have a ancestor loop
    if klass._type is not None:
        return klass._type
    if klass.name == 'type':
        klass._type = 'metaclass'
    elif klass.name.endswith('Interface'):
        klass._type = 'interface'
    elif klass.name.endswith('Exception'):
        klass._type = 'exception'
    else:
        if ancestors is None:
            ancestors = set()
        if klass in ancestors:
            # XXX we are in loop ancestors, and have found no type
            klass._type = 'class'
            return 'class'
        ancestors.add(klass)
        # print >> sys.stderr, '_class_type', repr(klass)
        for base in klass.ancestors(recurs=False):
            if _class_type(base, ancestors) != 'class':
                klass._type = base.type
                break
    if klass._type is None:
        klass._type = 'class'
    return klass._type
def _iface_hdlr(iface_node):
"""a handler function used by interfaces to handle suspicious
interface nodes
"""
return True
class Class(Statement, LocalsDictNodeNG, FilterStmtsMixin):
    """ASTNG node for a class definition."""
    # some of the attributes below are set by the builder module or
    # by a raw factories
    # a dictionary of class instances attributes
    _astng_fields = ('decorators', 'bases', 'body') # name
    decorators = None
    special_attributes = set(('__name__', '__doc__', '__dict__', '__module__',
                              '__bases__', '__mro__', '__subclasses__'))
    blockstart_tolineno = None
    _type = None
    type = property(_class_type,
                    doc="class'type, possible values are 'class' | "
                        "'metaclass' | 'interface' | 'exception'")
    def __init__(self, name, doc):
        self.instance_attrs = {}
        self.locals = {}
        self.bases = []
        self.body = []
        self.name = name
        self.doc = doc
    def _newstyle_impl(self, context=None):
        # new-style-ness is inherited from the ancestors; the answer is
        # resolved lazily and cached in self._newstyle
        if context is None:
            context = InferenceContext()
        if self._newstyle is not None:
            return self._newstyle
        for base in self.ancestors(recurs=False, context=context):
            if base._newstyle_impl(context):
                self._newstyle = True
                break
        if self._newstyle is None:
            self._newstyle = False
        return self._newstyle
    _newstyle = None
    newstyle = property(_newstyle_impl,
                        doc="boolean indicating if it's a new style class"
                            "or not")
    def set_line_info(self, lastchild):
        self.fromlineno = self.lineno
        self.blockstart_tolineno = self.bases and self.bases[-1].tolineno or self.fromlineno
        if lastchild is not None:
            self.tolineno = lastchild.tolineno
        # else this is a class with only a docstring, then tolineno is (should be) already ok
    def block_range(self, lineno):
        """return block line numbers.
        start from the "class" position whatever the given lineno
        """
        return self.fromlineno, self.tolineno
    def pytype(self):
        if self.newstyle:
            return '%s.type' % BUILTINS_MODULE
        return '%s.classobj' % BUILTINS_MODULE
    def display_type(self):
        return 'Class'
    def callable(self):
        return True
    def infer_call_result(self, caller, context=None):
        """infer what a class is returning when called"""
        yield Instance(self)
    def scope_lookup(self, node, name, offset=0):
        if node in self.bases:
            frame = self.parent.frame()
            # line offset to avoid that class A(A) resolve the ancestor to
            # the defined class
            offset = -1
        else:
            frame = self
        return frame._scope_lookup(node, name, offset)
    # list of parent class as a list of string (i.e. names as they appear
    # in the class definition) XXX bw compat
    def basenames(self):
        return [bnode.as_string() for bnode in self.bases]
    basenames = property(basenames)
    def ancestors(self, recurs=True, context=None):
        """return an iterator on the node base classes in a prefixed
        depth first order
        :param recurs:
          boolean indicating if it should recurse or return direct
          ancestors only
        """
        # FIXME: should be possible to choose the resolution order
        # XXX inference make infinite loops possible here (see BaseTransformer
        # manipulation in the builder module for instance)
        yielded = set([self])
        if context is None:
            context = InferenceContext()
        for stmt in self.bases:
            with context.restore_path():
                try:
                    for baseobj in stmt.infer(context):
                        if not isinstance(baseobj, Class):
                            # duh ?
                            continue
                        if baseobj in yielded:
                            continue # cf xxx above
                        yielded.add(baseobj)
                        yield baseobj
                        if recurs:
                            for grandpa in baseobj.ancestors(True, context):
                                if grandpa in yielded:
                                    continue # cf xxx above
                                yielded.add(grandpa)
                                yield grandpa
                except InferenceError:
                    # XXX log error ?
                    continue
    def local_attr_ancestors(self, name, context=None):
        """return an iterator on astng representation of parent classes
        which have <name> defined in their locals
        """
        for astng in self.ancestors(context=context):
            if name in astng:
                yield astng
    def instance_attr_ancestors(self, name, context=None):
        """return an iterator on astng representation of parent classes
        which have <name> defined in their instance attribute dictionary
        """
        for astng in self.ancestors(context=context):
            if name in astng.instance_attrs:
                yield astng
    def has_base(self, node):
        # membership is on the base *nodes*, not on inferred classes
        return node in self.bases
    def local_attr(self, name, context=None):
        """return the list of assign node associated to name in this class
        locals or in its parents
        :raises `NotFoundError`:
          if no attribute with this name has been find in this class or
          its parent classes
        """
        try:
            return self.locals[name]
        except KeyError:
            # get if from the first parent implementing it if any
            for class_node in self.local_attr_ancestors(name, context):
                return class_node.locals[name]
        raise NotFoundError(name)
    local_attr = remove_nodes(local_attr, DelAttr)
    def instance_attr(self, name, context=None):
        """return the astng nodes associated to name in this class instance
        attributes dictionary and in its parents
        :raises `NotFoundError`:
          if no attribute with this name has been find in this class or
          its parent classes
        """
        values = self.instance_attrs.get(name, [])
        # get all values from parents
        for class_node in self.instance_attr_ancestors(name, context):
            values += class_node.instance_attrs[name]
        if not values:
            raise NotFoundError(name)
        return values
    instance_attr = remove_nodes(instance_attr, DelAttr)
    def instanciate_class(self):
        """return Instance of Class node, else return self"""
        return Instance(self)
    def getattr(self, name, context=None):
        """this method doesn't look in the instance_attrs dictionary since it's
        done by an Instance proxy at inference time.
        It may return a YES object if the attribute has not been actually
        found but a __getattr__ or __getattribute__ method is defined
        """
        values = self.locals.get(name, [])
        if name in self.special_attributes:
            if name == '__module__':
                return [cf(self.root().qname())] + values
            # FIXME : what is expected by passing the list of ancestors to cf:
            # you can just do [cf(tuple())] + values without breaking any test
            # this is ticket http://www.logilab.org/ticket/52785
            if name == '__bases__':
                return [cf(tuple(self.ancestors(recurs=False, context=context)))] + values
            # XXX need proper meta class handling + MRO implementation
            if name == '__mro__' and self.newstyle:
                # XXX mro is read-only but that's not our job to detect that
                return [cf(tuple(self.ancestors(recurs=True, context=context)))] + values
            return std_special_attributes(self, name)
        # don't modify the list in self.locals!
        values = list(values)
        for classnode in self.ancestors(recurs=True, context=context):
            values += classnode.locals.get(name, [])
        if not values:
            raise NotFoundError(name)
        return values
    def igetattr(self, name, context=None):
        """inferred getattr, need special treatment in class to handle
        descriptors
        """
        # set lookup name since this is necessary to infer on import nodes for
        # instance
        context = copy_context(context)
        context.lookupname = name
        try:
            for infered in _infer_stmts(self.getattr(name, context), context,
                                        frame=self):
                # yield YES object instead of descriptors when necessary
                if not isinstance(infered, Const) and isinstance(infered, Instance):
                    try:
                        infered._proxied.getattr('__get__', context)
                    except NotFoundError:
                        yield infered
                    else:
                        yield YES
                else:
                    yield function_to_method(infered, self)
        except NotFoundError:
            if not name.startswith('__') and self.has_dynamic_getattr(context):
                # class handle some dynamic attributes, return a YES object
                yield YES
            else:
                raise InferenceError(name)
    def has_dynamic_getattr(self, context=None):
        """return True if the class has a custom __getattr__ or
        __getattribute__ method
        """
        # need to explicitly handle optparse.Values (setattr is not detected)
        if self.name == 'Values' and self.root().name == 'optparse':
            return True
        try:
            self.getattr('__getattr__', context)
            return True
        except NotFoundError:
            #if self.newstyle: XXX cause an infinite recursion error
            try:
                getattribute = self.getattr('__getattribute__', context)[0]
                if getattribute.root().name != BUILTINS_NAME:
                    # class has a custom __getattribute__ defined
                    return True
            except NotFoundError:
                pass
        return False
    def methods(self):
        """return an iterator on all methods defined in the class and
        its ancestors
        """
        # `done` keeps the first (most derived) definition of each name
        done = {}
        for astng in chain(iter((self,)), self.ancestors()):
            for meth in astng.mymethods():
                if meth.name in done:
                    continue
                done[meth.name] = None
                yield meth
    def mymethods(self):
        """return an iterator on all methods defined in the class"""
        for member in self.values():
            if isinstance(member, Function):
                yield member
    def interfaces(self, herited=True, handler_func=_iface_hdlr):
        """return an iterator on interfaces implemented by the given
        class node
        """
        # FIXME: what if __implements__ = (MyIFace, MyParent.__implements__)...
        try:
            implements = Instance(self).getattr('__implements__')[0]
        except NotFoundError:
            return
        if not herited and not implements.frame() is self:
            return
        found = set()
        missing = False
        for iface in unpack_infer(implements):
            if iface is YES:
                missing = True
                continue
            if not iface in found and handler_func(iface):
                found.add(iface)
                yield iface
        if missing:
            raise InferenceError()
| gpl-2.0 |
DIRACGrid/DIRAC | src/DIRAC/DataManagementSystem/scripts/dirac_admin_user_quota.py | 2 | 1472 | #!/usr/bin/env python
"""
Show storage quotas for specified users or for all registered users if nobody is specified
Usage:
dirac-admin-user-quota [user1 ...]
Example:
$ dirac-admin-user-quota
------------------------------
Username | Quota (GB)
------------------------------
atsareg | None
msapunov | None
vhamar | None
------------------------------
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
  """Print the storage quota (GB) of the requested users, or of every
  user registered in the CS when no user is given on the command line.
  """
  Script.parseCommandLine()
  users = Script.getPositionalArgs()
  from DIRAC import gLogger, gConfig
  if not users:
    # no user specified: list everybody registered in /Registry/Users
    res = gConfig.getSections('/Registry/Users')
    if not res['OK']:
      gLogger.error("Failed to retrieve user list from CS", res['Message'])
      DIRAC.exit(2)
    users = res['Value']
  gLogger.notice("-" * 30)
  gLogger.notice("%s|%s" % ('Username'.ljust(15), 'Quota (GB)'.rjust(15)))
  gLogger.notice("-" * 30)
  for user in sorted(users):
    # a per-user Quota entry overrides the registry-wide default
    quota = gConfig.getValue('/Registry/Users/%s/Quota' % user, 0)
    if not quota:
      quota = gConfig.getValue('/Registry/DefaultStorageQuota')
    gLogger.notice("%s|%s" % (user.ljust(15), str(quota).rjust(15)))
  gLogger.notice("-" * 30)
  DIRAC.exit(0)
if __name__ == "__main__":
  main()
| gpl-3.0 |
LazoCoder/Pokemon-Terminal | tests/test_database.py | 1 | 4401 | #!/usr/bin/env python3
# To run the tests, use: python3 -m pytest --capture=sys
from pokemonterminal.database import Database
from tests.test_utils import region_dict, get_region, make_extra_counts, MAX_ID
def test_first_database():
    """Smoke test: the database loads and reports its size."""
    db = Database()
    print('%d items in first database.' % len(db))
def test_second_database():
    """Smoke test: a second Database instance also loads cleanly."""
    db = Database()
    print('%d items in second database.' % len(db))
def test_len():
    """len(db) and len(db.get_all()) both equal MAX_ID plus the extras."""
    db = Database()
    expected = MAX_ID + len(db.get_extra())
    assert len(db) == expected
    assert len(db.get_all()) == expected
def test_extra_counts():
    # total size == numbered pokemon (MAX_ID) + every extra entry
    assert len(Database()) == MAX_ID + sum(make_extra_counts().values())
def test_get_extras():
    """get_extra() is non-empty and matches the expected extra counts."""
    extras = Database().get_extra()
    assert extras, 'db.get_extra() returns no pokemon'
    assert len(extras) == sum(make_extra_counts().values())
def region_length_test(region_name):
    """Assert db.get_region(region_name) yields exactly the number of
    pokemon implied by the region's start/end ids in region_dict.
    """
    db = Database()
    # test db.get_region()
    pokemon = get_region(db, region_name)
    assert pokemon, 'No pokemon found in region: ' + region_name
    # test that region_name is in region_dict
    region_info = region_dict[region_name]
    # extra_count = extra_counts.get(region_name, 0)
    expected_len = region_info.end - region_info.start + 1 # + extra_count
    fmt = 'Testing {}({} vs. {}): {}'
    print(fmt.format(region_name, len(pokemon), expected_len, region_info))
    # test the number of pokemon returned by db.get_region()
    assert len(pokemon) == expected_len
# Per-region length checks: each simply delegates to region_length_test().
def test_kanto_length():
    region_length_test('kanto')
def test_johto_length():
    region_length_test('johto')
def test_hoenn_length():
    region_length_test('hoenn')
def test_sinnoh_length():
    region_length_test('sinnoh')
def test_unova_length():
    region_length_test('unova')
def test_kalos_length():
    region_length_test('kalos')
def region_test(region_name):
    """Check db.get_region() contents plus pokemon lookup by id and by
    name for the given region.
    """
    db = Database()
    # test db.get_region()
    pokemon = get_region(db, region_name)
    assert pokemon, 'No pokemon found in region: ' + region_name
    # test that region_name is in region_dict
    region_info = region_dict[region_name]
    delta = region_info.end - region_info.start
    fmt = 'Testing {}({} vs. {}): {}'
    print(fmt.format(region_name, len(pokemon), delta + 1, region_info))
    # test db.get_pokemon(id)
    middle_pokemon = db.get_pokemon(region_info.start + (delta // 2))
    assert middle_pokemon in pokemon
    # test db.get_pokemon(name)
    name = middle_pokemon.get_name()
    assert db.get_pokemon(name) in pokemon
    # test the case insensivity of db.get_pokemon(name)
    # assert db.get_pokemon(name.upper()) in pokemon # !!! FixMe !!!
# Per-region lookup checks: each simply delegates to region_test().
def test_kanto():
    region_test('kanto')
def test_johto():
    region_test('johto')
def test_hoenn():
    region_test('hoenn')
def test_sinnoh():
    region_test('sinnoh')
def test_unova():
    region_test('unova')
def test_kalos():
    region_test('kalos')
def test_regions():
    # run the full region_test suite against every known region
    for region_name in region_dict:
        region_test(region_name)
def _test_region(region_name):
    """Same region-count check as region_length_test, but driven through
    the per-region db.get_<region>() accessors.
    """
    db = Database()
    # Database unfortunately makes db.__get_region() private :-(
    func = {
        "kanto": db.get_kanto,
        "johto": db.get_johto,
        "hoenn": db.get_hoenn,
        "sinnoh": db.get_sinnoh,
        "unova": db.get_unova,
        "kalos": db.get_kalos
    }[region_name]
    pokemon_list = func()
    region_record = region_dict[region_name]
    # make sure there are no missing pokemon
    start = region_record.start
    end = region_record.end
    # extra_count = extra_counts.get(region_name, 0)
    assert len(pokemon_list) == end - start + 1 # + extra_count
    # make sure that all pokemon.id == '---' or are in the ID range
    assert all([start <= int(p.get_id()) <= end for p in pokemon_list
                if p.get_id() != '---'])
def test_regions_two():
    # same sweep as test_regions, via the private-accessor variant
    for region_name in region_dict:
        _test_region(region_name)
def test_ids():
    """All numbered ids are unique, 3-char strings, and within 1..MAX_ID."""
    db = Database()
    numbered = [p.get_id() for p in db.get_all() if p.get_id() != '---']
    # uniqueness and completeness in one shot
    assert len(set(numbered)) == len(numbered) == MAX_ID
    for id_str in numbered:
        assert isinstance(id_str, str)
        assert len(id_str) == 3
        assert 1 <= int(id_str) <= MAX_ID
def test_thresholds():
    """Every pokemon exposes a float dark threshold."""
    for pkmn in Database().get_all():
        assert isinstance(pkmn.get_dark_threshold(), float)
| gpl-3.0 |
caphrim007/ansible-modules-extras | cloud/rackspace/rax_mon_notification_plan.py | 153 | 6060 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_mon_notification_plan
short_description: Create or delete a Rackspace Cloud Monitoring notification
plan.
description:
- Create or delete a Rackspace Cloud Monitoring notification plan by
associating existing rax_mon_notifications with severity levels. Rackspace
monitoring module flow | rax_mon_entity -> rax_mon_check ->
rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification plan with this C(label) exists or does not
exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification plan. String between 1 and
255 characters long.
required: true
critical_state:
description:
- Notification list to use when the alarm state is CRITICAL. Must be an
array of valid rax_mon_notification ids.
warning_state:
description:
- Notification list to use when the alarm state is WARNING. Must be an array
of valid rax_mon_notification ids.
ok_state:
description:
- Notification list to use when the alarm state is OK. Must be an array of
valid rax_mon_notification ids.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Example notification plan
gather_facts: False
hosts: local
connection: local
tasks:
- name: Establish who gets called when.
rax_mon_notification_plan:
credentials: ~/.rax_pub
state: present
label: defcon1
critical_state:
- "{{ everyone['notification']['id'] }}"
warning_state:
- "{{ opsfloor['notification']['id'] }}"
register: defcon1
'''
# pyrax is optional at import time; main() fails with a clear message
# when it is missing.
try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False
def notification_plan(module, state, label, critical_state, warning_state, ok_state):
    """Ensure the Cloud Monitoring notification plan named `label`
    matches the requested `state`, recreating the plan when any severity
    list differs; always exits via module.exit_json/fail_json.
    """
    if len(label) < 1 or len(label) > 255:
        module.fail_json(msg='label must be between 1 and 255 characters long')
    changed = False
    notification_plan = None
    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')
    existing = []
    for n in cm.list_notification_plans():
        if n.label == label:
            existing.append(n)
    if existing:
        notification_plan = existing[0]
    if state == 'present':
        should_create = False
        should_delete = False
        if len(existing) > 1:
            module.fail_json(msg='%s notification plans are labelled %s.' %
                             (len(existing), label))
        if notification_plan:
            # a differing severity list means delete + recreate
            should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
                            (warning_state and warning_state != notification_plan.warning_state) or \
                            (ok_state and ok_state != notification_plan.ok_state)
            if should_delete:
                notification_plan.delete()
                should_create = True
        else:
            should_create = True
        if should_create:
            notification_plan = cm.create_notification_plan(label=label,
                                                            critical_state=critical_state,
                                                            warning_state=warning_state,
                                                            ok_state=ok_state)
            changed = True
    else:
        # state == 'absent': remove every plan carrying this label
        for np in existing:
            np.delete()
            changed = True
    if notification_plan:
        notification_plan_dict = {
            "id": notification_plan.id,
            "critical_state": notification_plan.critical_state,
            "warning_state": notification_plan.warning_state,
            "ok_state": notification_plan.ok_state,
            "metadata": notification_plan.metadata
        }
        module.exit_json(changed=changed, notification_plan=notification_plan_dict)
    else:
        module.exit_json(changed=changed)
def main():
    """Ansible entry point: build the argument spec, validate the
    environment, and delegate to notification_plan().
    """
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent']),
            label=dict(required=True),
            critical_state=dict(type='list'),
            warning_state=dict(type='list'),
            ok_state=dict(type='list')
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    state = module.params.get('state')
    label = module.params.get('label')
    critical_state = module.params.get('critical_state')
    warning_state = module.params.get('warning_state')
    ok_state = module.params.get('ok_state')
    setup_rax_module(module, pyrax)
    notification_plan(module, state, label, critical_state, warning_state, ok_state)
# Import module snippets
# (bottom wildcard imports are the historical Ansible module convention;
# they inject AnsibleModule and the rax_* helpers used above)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# Invoke the module.
main()
| gpl-3.0 |
karllessard/tensorflow | tensorflow/python/keras/utils/metrics_utils.py | 5 | 21073 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utils related to keras metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
from enum import Enum
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import to_list
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.tpu import tpu
from tensorflow.python.util import tf_decorator
# Large negative sentinel used by `_filter_top_k` to mask out non-top-k
# predictions so they always compare below any threshold in [0, 1].
NEG_INF = -1e10
class Reduction(Enum):
  """Types of metrics reduction.

  Contains the following values:

  * `SUM`: Scalar sum of weighted values.
  * `SUM_OVER_BATCH_SIZE`: Scalar sum of weighted values divided by
        number of elements.
  * `WEIGHTED_MEAN`: Scalar sum of weighted values divided by sum of weights.
  """
  SUM = 'sum'
  SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'
  WEIGHTED_MEAN = 'weighted_mean'
def update_state_wrapper(update_state_fn):
  """Decorator to wrap metric `update_state()` with `add_update()`.

  Args:
    update_state_fn: function that accumulates metric statistics.

  Returns:
    Decorated function that wraps `update_state_fn()` with `add_update()`.
  """

  def decorated(metric_obj, *args, **kwargs):
    """Decorated function with `add_update()`."""
    strategy = distribution_strategy_context.get_strategy()
    # TODO(b/142574744): Remove this check if a better solution is found for
    # declaring keras Metric outside of TPUStrategy and then updating it per
    # replica.
    for weight in metric_obj.weights:
      created_outside_tpu_scope = (
          tpu.is_tpu_strategy(strategy)
          and not strategy.extended.variable_created_in_scope(weight)
          and not distribution_strategy_context.in_cross_replica_context())
      if created_outside_tpu_scope:
        raise ValueError(
            'Trying to run metric.update_state in replica context when '
            'the metric was not created in TPUStrategy scope. '
            'Make sure the keras Metric is created in TPUstrategy scope. ')

    with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
      update_op = update_state_fn(*args, **kwargs)
    if update_op is not None:  # update_op will be None in eager execution.
      metric_obj.add_update(update_op)
    return update_op

  return tf_decorator.make_decorator(update_state_fn, decorated)
def result_wrapper(result_fn):
  """Decorator to wrap metric `result()` function in `merge_call()`.

  Result computation is an idempotent operation that simply calculates the
  metric value using the state variables.

  If metric state variables are distributed across replicas/devices and
  `result()` is requested from the context of one device - This function wraps
  `result()` in a distribution strategy `merge_call()`. With this,
  the metric state variables will be aggregated across devices.

  Args:
    result_fn: function that computes the metric result.

  Returns:
    Decorated function that wraps `result_fn()` in distribution strategy
    `merge_call()`.
  """

  def decorated(metric_obj, *args):
    """Decorated function with merge_call."""
    has_strategy = distribution_strategy_context.has_strategy()
    replica_context = distribution_strategy_context.get_replica_context()
    # No strategy, or already in cross-replica context: state is local, so
    # the result can be computed directly.
    if not has_strategy or replica_context is None:
      result_t = array_ops.identity(result_fn(*args))
    else:
      # TODO(psv): Test distribution of metrics using different distribution
      # strategies.

      # Creating a wrapper for merge_fn. merge_call invokes the given merge_fn
      # with distribution object as the first parameter. We create a wrapper
      # here so that the result function need not have that parameter.
      def merge_fn_wrapper(distribution, merge_fn, *args):
        # We will get `PerReplica` merge function. Taking the first one as all
        # are identical copies of the function that we had passed below.
        result = distribution.experimental_local_results(merge_fn)[0](*args)

        # Wrapping result in identity so that control dependency between
        # update_op from `update_state` and result works in case result returns
        # a tensor.
        return array_ops.identity(result)

      # Wrapping result in merge_call. merge_call is used when we want to leave
      # replica mode and compute a value in cross replica mode.
      result_t = replica_context.merge_call(
          merge_fn_wrapper, args=(result_fn,) + args)

    # We are saving the result op here to be used in train/test execution
    # functions. This basically gives the result op that was generated with a
    # control dep to the updates for these workflows.
    metric_obj._call_result = result_t
    return result_t

  return tf_decorator.make_decorator(result_fn, decorated)
def weakmethod(method):
  """Creates a weak reference to the bound method.

  The returned callable resolves the instance through a `weakref.ref` at call
  time, so holding the wrapper does not keep the bound instance alive.

  Args:
    method: a bound method (e.g. `obj.fn`).

  Returns:
    A function with the same call signature that invokes `method` on the
    (still alive) instance.
  """
  # Bug fix: `im_class`/`im_func`/`im_self` are Python-2-only attributes and
  # raise AttributeError on Python 3; use the `__self__`/`__func__` names.
  cls = method.__self__.__class__
  func = method.__func__
  instance_ref = weakref.ref(method.__self__)

  @functools.wraps(method)
  def inner(*args, **kwargs):
    # Re-bind the underlying function to the dereferenced instance.
    return func.__get__(instance_ref(), cls)(*args, **kwargs)

  # Drop our strong reference to the bound method; only the weakref remains.
  del method
  return inner
def assert_thresholds_range(thresholds):
  """Raises ValueError unless every threshold lies in [0, 1].

  `None` entries are treated as invalid; a `None` thresholds argument is a
  no-op.
  """
  if thresholds is None:
    return
  invalid = [t for t in thresholds if t is None or not 0 <= t <= 1]
  if invalid:
    raise ValueError(
        'Threshold values must be in [0, 1]. Invalid values: {}'.format(
            invalid))
def parse_init_thresholds(thresholds, default_threshold=0.5):
  """Normalizes a constructor `thresholds` argument into a validated list.

  Falls back to `[default_threshold]` when no thresholds were supplied;
  otherwise range-checks the given values before returning them as a list.
  """
  if thresholds is None:
    return to_list(default_threshold)
  assert_thresholds_range(to_list(thresholds))
  return to_list(thresholds)
class ConfusionMatrix(Enum):
  """Valid keys for the `variables_to_update` dict passed to
  `update_confusion_matrix_variables`."""
  TRUE_POSITIVES = 'tp'
  FALSE_POSITIVES = 'fp'
  TRUE_NEGATIVES = 'tn'
  FALSE_NEGATIVES = 'fn'
class AUCCurve(Enum):
  """Type of AUC Curve (ROC or PR)."""
  ROC = 'ROC'
  PR = 'PR'

  @staticmethod
  def from_str(key):
    """Maps 'roc'/'ROC' or 'pr'/'PR' to the matching member, else raises."""
    if key in ('roc', 'ROC'):
      return AUCCurve.ROC
    if key in ('pr', 'PR'):
      return AUCCurve.PR
    raise ValueError('Invalid AUC curve value "%s".' % key)
class AUCSummationMethod(Enum):
  """Type of AUC summation method.

  https://en.wikipedia.org/wiki/Riemann_sum)

  Contains the following values:

  * 'interpolation': Applies mid-point summation scheme for `ROC` curve. For
    `PR` curve, interpolates (true/false) positives but not the ratio that is
    precision (see Davis & Goadrich 2006 for details).
  * 'minoring': Applies left summation for increasing intervals and right
    summation for decreasing intervals.
  * 'majoring': Applies right summation for increasing intervals and left
    summation for decreasing intervals.
  """
  INTERPOLATION = 'interpolation'
  MAJORING = 'majoring'
  MINORING = 'minoring'

  @staticmethod
  def from_str(key):
    """Maps a lower- or title-cased method name to its member, else raises."""
    if key in ('minoring', 'Minoring'):
      return AUCSummationMethod.MINORING
    if key in ('majoring', 'Majoring'):
      return AUCSummationMethod.MAJORING
    if key in ('interpolation', 'Interpolation'):
      return AUCSummationMethod.INTERPOLATION
    raise ValueError('Invalid AUC summation method value "%s".' % key)
def update_confusion_matrix_variables(variables_to_update,
                                      y_true,
                                      y_pred,
                                      thresholds,
                                      top_k=None,
                                      class_id=None,
                                      sample_weight=None,
                                      multi_label=False,
                                      label_weights=None):
  """Returns op to update the given confusion matrix variables.

  For every pair of values in y_true and y_pred:

  true_positive: y_true == True and y_pred > thresholds
  false_negatives: y_true == True and y_pred <= thresholds
  true_negatives: y_true == False and y_pred <= thresholds
  false_positive: y_true == False and y_pred > thresholds

  The results will be weighted and added together. When multiple thresholds are
  provided, we will repeat the same for every threshold.

  For estimation of these metrics over a stream of data, the function creates an
  `update_op` operation that updates the given variables.

  If `sample_weight` is `None`, weights default to 1.
  Use weights of 0 to mask values.

  Args:
    variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
      and corresponding variables to update as values.
    y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`.
    y_pred: A floating point `Tensor` of arbitrary shape and whose values are in
      the range `[0, 1]`.
    thresholds: A float value, float tensor, python list, or tuple of float
      thresholds in `[0, 1]`, or NEG_INF (used when top_k is set).
    top_k: Optional int, indicates that the positive labels should be limited to
      the top k predictions.
    class_id: Optional int, limits the prediction and labels to the class
      specified by this argument.
    sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
      `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `y_true` dimension).
    multi_label: Optional boolean indicating whether multidimensional
      prediction/labels should be treated as multilabel responses, or flattened
      into a single label. When True, the valus of `variables_to_update` must
      have a second dimension equal to the number of labels in y_true and
      y_pred, and those tensors must not be RaggedTensors.
    label_weights: (optional) tensor of non-negative weights for multilabel
      data. The weights are applied when calculating TP, FP, FN, and TN without
      explicit multilabel handling (i.e. when the data is to be flattened).

  Returns:
    Update op.

  Raises:
    ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
      `sample_weight` is not `None` and its shape doesn't match `y_pred`, or if
      `variables_to_update` contains invalid keys.
  """
  if multi_label and label_weights is not None:
    raise ValueError('`label_weights` for multilabel data should be handled '
                     'outside of `update_confusion_matrix_variables` when '
                     '`multi_label` is True.')
  if variables_to_update is None:
    return
  if not any(
      key for key in variables_to_update if key in list(ConfusionMatrix)):
    raise ValueError(
        'Please provide at least one valid confusion matrix '
        'variable to update. Valid variable key options are: "{}". '
        'Received: "{}"'.format(
            list(ConfusionMatrix), variables_to_update.keys()))

  # All counters share one dtype; take it from the first variable supplied.
  variable_dtype = list(variables_to_update.values())[0].dtype

  y_true = math_ops.cast(y_true, dtype=variable_dtype)
  y_pred = math_ops.cast(y_pred, dtype=variable_dtype)
  thresholds = ops.convert_to_tensor_v2_with_dispatch(
      thresholds, dtype=variable_dtype)
  num_thresholds = thresholds.shape[0]
  if multi_label:
    one_thresh = math_ops.equal(
        math_ops.cast(1, dtype=dtypes.int32),
        array_ops.rank(thresholds),
        name='one_set_of_thresholds_cond')
  else:
    # Ragged inputs are only supported in the flattened (non-multilabel)
    # path; this converts them to flat_values after compatibility checks.
    [y_pred,
     y_true], _ = ragged_assert_compatible_and_get_flat_values([y_pred, y_true],
                                                               sample_weight)
    one_thresh = math_ops.cast(True, dtype=dtypes.bool)

  invalid_keys = [
      key for key in variables_to_update if key not in list(ConfusionMatrix)
  ]
  if invalid_keys:
    raise ValueError(
        'Invalid keys: {}. Valid variable key options are: "{}"'.format(
            invalid_keys, list(ConfusionMatrix)))

  with ops.control_dependencies([
      check_ops.assert_greater_equal(
          y_pred,
          math_ops.cast(0.0, dtype=y_pred.dtype),
          message='predictions must be >= 0'),
      check_ops.assert_less_equal(
          y_pred,
          math_ops.cast(1.0, dtype=y_pred.dtype),
          message='predictions must be <= 1')
  ]):
    if sample_weight is None:
      y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
          y_pred, y_true)
    else:
      sample_weight = math_ops.cast(sample_weight, dtype=variable_dtype)
      y_pred, y_true, sample_weight = (
          losses_utils.squeeze_or_expand_dimensions(
              y_pred, y_true, sample_weight=sample_weight))
  y_pred.shape.assert_is_compatible_with(y_true.shape)

  if top_k is not None:
    y_pred = _filter_top_k(y_pred, top_k)
  if class_id is not None:
    y_true = y_true[..., class_id]
    y_pred = y_pred[..., class_id]

  pred_shape = array_ops.shape(y_pred)
  num_predictions = pred_shape[0]
  if y_pred.shape.ndims == 1:
    num_labels = 1
  else:
    num_labels = gen_math_ops.Prod(input=pred_shape[1:], axis=0)
  thresh_label_tile = control_flow_ops.cond(
      one_thresh, lambda: num_labels,
      lambda: math_ops.cast(1, dtype=dtypes.int32))

  # Reshape predictions and labels, adding a dim for thresholding.
  if multi_label:
    predictions_extra_dim = array_ops.expand_dims(y_pred, 0)
    labels_extra_dim = array_ops.expand_dims(
        math_ops.cast(y_true, dtype=dtypes.bool), 0)
  else:
    # Flatten predictions and labels when not multilabel.
    predictions_extra_dim = array_ops.reshape(y_pred, [1, -1])
    labels_extra_dim = array_ops.reshape(
        math_ops.cast(y_true, dtype=dtypes.bool), [1, -1])

  # Tile the thresholds for every prediction.
  if multi_label:
    thresh_pretile_shape = [num_thresholds, 1, -1]
    thresh_tiles = [1, num_predictions, thresh_label_tile]
    data_tiles = [num_thresholds, 1, 1]
  else:
    thresh_pretile_shape = [num_thresholds, -1]
    thresh_tiles = [1, num_predictions * num_labels]
    data_tiles = [num_thresholds, 1]

  thresh_tiled = array_ops.tile(
      array_ops.reshape(thresholds, thresh_pretile_shape),
      array_ops.stack(thresh_tiles))

  # Tile the predictions for every threshold.
  preds_tiled = array_ops.tile(predictions_extra_dim, data_tiles)

  # Compare predictions and threshold.
  pred_is_pos = math_ops.greater(preds_tiled, thresh_tiled)

  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_extra_dim, data_tiles)

  if sample_weight is not None:
    sample_weight = weights_broadcast_ops.broadcast_weights(
        math_ops.cast(sample_weight, dtype=variable_dtype), y_pred)
    weights_tiled = array_ops.tile(
        array_ops.reshape(sample_weight, thresh_tiles), data_tiles)
  else:
    weights_tiled = None

  if label_weights is not None and not multi_label:
    label_weights = array_ops.expand_dims(label_weights, 0)
    label_weights = weights_broadcast_ops.broadcast_weights(label_weights,
                                                            y_pred)
    label_weights_tiled = array_ops.tile(
        array_ops.reshape(label_weights, thresh_tiles), data_tiles)
    if weights_tiled is None:
      weights_tiled = label_weights_tiled
    else:
      weights_tiled = math_ops.multiply(weights_tiled, label_weights_tiled)

  update_ops = []

  def weighted_assign_add(label, pred, weights, var):
    # Count entries where both masks are true, weighted, summed per threshold.
    label_and_pred = math_ops.cast(
        math_ops.logical_and(label, pred), dtype=var.dtype)
    if weights is not None:
      label_and_pred *= math_ops.cast(weights, dtype=var.dtype)
    return var.assign_add(math_ops.reduce_sum(label_and_pred, 1))

  loop_vars = {
      ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos),
  }
  update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
  update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update
  update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update

  # Only materialize the negated masks that requested counters need.
  if update_fn or update_tn:
    pred_is_neg = math_ops.logical_not(pred_is_pos)
    loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg)

  if update_fp or update_tn:
    label_is_neg = math_ops.logical_not(label_is_pos)
    loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos)
    if update_tn:
      loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (label_is_neg, pred_is_neg)

  for matrix_cond, (label, pred) in loop_vars.items():
    if matrix_cond in variables_to_update:
      update_ops.append(
          weighted_assign_add(label, pred, weights_tiled,
                              variables_to_update[matrix_cond]))
  return control_flow_ops.group(update_ops)
def _filter_top_k(x, k):
  """Keeps the top-k entries along the last axis of `x`, masking the rest.

  Positions outside the top k are pushed down to NEG_INF so downstream
  threshold comparisons treat them as negatives. Used for computing top-k
  prediction values in dense labels (which has the same shape as predictions)
  for recall and precision top-k metrics.

  Args:
    x: tensor with any dimensions.
    k: the number of values to keep.

  Returns:
    tensor with same shape and dtype as x.
  """
  _, top_k_idx = nn_ops.top_k(x, k, sorted=False)
  # One-hot each selected index over the last axis, then collapse the k
  # one-hot rows into a single 0/1 keep-mask.
  keep_mask = math_ops.reduce_sum(
      array_ops.one_hot(top_k_idx, array_ops.shape(x)[-1], axis=-1), axis=-2)
  return x * keep_mask + NEG_INF * (1 - keep_mask)
def ragged_assert_compatible_and_get_flat_values(values, mask=None):
  """If ragged, it checks the compatibility and then returns the flat_values.

  Note: If two tensors are dense, it does not check their compatibility.
  Note: Although two ragged tensors with different ragged ranks could have
        identical overall rank and dimension sizes and hence be compatible,
        we do not support those cases.

  Args:
    values: A list of potentially ragged tensor of the same ragged_rank.
    mask: A potentially ragged tensor of the same ragged_rank as elements in
      Values.

  Returns:
    A tuple in which the first element is the list of tensors and the second
    is the mask tensor. ([Values], mask). Mask and the element in Values
    are equal to the flat_values of the input arguments (if they were ragged).
  """
  if isinstance(values, list):
    is_all_ragged = \
        all(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values)
    is_any_ragged = \
        any(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values)
  else:
    is_all_ragged = isinstance(values, ragged_tensor.RaggedTensor)
    is_any_ragged = is_all_ragged
  # Only the all-ragged case (with a None or ragged mask) is converted; a mix
  # of ragged and dense inputs is rejected below.
  if (is_all_ragged and
      ((mask is None) or isinstance(mask, ragged_tensor.RaggedTensor))):
    to_be_stripped = False
    if not isinstance(values, list):
      values = [values]
      to_be_stripped = True

    # NOTE: we leave the flat_values compatibility to
    # tf.TensorShape `assert_is_compatible_with`
    # check if both dynamic dimensions are equal and then use the flat_values.
    nested_row_split_list = [rt.nested_row_splits for rt in values]
    assertion_list = ragged_util.assert_splits_match(nested_row_split_list)

    # if both are ragged sample_weights also should be ragged with same dims.
    if isinstance(mask, ragged_tensor.RaggedTensor):
      assertion_list_for_mask = ragged_util.assert_splits_match(
          [nested_row_split_list[0], mask.nested_row_splits])
      tmp = control_flow_ops.with_dependencies(assertion_list_for_mask,
                                               mask.flat_values)
      mask = array_ops.expand_dims(tmp, -1)

    # values has at least 1 element.
    flat_values = []
    for value in values:
      tmp = control_flow_ops.with_dependencies(assertion_list,
                                               value.flat_values)
      flat_values.append(array_ops.expand_dims(tmp, -1))

    values = flat_values[0] if to_be_stripped else flat_values

  elif is_any_ragged:
    raise TypeError('One of the inputs does not have acceptable types.')
  # values are empty or value are not ragged and mask is ragged.
  elif isinstance(mask, ragged_tensor.RaggedTensor):
    raise TypeError('Ragged mask is not allowed with non-ragged inputs.')

  return values, mask
| apache-2.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/scipy/io/matlab/tests/test_streams.py | 9 | 5460 | """ Testing
"""
from __future__ import division, print_function, absolute_import
import os
import sys
import zlib
from io import BytesIO
if sys.version_info[0] >= 3:
cStringIO = BytesIO
else:
from cStringIO import StringIO as cStringIO
from tempfile import mkstemp
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_, assert_equal
from pytest import raises as assert_raises
from scipy.io.matlab.streams import (make_stream,
GenericStream, cStringStream, FileStream, ZlibInputStream,
_read_into, _read_string)
@contextmanager
def setup_test_file():
    """Yield (real file, BytesIO, cStringIO) streams holding the same bytes.

    The backing temp file is removed once the context exits.
    """
    payload = b'a\x00string'
    handle, path = mkstemp()
    with os.fdopen(handle, 'wb') as out:
        out.write(payload)
    with open(path, 'rb') as file_stream:
        yield file_stream, BytesIO(payload), cStringIO(payload)
    os.unlink(path)
def test_make_stream():
    # make_stream should wrap each stream flavour in the matching adapter:
    # BytesIO -> GenericStream, cStringIO -> cStringStream (checked on
    # Python 2 only), real file object -> FileStream.
    with setup_test_file() as (fs, gs, cs):
        # test stream initialization
        assert_(isinstance(make_stream(gs), GenericStream))
        if sys.version_info[0] < 3:
            assert_(isinstance(make_stream(cs), cStringStream))
        assert_(isinstance(make_stream(fs), FileStream))
def test_tell_seek():
    # seek() returns 0 on every stream flavour, and tell() tracks the
    # position for absolute (whence=0), relative (whence=1) and end-relative
    # (whence=2) seeks over the 8-byte payload.
    cases = [((0,), 0), ((5,), 5), ((2, 1), 7), ((-2, 2), 6)]
    with setup_test_file() as (fs, gs, cs):
        for raw in (fs, gs, cs):
            st = make_stream(raw)
            for seek_args, expected_pos in cases:
                assert_equal(st.seek(*seek_args), 0)
                assert_equal(st.tell(), expected_pos)
def test_read():
    # Exercises read(-1)/read(n), _read_into and _read_string on each stream
    # flavour. The payload is 8 bytes, so after two 4-byte reads the stream
    # is exhausted and any further read must raise IOError.
    with setup_test_file() as (fs, gs, cs):
        for s in (fs, gs, cs):
            st = make_stream(s)
            st.seek(0)
            res = st.read(-1)
            assert_equal(res, b'a\x00string')
            st.seek(0)
            res = st.read(4)
            assert_equal(res, b'a\x00st')
            # read into
            st.seek(0)
            res = _read_into(st, 4)
            assert_equal(res, b'a\x00st')
            res = _read_into(st, 4)
            assert_equal(res, b'ring')
            assert_raises(IOError, _read_into, st, 2)
            # read alloc
            st.seek(0)
            res = _read_string(st, 4)
            assert_equal(res, b'a\x00st')
            res = _read_string(st, 4)
            assert_equal(res, b'ring')
            assert_raises(IOError, _read_string, st, 2)
class TestZlibInputStream(object):
def _get_data(self, size):
data = np.random.randint(0, 256, size).astype(np.uint8).tostring()
compressed_data = zlib.compress(data)
stream = BytesIO(compressed_data)
return stream, len(compressed_data), data
def test_read(self):
block_size = 131072
SIZES = [0, 1, 10, block_size//2, block_size-1,
block_size, block_size+1, 2*block_size-1]
READ_SIZES = [block_size//2, block_size-1,
block_size, block_size+1]
def check(size, read_size):
compressed_stream, compressed_data_len, data = self._get_data(size)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
data2 = b''
so_far = 0
while True:
block = stream.read(min(read_size,
size - so_far))
if not block:
break
so_far += len(block)
data2 += block
assert_equal(data, data2)
for size in SIZES:
for read_size in READ_SIZES:
check(size, read_size)
def test_read_max_length(self):
size = 1234
data = np.random.randint(0, 256, size).astype(np.uint8).tostring()
compressed_data = zlib.compress(data)
compressed_stream = BytesIO(compressed_data + b"abbacaca")
stream = ZlibInputStream(compressed_stream, len(compressed_data))
stream.read(len(data))
assert_equal(compressed_stream.tell(), len(compressed_data))
assert_raises(IOError, stream.read, 1)
def test_seek(self):
compressed_stream, compressed_data_len, data = self._get_data(1024)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
stream.seek(123)
p = 123
assert_equal(stream.tell(), p)
d1 = stream.read(11)
assert_equal(d1, data[p:p+11])
stream.seek(321, 1)
p = 123+11+321
assert_equal(stream.tell(), p)
d2 = stream.read(21)
assert_equal(d2, data[p:p+21])
stream.seek(641, 0)
p = 641
assert_equal(stream.tell(), p)
d3 = stream.read(11)
assert_equal(d3, data[p:p+11])
assert_raises(IOError, stream.seek, 10, 2)
assert_raises(IOError, stream.seek, -1, 1)
assert_raises(ValueError, stream.seek, 1, 123)
stream.seek(10000, 1)
assert_raises(IOError, stream.read, 12)
def test_all_data_read(self):
compressed_stream, compressed_data_len, data = self._get_data(1024)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
assert_(not stream.all_data_read())
stream.seek(512)
assert_(not stream.all_data_read())
stream.seek(1024)
assert_(stream.all_data_read())
| mit |
sstone/bitcoin | test/functional/feature_nulldummy.py | 10 | 7178 | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate COINBASE_MATURITY (CB) more blocks to ensure the coinbases are mature.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in block CB + 3.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on block CB + 4.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on block CB + 5.
"""
import time
from test_framework.blocktools import (
COINBASE_MATURITY,
NORMAL_GBT_REQUEST_PARAMS,
add_witness_commitment,
create_block,
create_transaction,
)
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
# Reject reason bitcoind reports when the CHECKMULTISIG dummy stack element
# is non-empty, i.e. when the NULLDUMMY rule is violated.
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
    """Replace the NULLDUMMY-required empty dummy push in the first input's
    scriptSig with OP_1 (0x51), producing a tx that violates NULLDUMMY."""
    elements = []
    for elem in CScript(tx.vin[0].scriptSig):
        if elements:
            elements.append(elem)
        else:
            # First element must be the empty dummy push before we swap it.
            assert len(elem) == 0
            elements.append(b'\x51')
    tx.vin[0].scriptSig = CScript(elements)
    tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
    def set_test_params(self):
        """Run one fresh node with segwit activating late enough to observe
        both pre- and post-NULLDUMMY enforcement."""
        self.num_nodes = 1
        self.setup_clean_chain = True
        # This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
        # normal segwit activation here (and don't use the default always-on behaviour).
        self.extra_args = [[
            f'-segwitheight={COINBASE_MATURITY + 5}',
            '-addresstype=legacy',
        ]]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        # Set up a watch-only multisig wallet plus legacy/p2sh-segwit
        # addresses used by every sub-test below.
        self.nodes[0].createwallet(wallet_name='wmulti', disable_private_keys=True)
        wmulti = self.nodes[0].get_wallet_rpc('wmulti')
        w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
        self.address = w0.getnewaddress()
        self.pubkey = w0.getaddressinfo(self.address)['pubkey']
        self.ms_address = wmulti.addmultisigaddress(1, [self.pubkey])['address']
        self.wit_address = w0.getnewaddress(address_type='p2sh-segwit')
        self.wit_ms_address = wmulti.addmultisigaddress(1, [self.pubkey], '', 'p2sh-segwit')['address']
        if not self.options.descriptors:
            # Legacy wallets need to import these so that they are watched by the wallet. This is unnecessary (and does not need to be tested) for descriptor wallets
            wmulti.importaddress(self.ms_address)
            wmulti.importaddress(self.wit_ms_address)

        self.coinbase_blocks = self.nodes[0].generate(2)  # block height = 2
        coinbase_txid = []
        for i in self.coinbase_blocks:
            coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
        self.nodes[0].generate(COINBASE_MATURITY)  # block height = COINBASE_MATURITY + 2
        self.lastblockhash = self.nodes[0].getbestblockhash()
        self.lastblockheight = COINBASE_MATURITY + 2
        self.lastblocktime = int(time.time()) + self.lastblockheight

        self.log.info(f"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [{COINBASE_MATURITY + 3}]")
        test1txs = [create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=49)]
        txid1 = self.nodes[0].sendrawtransaction(test1txs[0].serialize_with_witness().hex(), 0)
        test1txs.append(create_transaction(self.nodes[0], txid1, self.ms_address, amount=48))
        txid2 = self.nodes[0].sendrawtransaction(test1txs[1].serialize_with_witness().hex(), 0)
        test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
        txid3 = self.nodes[0].sendrawtransaction(test1txs[2].serialize_with_witness().hex(), 0)
        self.block_submit(self.nodes[0], test1txs, False, True)

        self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
        test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
        trueDummy(test2tx)
        assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)

        self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]")
        self.block_submit(self.nodes[0], [test2tx], False, True)

        self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
        test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
        # Keep a compliant copy before mutating, for re-use in Test 6.
        test6txs = [CTransaction(test4tx)]
        trueDummy(test4tx)
        assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
        self.block_submit(self.nodes[0], [test4tx])

        self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
        test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
        test6txs.append(CTransaction(test5tx))
        # Corrupt the witness dummy element instead of the scriptSig.
        test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
        assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
        self.block_submit(self.nodes[0], [test5tx], True)

        self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]")
        for i in test6txs:
            self.nodes[0].sendrawtransaction(i.serialize_with_witness().hex(), 0)
        self.block_submit(self.nodes[0], test6txs, True, True)

    def block_submit(self, node, txs, witness=False, accept=False):
        """Mine `txs` into a block on top of our tracked tip and submit it,
        asserting acceptance or rejection per `accept`."""
        tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
        assert_equal(tmpl['previousblockhash'], self.lastblockhash)
        assert_equal(tmpl['height'], self.lastblockheight + 1)
        block = create_block(tmpl=tmpl, ntime=self.lastblocktime + 1)
        for tx in txs:
            tx.rehash()
            block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        witness and add_witness_commitment(block)
        block.rehash()
        block.solve()
        assert_equal(None if accept else 'block-validation-failed', node.submitblock(block.serialize().hex()))
        if (accept):
            # Block accepted: advance our local view of the chain tip.
            assert_equal(node.getbestblockhash(), block.hash)
            self.lastblockhash = block.hash
            self.lastblocktime += 1
            self.lastblockheight += 1
        else:
            assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
    # Run the functional test directly (the framework handles argv/setup).
    NULLDUMMYTest().main()
| mit |
blindsighttf2/Astron | test/common/unittests.py | 4 | 6582 | import unittest, subprocess, tempfile, os, threading
from socket import socket, AF_INET, SOCK_STREAM
from astron import *
class ConfigTest(unittest.TestCase):
    """Validates astrond config files by launching the daemon against them."""

    class ConfigRunner(object):
        """Runs the astrond binary on a config file and classifies the result."""
        DAEMON_PATH = './astrond'

        def __init__(self, config):
            # config: path of the YAML config file handed to the daemon.
            self.config = config
            self.process = None

        def run(self, timeout):
            """Start astrond on the config; return 'Valid' if the daemon is
            still running after `timeout` seconds (config accepted), or
            'Invalid' if it exited on its own before then."""
            def target():
                self.process = subprocess.Popen([self.DAEMON_PATH, self.config])
                self.process.communicate()

            thread = threading.Thread(target=target)
            thread.start()

            thread.join(timeout)
            if thread.is_alive():
                # Daemon outlived the timeout: it accepted the config.
                self.process.terminate()
                thread.join()
                return 'Valid'
            return 'Invalid'

    @classmethod
    def writeConfig(cls, config):
        """Overwrite the shared temp config file with the given text."""
        f = open(cls.config_path, "w")
        f.write(config)
        f.close()

    @classmethod
    def checkConfig(cls, config, timeout = 2):
        """Write `config` to disk and return the runner's verdict string."""
        cls.writeConfig(config)
        return cls.test_runner.run(timeout)

    @classmethod
    def setUpClass(cls):
        # One temp config file and runner shared by all tests in the class.
        file_handle, cls.config_path = tempfile.mkstemp(prefix = 'astron-', suffix = '.cfg.yaml')
        os.close(file_handle)
        cls.test_runner = ConfigTest.ConfigRunner(cls.config_path)

    @classmethod
    def tearDownClass(cls):
        if cls.config_path is not None:
            os.remove(cls.config_path)
class ProtocolTest(unittest.TestCase):
    """Base class for tests that speak the astron message-director protocol.

    Provides helpers to connect to the daemon, compare datagrams, and dump
    mismatching datagrams to disk for offline inspection.
    """

    @classmethod
    def connectToServer(cls, addr='127.0.0.1', port=57123):
        """Open a TCP connection to the message director under test."""
        sock = socket(AF_INET, SOCK_STREAM)
        sock.connect((addr, port))
        return MDConnection(sock)

    def writeUnexpectedAndFail(self, received):
        """Dump an unexpected datagram to disk, then fail the test."""
        testName = self.__class__.__name__
        # 'with' guarantees the dump file is flushed/closed before failing.
        with open("%s-received.bin" % testName, "wb") as f:
            f.write(received.get_data())
        self.fail("Received datagram when expecting none.\n" +
                  "\tWritten to \"%s-received.bin\"." % testName)

    def writeDatagramsAndFail(self, expected, received):
        """Dump both datagrams to disk for offline diffing, then fail."""
        testName = self.__class__.__name__
        with open("%s-expected.bin" % testName, "wb") as f:
            f.write(expected.get_data())
        with open("%s-received.bin" % testName, "wb") as f:
            f.write(received.get_data())
        self.fail("Received datagram doesn't match expected.\n" +
                  "\tWritten to \"%s-{expected,received}.bin\"." % testName)

    def assertDatagramsEqual(self, expected, received, isClient=False):
        """Assert two datagrams are equivalent.

        Client datagrams carry only a msgtype; server datagrams also carry
        a channel list and (except for control datagrams) a sender.
        assertEqual replaces the deprecated assertEquals alias (removed in
        Python 3.12).
        """
        lhs = DatagramIterator(expected)
        rhs = DatagramIterator(received)
        if isClient:
            expectedMsgtype = lhs.read_uint16()
            receivedMsgtype = rhs.read_uint16()
            self.assertEqual(expectedMsgtype, receivedMsgtype)
            if not received.equals(expected):
                self.writeDatagramsAndFail(expected, received)
        else:
            numChannelsExpected = lhs.read_uint8()
            numChannelsReceived = rhs.read_uint8()
            self.assertEqual(numChannelsExpected, numChannelsReceived)
            expectedRecipients = expected.get_channels()
            receivedRecipients = received.get_channels()
            self.assertEqual(expectedRecipients, receivedRecipients)
            # Skip past the channel count byte plus the channel list.
            lhs.seek(1 + CHANNEL_SIZE_BYTES * numChannelsExpected)
            rhs.seek(1 + CHANNEL_SIZE_BYTES * numChannelsReceived)
            if expectedRecipients != set([CONTROL_CHANNEL]):
                # If we aren't a control datagram, check the sender
                expectedSender = lhs.read_channel()
                receivedSender = rhs.read_channel()
                self.assertEqual(expectedSender, receivedSender)
            expectedMsgtype = lhs.read_uint16()
            receivedMsgtype = rhs.read_uint16()
            self.assertEqual(expectedMsgtype, receivedMsgtype)
            if not received.matches(expected):
                self.writeDatagramsAndFail(expected, received)

    def expect(self, conn, expected, isClient=False):
        """Receive one datagram from ``conn`` and assert it matches."""
        received = conn.recv_maybe()
        if received is None:
            self.fail("No datagram received.")
        self.assertDatagramsEqual(expected, received, isClient)

    def expectMany(self, conn, datagrams, ignoreExtra=False, isClient=False):
        """Receive datagrams until every entry of ``datagrams`` is matched.

        Order is not significant. With ignoreExtra, unmatched extras are
        tolerated; otherwise the most similar expected datagram is diffed
        against the stray one to produce useful failure output.
        """
        datagrams = list(datagrams)  # We're going to be doing datagrams.remove()
        recvs = []
        numRecvd = 0
        numMatch = 0
        numExpct = len(datagrams)
        while datagrams:
            received = conn.recv_maybe()
            if received is None:
                if numMatch == 0:
                    self.fail("Received %d datagrams, but expected %d." % (numRecvd, numExpct))
                else:
                    error = "Received %d datagrams, of which %d matched, but expected %d."
                    error += "\n Received msgtypes: ( "
                    for dg in recvs:
                        error += "%s " % dg.get_msgtype()
                    error = error % (numRecvd, numMatch, numExpct)
                    error += ")"
                    self.fail(error)
            numRecvd += 1
            for datagram in datagrams:
                if (isClient and received.equals(datagram)) or received.matches(datagram):
                    recvs.append(datagram)
                    datagrams.remove(datagram)
                    numMatch += 1
                    break
            else:
                # No expected datagram matched the received one.
                if not ignoreExtra:
                    best = None
                    for datagram in datagrams:
                        # Try to find the most similar datagram
                        if datagram.get_channels() == received.get_channels():
                            self.assertDatagramsEqual(datagram, received, isClient)
                            break
                        elif datagram.get_size() == received.get_size() and best is None:
                            best = datagram
                    else:
                        if best is not None:
                            self.assertDatagramsEqual(best, received, isClient)
                        else:
                            self.assertDatagramsEqual(datagrams[0], received, isClient)
                    # This should always fail, but it produces more useful
                    # debugging output. Lets guarantee that it fails for fun.
                    self.fail("Testsuite implementation error.")

    def expectNone(self, conn):
        """Assert that no datagram is waiting on ``conn``."""
        received = conn.recv_maybe()
        if received is not None:
            self.writeUnexpectedAndFail(received)
cisco-open-source/selenium | py/test/selenium/webdriver/common/opacity_tests.py | 15 | 2086 | #!/usr/bin/python
# Copyright 2008-2012 WebDriver committers
# Copyright 2008-2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from selenium.webdriver.common.by import By
class OpacityTests(unittest.TestCase):
    """Tests for interacting with elements whose CSS opacity is zero."""

    @pytest.mark.ignore_ie
    @pytest.mark.ignore_opera
    def testShouldBeAbleToClickOnElementsWithOpacityZero(self):
        """A fully transparent element must still receive clicks."""
        self._loadPage("click_jacker")
        element = self.driver.find_element(By.ID, "clickJacker")
        # Hoist the duplicate CSS-property call; assertEqual replaces the
        # deprecated assertEquals alias (removed in Python 3.12). The old
        # failure message embedded raw source indentation via a backslash
        # line-continuation inside the string literal; it is now built
        # from adjacent literals instead.
        opacity = element.value_of_css_property("opacity")
        self.assertEqual('0', opacity,
                         "Precondition failed: clickJacker should be transparent. "
                         "Value was %s" % opacity)
        element.click()
        self.assertEqual('1', element.value_of_css_property("opacity"))

    @pytest.mark.ignore_ie
    def testShouldBeAbleToSelectOptionsFromAnInvisibleSelect(self):
        """Options inside an invisible <select> can still be selected."""
        self._loadPage("formPage")
        select = self.driver.find_element(By.ID, "invisi_select")
        options = select.find_elements(By.TAG_NAME, "option")
        apples = options[0]
        oranges = options[1]
        self.assertTrue(apples.is_selected())
        self.assertFalse(oranges.is_selected())

        oranges.click()
        self.assertFalse(apples.is_selected())
        self.assertTrue(oranges.is_selected())

    def _pageURL(self, name):
        """Map a test page name to its URL on the test webserver."""
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        """Navigate the driver to the named test page."""
        self.driver.get(self._pageURL(name))
| apache-2.0 |
Esri/geoprocessing-tools-for-hadoop | requests/packages/charade/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    """Charset prober for the EUC-KR (Korean) encoding.

    Combines an EUC-KR coding state machine with a byte-distribution
    analyser, both supplied to the generic multi-byte prober machinery.
    """

    def __init__(self):
        # Explicit base-class call (not super()) matches the library's
        # Python 2-compatible style; the root prober class may be old-style.
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        # Start from a clean detection state.
        self.reset()

    def get_charset_name(self):
        # Name reported to callers when this prober wins the detection.
        return "EUC-KR"
| apache-2.0 |
mariaantoanelam/Licenta | Lib/test/test_compile.py | 6 | 3114 | from test_support import verbose, TestFailed
# Regression tests (Python 2) for compiler behaviour: assignment to
# __debug__, argument-list validation, tuple ("complex") parameters,
# malformed float literals, and literals with leading zeroes.
# Relies on `verbose`/`TestFailed` from test_support (imported above).
# NOTE(review): uses `print`/`exec` statements, tuple parameters and the
# `raise X, msg` form, so this module is Python 2 only.

if verbose:
    print "Testing whether compiler catches assignment to __debug__"
try:
    compile('__debug__ = 1', '?', 'single')
except SyntaxError:
    pass
import __builtin__
prev = __builtin__.__debug__
setattr(__builtin__, '__debug__', 'sure')
setattr(__builtin__, '__debug__', prev)

if verbose:
    print 'Running tests on argument handling'

# Duplicate parameter names must be rejected at compile time.
try:
    exec 'def f(a, a): pass'
    raise TestFailed, "duplicate arguments"
except SyntaxError:
    pass
try:
    exec 'def f(a = 0, a = 1): pass'
    raise TestFailed, "duplicate keyword arguments"
except SyntaxError:
    pass
# A name cannot be declared global and used as a parameter.
try:
    exec 'def f(a): global a; a = 1'
    raise TestFailed, "variable is global and local"
except SyntaxError:
    pass

if verbose:
    print "testing complex args"

# Tuple parameters (removed in Python 3 by PEP 3113); each definition
# below deliberately shadows the previous `comp_args`.
def comp_args((a, b)):
    print a,b
comp_args((1, 2))

def comp_args((a, b)=(3, 4)):
    print a, b
comp_args((1, 2))
comp_args()

def comp_args(a, (b, c)):
    print a, b, c
comp_args(1, (2, 3))

def comp_args(a=2, (b, c)=(3, 4)):
    print a, b, c
comp_args(1, (2, 3))
comp_args()

# Non-default parameters may not follow defaulted ones.
try:
    exec 'def f(a=1, (b, c)): pass'
    raise TestFailed, "non-default args after default"
except SyntaxError:
    pass

if verbose:
    print "testing bad float literals"

def expect_error(s):
    # `s` must fail to parse: eval() should raise SyntaxError.
    try:
        eval(s)
        raise TestFailed("%r accepted" % s)
    except SyntaxError:
        pass

expect_error("2e")
expect_error("2.0e+")
expect_error("1e-")
expect_error("3-4e/21")

if verbose:
    print "testing literals with leading zeroes"

def expect_same(test_source, expected):
    # eval() of `test_source` must equal `expected` exactly.
    got = eval(test_source)
    if got != expected:
        raise TestFailed("eval(%r) gave %r, but expected %r" %
                         (test_source, got, expected))

expect_error("077787")          # invalid octal digit
expect_error("0xj")
expect_error("0x.")
expect_error("0e")
expect_same("0777", 511)
expect_same("0777L", 511)
expect_same("000777", 511)
expect_same("0xff", 255)
expect_same("0xffL", 255)
expect_same("0XfF", 255)
# A leading zero on a float does NOT make it octal.
expect_same("0777.", 777)
expect_same("0777.0", 777)
expect_same("000000000000000000000000000000000000000000000000000777e0", 777)
expect_same("0777e1", 7770)
expect_same("0e0", 0)
expect_same("0000E-012", 0)
expect_same("09.5", 9.5)
expect_same("0777j", 777j)
expect_same("00j", 0j)
expect_same("00.0", 0)
expect_same("0e3", 0)
expect_same("090000000000000.", 90000000000000.)
expect_same("090000000000000.0000000000000000000000", 90000000000000.)
expect_same("090000000000000e0", 90000000000000.)
expect_same("090000000000000e-0", 90000000000000.)
expect_same("090000000000000j", 90000000000000j)
expect_error("090000000000000") # plain octal literal w/ decimal digit
expect_error("080000000000000") # plain octal literal w/ decimal digit
expect_error("000000000000009") # plain octal literal w/ decimal digit
expect_error("000000000000008") # plain octal literal w/ decimal digit
expect_same("000000000000007", 7)
expect_same("000000000000008.", 8.)
expect_same("000000000000009.", 9.)

# Verify treatment of unary minus on negative numbers SF bug #660455
# NOTE(review): assumes pre-PEP-237 semantics where 0xffffffff parses as
# -1 on a 32-bit build -- confirm against the targeted interpreter.
expect_same("0xffffffff", -1)
expect_same("-0xffffffff", 1)
| mit |
wang1352083/pythontool | python-2.7.12-lib/distutils/tests/test_install_headers.py | 141 | 1269 | """Tests for distutils.command.install_headers."""
import sys
import os
import unittest
import getpass
from distutils.command.install_headers import install_headers
from distutils.tests import support
from test.test_support import run_unittest
class InstallHeadersTestCase(support.TempdirManager,
                             support.LoggingSilencer,
                             support.EnvironGuard,
                             unittest.TestCase):
    """Tests for the distutils ``install_headers`` command."""

    def test_simple_run(self):
        """Installing two headers reports them as inputs and outputs."""
        # we have two headers
        header_list = self.mkdtemp()
        header1 = os.path.join(header_list, 'header1')
        header2 = os.path.join(header_list, 'header2')
        self.write_file(header1)
        self.write_file(header2)
        headers = [header1, header2]

        pkg_dir, dist = self.create_dist(headers=headers)
        cmd = install_headers(dist)
        # get_inputs() must report exactly the configured headers.
        self.assertEqual(cmd.get_inputs(), headers)

        # let's run the command
        cmd.install_dir = os.path.join(pkg_dir, 'inst')
        cmd.ensure_finalized()
        cmd.run()

        # let's check the results: one output per input header.
        self.assertEqual(len(cmd.get_outputs()), 2)
def test_suite():
    """Build the suite of all InstallHeadersTestCase tests."""
    # TestLoader.loadTestsFromTestCase replaces unittest.makeSuite, which
    # is deprecated and removed in Python 3.13; behaviour is identical.
    return unittest.TestLoader().loadTestsFromTestCase(InstallHeadersTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
| mit |
chippey/gaffer | python/GafferUI/Viewer.py | 1 | 10321 | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
# import lazily to improve startup of apps which don't use GL functionality
IECoreGL = Gaffer.lazyImport( "IECoreGL" )
##########################################################################
# Viewer implementation
##########################################################################
## The Viewer provides the primary means of visualising the output
# of Nodes. It defers responsibility for the generation of content to
# the View classes, which are registered against specific types of
# Plug.
## \todo Support split screening two Views together and overlaying
# them etc. Hopefully we can support that entirely within the Viewer
# without modifying the Views themselves.
class Viewer( GafferUI.NodeSetEditor ) :
	"""Displays the output of the current node via a suitable View,
	surrounded by per-edge toolbars for both the View and the node."""

	def __init__( self, scriptNode, **kw ) :

		self.__gadgetWidget = GafferUI.GadgetWidget(
			bufferOptions = set( (
				GafferUI.GLWidget.BufferOptions.Depth,
				GafferUI.GLWidget.BufferOptions.Double )
			),
		)

		GafferUI.NodeSetEditor.__init__( self, self.__gadgetWidget, scriptNode, **kw )

		# One toolbar per edge, for the current node and the current view.
		self.__nodeToolbars = []
		self.__viewToolbars = []

		# The overlay is a 5x5 grid laid over the viewport; toolbars are
		# parented into specific grid cells.
		with GafferUI.GridContainer( borderWidth = 2, spacing = 0 ) as overlay :

			# Top row : tool menu button, expanding spacer, top view toolbar.
			with GafferUI.ListContainer(
				orientation = GafferUI.ListContainer.Orientation.Horizontal,
				parenting = {
					"index" : ( slice( 0, 5 ), 0 ),
					"alignment" : ( GafferUI.HorizontalAlignment.None, GafferUI.VerticalAlignment.Top )
				}
			) :

				self.__toolMenuButton = GafferUI.MenuButton(
					menu = GafferUI.Menu( Gaffer.WeakMethod( self.__toolMenuDefinition ) ),
					hasFrame = False,
				)

				GafferUI.Spacer( IECore.V2i( 0 ), parenting = { "expand" : True } )

				self.__viewToolbars.append(
					_Toolbar( GafferUI.Edge.Top, parenting = { "verticalAlignment" : GafferUI.VerticalAlignment.Top } )
				)

			self.__nodeToolbars.append(
				_Toolbar(
					GafferUI.Edge.Top,
					parenting = {
						"index" : ( slice( 0, 5 ), 1 ),
						"alignment" : ( GafferUI.HorizontalAlignment.Center, GafferUI.VerticalAlignment.Top ),
					}
				)
			)

			self.__viewToolbars.append(
				_Toolbar( GafferUI.Edge.Left,
					parenting = {
						"index" : ( 0, 2 ),
						"alignment" : ( GafferUI.HorizontalAlignment.Left, GafferUI.VerticalAlignment.Center ),
					}
				)
			)

			self.__nodeToolbars.append(
				_Toolbar( GafferUI.Edge.Left,
					parenting = {
						"index" : ( 1, 2 ),
						"alignment" : ( GafferUI.HorizontalAlignment.Left, GafferUI.VerticalAlignment.Center ),
					}
				)
			)

			self.__nodeToolbars.append(
				_Toolbar( GafferUI.Edge.Right,
					parenting = {
						"index" : ( 3, 2 ),
						"alignment" : ( GafferUI.HorizontalAlignment.Right, GafferUI.VerticalAlignment.Center ),
					}
				)
			)

			self.__viewToolbars.append(
				_Toolbar( GafferUI.Edge.Right,
					parenting = {
						"index" : ( 4, 2 ),
						"alignment" : ( GafferUI.HorizontalAlignment.Right, GafferUI.VerticalAlignment.Center ),
					}
				)
			)

			self.__nodeToolbars.append(
				_Toolbar( GafferUI.Edge.Bottom,
					parenting = {
						"index" : ( slice( 0, 5 ), 3 ),
						"alignment" : ( GafferUI.HorizontalAlignment.Center, GafferUI.VerticalAlignment.Bottom ),
					}
				)
			)

			self.__viewToolbars.append(
				_Toolbar( GafferUI.Edge.Bottom,
					parenting = {
						"index" : ( slice( 0, 5 ), 4 ),
						"alignment" : ( GafferUI.HorizontalAlignment.Center, GafferUI.VerticalAlignment.Bottom ),
					}
				)
			)

		## \todo Consider public API for this in the GridContainer class.
		# Let the central cell (the viewport itself) absorb any extra space.
		overlay._qtWidget().layout().setRowStretch( 2, 1 )
		overlay._qtWidget().layout().setColumnStretch( 2, 1 )

		self.__gadgetWidget.setOverlay( overlay )

		self.__views = []
		# Indexed by view instance. We would prefer to simply
		# store tools as python attributes on the view instances
		# themselves, but we can't because that would create
		# circular references. Maybe it makes sense to be able to
		# query tools from a view anyway?
		self.__viewTools = {}
		self.__currentView = None

		self._updateFromSet()

	def view( self ) :
		"""The View currently being displayed, or None."""
		return self.__currentView

	def viewGadgetWidget( self ) :
		"""The GadgetWidget hosting the current view's viewport."""
		return self.__gadgetWidget

	def __repr__( self ) :

		return "GafferUI.Viewer( scriptNode )"

	def _updateFromSet( self ) :
		# Chooses (or creates) a View for the last node added to the node
		# set, wires it to the node's first viewable output plug, and
		# updates all toolbars accordingly.

		GafferUI.NodeSetEditor._updateFromSet( self )

		self.__currentView = None

		node = self._lastAddedNode()
		if node :
			# Consider only public output plugs as view candidates.
			for plug in node.children( Gaffer.Plug ) :
				if plug.direction() == Gaffer.Plug.Direction.Out and not plug.getName().startswith( "__" ) :
					# try to reuse an existing view
					for view in self.__views :
						if view["in"].acceptsInput( plug ) :
							self.__currentView = view
							viewInput = self.__currentView["in"].getInput()
							if not viewInput or not viewInput.isSame( plug ) :
								self.__currentView["in"].setInput( plug )
							break # break out of view loop
					# if that failed then try to make a new one
					if self.__currentView is None :
						self.__currentView = GafferUI.View.create( plug )
						if self.__currentView is not None:
							self.__currentView.setContext( self.getContext() )
							# Instantiate every tool registered for this view
							# type, ordered by their "order" metadata.
							self.__viewTools[self.__currentView] = [ GafferUI.Tool.create( n, self.__currentView ) for n in GafferUI.Tool.registeredTools( self.__currentView.typeId() ) ]
							self.__viewTools[self.__currentView].sort( key = lambda v : Gaffer.Metadata.value( v, "order" ) if Gaffer.Metadata.value( v, "order" ) is not None else 999 )
							if len( self.__viewTools[self.__currentView] ) :
								self.__activateTool( self.__viewTools[self.__currentView][0] )
							self.__views.append( self.__currentView )
					# if we succeeded in getting a suitable view, then
					# don't bother checking the other plugs
					if self.__currentView is not None :
						break

		for toolbar in self.__nodeToolbars :
			toolbar.setNode( node )

		for toolbar in self.__viewToolbars :
			toolbar.setNode( self.__currentView )

		if self.__currentView is not None :
			self.__gadgetWidget.setViewportGadget( self.__currentView.viewportGadget() )
			self.__toolMenuButton.setVisible( len( self.__viewTools[self.__currentView] ) != 0 )
		else :
			# No view available : show an empty viewport and hide the tools.
			self.__gadgetWidget.setViewportGadget( GafferUI.ViewportGadget() )
			self.__toolMenuButton.setVisible( False )

	def _titleFormat( self ) :

		return GafferUI.NodeSetEditor._titleFormat( self, _maxNodes = 1, _reverseNodes = True, _ellipsis = False )

	def __toolMenuDefinition( self ) :
		# Builds the menu listing the tools available for the current view.

		m = IECore.MenuDefinition()
		if self.__currentView is None :
			return m

		for tool in self.__viewTools[self.__currentView] :
			m.append(
				"/" + IECore.CamelCase.toSpaced( tool.typeName().rpartition( ":" )[2] ),
				{
					"checkBox" : tool["active"].getValue(),
					"active" : not tool["active"].getValue(),
					"command" : IECore.curry( Gaffer.WeakMethod( self.__activateTool ), tool ),
					"description" : self.__toolDescription( tool )
				}
			)

		return m

	def __activateTool( self, tool, *unused ) :
		# Makes `tool` the single active tool and updates the menu button
		# icon/tooltip to reflect it.

		for t in self.__viewTools[self.__currentView] :
			t["active"].setValue( t.isSame( tool ) )

		# Derive the icon name from the tool's type name, e.g.
		# "GafferSceneUI::TranslateTool" -> "gafferSceneUITranslateTool.png".
		iconName = tool.typeName().replace( ":", "" )
		iconName = iconName[:1].lower() + iconName[1:] + ".png"
		self.__toolMenuButton.setImage( iconName )

		self.__toolMenuButton.setToolTip( self.__toolDescription( tool ) )

	def __toolDescription( self, tool ) :
		# Tooltip text : tool name plus its wrapped metadata description.

		result = tool.getName()
		description = Gaffer.Metadata.nodeDescription( tool )
		if description :
			result += "\n\n" + IECore.StringUtil.wrap( description, 80 )

		return result

GafferUI.EditorWidget.registerType( "Viewer", Viewer )
# Internal widget to simplify the management of node toolbars.
class _Toolbar( GafferUI.Frame ) :

	def __init__( self, edge, **kw ) :

		GafferUI.Frame.__init__( self, borderWidth = 0, borderStyle = GafferUI.Frame.BorderStyle.None, **kw )

		# We store the 5 most recently used toolbars in a cache,
		# to avoid unnecessary reconstruction when switching back and
		# forth between the same set of nodes.
		self.__nodeToolbarCache = IECore.LRUCache( self.__cacheGetter, 5 )

		self.__edge = edge
		# NOTE(review): initialised to [] (not None) so that even a first
		# setNode( None ) call fails the early-out equality check below and
		# clears the child -- confirm this sentinel choice is intentional.
		self.__node = []

	def setNode( self, node ) :
		# Lazily (re)builds and installs the toolbar for `node`, via the
		# LRU cache keyed on ( node, edge ).

		if node == self.__node :
			return

		self.__node = node
		if self.__node is not None :
			self.setChild( self.__nodeToolbarCache.get( ( self.__node, self.__edge ) ) )
		else :
			self.setChild( None )

	def getNode( self ) :

		return self.__node

	@staticmethod
	def __cacheGetter( nodeAndEdge ) :
		# LRUCache getter : returns ( value, cost ). Each toolbar costs 1,
		# so the cache holds at most 5 toolbars.
		return ( GafferUI.NodeToolbar.create( nodeAndEdge[0], nodeAndEdge[1] ), 1 )
| bsd-3-clause |
James-Firth/steam-download-notifier | steamapi/app.py | 1 | 5327 | __author__ = 'SmileyBarry'
from .core import APIConnection, SteamObject, store
from .decorators import cached_property, INFINITE
class SteamApp(SteamObject):
    """A Steam application (game), optionally associated with an owning user."""

    def __init__(self, appid, name=None, owner=None):
        """Create an app handle.

        :param appid: numeric Steam application ID.
        :param name: optional known name; pre-seeds the cached_property
            cache so displaying the name needs no schema request.
        :param owner: optional user that owns this app.
        """
        self._id = appid
        if name is not None:
            import time
            self._cache = dict()
            self._cache['name'] = (name, time.time())
        # Normally, the associated userid is also the owner.
        # That would not be the case if the game is borrowed, though. In that case, the object creator
        # usually defines attributes accordingly. However, at this time we can't ask the API "is this
        # game borrowed?", unless it's the actively-played game, so this distinction isn't done in the
        # object's context, but in the object creator's context.
        self._owner = owner
        self._userid = self._owner

    @cached_property(ttl=INFINITE)
    def _schema(self):
        # Full stats/achievements schema for this app, fetched once.
        return APIConnection().call("ISteamUserStats", "GetSchemaForGame", "v2", appid=self._id)

    @property
    def appid(self):
        return self._id

    @cached_property(ttl=INFINITE)
    def achievements(self):
        """List of SteamAchievement objects for this app.

        Each is annotated with the global unlock percentage and, when a
        user is linked, with that user's unlock state.
        """
        global_percentages = APIConnection().call("ISteamUserStats", "GetGlobalAchievementPercentagesForApp", "v0002",
                                                  gameid=self._id)
        if self._userid is not None:
            # Ah-ha, this game is associated to a user!
            userid = self._userid
            unlocks = APIConnection().call("ISteamUserStats",
                                           "GetUserStatsForGame",
                                           "v2",
                                           appid=self._id,
                                           steamid=userid)
            if 'achievements' in unlocks.playerstats:
                unlocks = [associated_achievement.name
                           for associated_achievement in unlocks.playerstats.achievements
                           if associated_achievement.achieved != 0]
            else:
                # BUGFIX: the user has no achievement data for this game.
                # Previously `unlocks` was left as the raw API response and
                # the `apiname in unlocks` membership test below would
                # misbehave; treat this case the same as no linked user.
                unlocks = None
        else:
            userid = None
            unlocks = None
        achievements_list = []
        for achievement in self._schema.game.availableGameStats.achievements:
            achievement_obj = SteamAchievement(self._id, achievement.name, achievement.displayName, userid)
            achievement_obj._cache = {}
            if achievement.hidden == 0:
                store(achievement_obj, "is_hidden", False)
            else:
                store(achievement_obj, "is_hidden", True)
            # Attach the global unlock rate for this achievement.
            for global_achievement in global_percentages.achievementpercentages.achievements:
                if global_achievement.name == achievement.name:
                    achievement_obj.unlock_percentage = global_achievement.percent
            achievements_list += [achievement_obj]
        if unlocks is not None:
            # Pre-populate the per-user unlock state to avoid one API call
            # per achievement later.
            for achievement in achievements_list:
                if achievement.apiname in unlocks:
                    store(achievement, "is_achieved", True)
                else:
                    store(achievement, "is_achieved", False)
        return achievements_list

    @cached_property(ttl=INFINITE)
    def name(self):
        return self._schema.game.gameName

    @cached_property(ttl=INFINITE)
    def owner(self):
        # Fall back to the associated user when no explicit owner was given.
        if self._owner is None:
            return self._userid
        else:
            return self._owner

    def __str__(self):
        return self.name
class SteamAchievement(SteamObject):
    """A single achievement definition, optionally tied to a specific user."""

    def __init__(self, linked_appid, apiname, displayname, linked_userid=None):
        self._appid = linked_appid
        self._id = apiname
        self._displayname = displayname
        self._userid = linked_userid
        # Global unlock rate; filled in by SteamApp.achievements.
        self.unlock_percentage = 0.0

    @property
    def appid(self):
        """ID of the app this achievement belongs to."""
        return self._appid

    @property
    def name(self):
        """Human-readable display name."""
        return self._displayname

    @property
    def apiname(self):
        """Internal API name of this achievement."""
        return self._id

    @property
    def id(self):
        return self._id

    @cached_property(ttl=INFINITE)
    def is_hidden(self):
        """Whether this achievement is hidden in the game's schema."""
        schema = APIConnection().call("ISteamUserStats",
                                      "GetSchemaForGame",
                                      "v2",
                                      appid=self._appid)
        for entry in schema.game.availableGameStats.achievements:
            if entry.name == self._id:
                return entry.hidden != 0
        # Not present in the schema: fall through (implicitly None),
        # exactly as the original if/else ladder did when nothing matched.

    @cached_property(ttl=INFINITE)
    def is_unlocked(self):
        """Whether the linked user has unlocked this achievement."""
        if self._userid is None:
            raise ValueError("No Steam ID linked to this achievement!")
        stats = APIConnection().call("ISteamUserStats",
                                     "GetPlayerAchievements",
                                     "v1",
                                     steamid=self._userid,
                                     appid=self._appid,
                                     l="English")
        for entry in stats.playerstats.achievements:
            if entry.apiname == self._id:
                return entry.achieved == 1
        # Cannot be found.
        return False
mtreinish/os-testr | os_testr/subunit2html.py | 23 | 21784 | #!/usr/bin/python
"""
Utility to convert a subunit stream to an html results file.
Code is adapted from the pyunit Html test runner at
http://tungwaiyip.info/software/HTMLTestRunner.html
Takes two arguments. First argument is path to subunit log file, second
argument is path of desired output file. Second argument is optional,
defaults to 'results.html'.
Original HTMLTestRunner License:
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import collections
import datetime
import io
import sys
import traceback
from xml.sax import saxutils
import subunit
import testtools
__version__ = '0.1'
class TemplateData(object):
"""
Define a HTML template for report customerization and generation.
Overall structure of an HTML report
HTML
+------------------------+
|<html> |
| <head> |
| |
| STYLESHEET |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </head> |
| |
| <body> |
| |
| HEADING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| REPORT |
| +----------------+ |
| | | |
| +----------------+ |
| |
| ENDING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </body> |
|</html> |
+------------------------+
"""
STATUS = {
0: 'pass',
1: 'fail',
2: 'error',
3: 'skip',
}
DEFAULT_TITLE = 'Unit Test Report'
DEFAULT_DESCRIPTION = ''
# ------------------------------------------------------------------------
# HTML Template
HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta name="generator" content="%(generator)s"/>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
%(stylesheet)s
</head>
<body>
<script language="javascript" type="text/javascript"><!--
output_list = Array();
/* level - 0:Summary; 1:Failed; 2:All */
function showCase(level) {
trs = document.getElementsByTagName("tr");
for (var i = 0; i < trs.length; i++) {
tr = trs[i];
id = tr.id;
if (id.substr(0,2) == 'ft') {
if (level < 1) {
tr.className = 'hiddenRow';
}
else {
tr.className = '';
}
}
if (id.substr(0,2) == 'pt') {
if (level > 1) {
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
}
}
function showClassDetail(cid, count) {
var id_list = Array(count);
var toHide = 1;
for (var i = 0; i < count; i++) {
tid0 = 't' + cid.substr(1) + '.' + (i+1);
tid = 'f' + tid0;
tr = document.getElementById(tid);
if (!tr) {
tid = 'p' + tid0;
tr = document.getElementById(tid);
}
id_list[i] = tid;
if (tr.className) {
toHide = 0;
}
}
for (var i = 0; i < count; i++) {
tid = id_list[i];
if (toHide) {
document.getElementById('div_'+tid).style.display = 'none'
document.getElementById(tid).className = 'hiddenRow';
}
else {
document.getElementById(tid).className = '';
}
}
}
function showTestDetail(div_id){
var details_div = document.getElementById(div_id)
var displayState = details_div.style.display
// alert(displayState)
if (displayState != 'block' ) {
displayState = 'block'
details_div.style.display = 'block'
}
else {
details_div.style.display = 'none'
}
}
function html_escape(s) {
s = s.replace(/&/g,'&');
s = s.replace(/</g,'<');
s = s.replace(/>/g,'>');
return s;
}
/* obsoleted by detail in <div>
function showOutput(id, name) {
var w = window.open("", //url
name,
"resizable,scrollbars,status,width=800,height=450");
d = w.document;
d.write("<pre>");
d.write(html_escape(output_list[id]));
d.write("\n");
d.write("<a href='javascript:window.close()'>close</a>\n");
d.write("</pre>\n");
d.close();
}
*/
--></script>
%(heading)s
%(report)s
%(ending)s
</body>
</html>
"""
# variables: (title, generator, stylesheet, heading, report, ending)
# ------------------------------------------------------------------------
# Stylesheet
#
# alternatively use a <link> for external style sheet, e.g.
# <link rel="stylesheet" href="$url" type="text/css">
STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body { font-family: verdana, arial, helvetica, sans-serif;
font-size: 80%; }
table { font-size: 100%; width: 100%;}
pre { font-size: 80%; }
/* -- heading -------------------------------------------------------------- */
h1 {
font-size: 16pt;
color: gray;
}
.heading {
margin-top: 0ex;
margin-bottom: 1ex;
}
.heading .attribute {
margin-top: 1ex;
margin-bottom: 0;
}
.heading .description {
margin-top: 4ex;
margin-bottom: 6ex;
}
/* -- css div popup -------------------------------------------------------- */
a.popup_link {
}
a.popup_link:hover {
color: red;
}
.popup_window {
display: none;
overflow-x: scroll;
/*border: solid #627173 1px; */
padding: 10px;
background-color: #E6E6D6;
font-family: "Ubuntu Mono", "Lucida Console", "Courier New", monospace;
text-align: left;
font-size: 8pt;
}
}
/* -- report --------------------------------------------------------------- */
#show_detail_line {
margin-top: 3ex;
margin-bottom: 1ex;
}
#result_table {
width: 100%;
border-collapse: collapse;
border: 1px solid #777;
}
#header_row {
font-weight: bold;
color: white;
background-color: #777;
}
#result_table td {
border: 1px solid #777;
padding: 2px;
}
#total_row { font-weight: bold; }
.passClass { background-color: #6c6; }
.failClass { background-color: #c60; }
.errorClass { background-color: #c00; }
.passCase { color: #6c6; }
.failCase { color: #c60; font-weight: bold; }
.errorCase { color: #c00; font-weight: bold; }
.hiddenRow { display: none; }
.testcase { margin-left: 2em; }
td.testname {width: 40%}
td.small {width: 40px}
/* -- ending --------------------------------------------------------------- */
#ending {
}
</style>
"""
# ------------------------------------------------------------------------
# Heading
#
HEADING_TMPL = """<div class='heading'>
<h1>%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>
""" # variables: (title, parameters, description)
HEADING_ATTRIBUTE_TMPL = """
<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
""" # variables: (name, value)
# ------------------------------------------------------------------------
# Report
#
REPORT_TMPL = """
<p id='show_detail_line'>Show
<a href='javascript:showCase(0)'>Summary</a>
<a href='javascript:showCase(1)'>Failed</a>
<a href='javascript:showCase(2)'>All</a>
</p>
<table id='result_table'>
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row'>
<td>Test Group/Test case</td>
<td>Count</td>
<td>Pass</td>
<td>Fail</td>
<td>Error</td>
<td>Skip</td>
<td>View</td>
<td> </td>
</tr>
%(test_list)s
<tr id='total_row'>
<td>Total</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td>%(skip)s</td>
<td> </td>
<td> </td>
</tr>
</table>
""" # variables: (test_list, count, Pass, fail, error)
REPORT_CLASS_TMPL = r"""
<tr class='%(style)s'>
<td class="testname">%(desc)s</td>
<td class="small">%(count)s</td>
<td class="small">%(Pass)s</td>
<td class="small">%(fail)s</td>
<td class="small">%(error)s</td>
<td class="small">%(skip)s</td>
<td class="small"><a href="javascript:showClassDetail('%(cid)s',%(count)s)"
>Detail</a></td>
<td> </td>
</tr>
""" # variables: (style, desc, count, Pass, fail, error, cid)
REPORT_TEST_WITH_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='7' align='left'>
<!--css div popup start-->
<a class="popup_link" onfocus='this.blur();'
href="javascript:showTestDetail('div_%(tid)s')" >
%(status)s</a>
<div id='div_%(tid)s' class="popup_window">
<div style='text-align: right; color:red;cursor:pointer'>
<a onfocus='this.blur();'
onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
[x]</a>
</div>
<pre>
%(script)s
</pre>
</div>
<!--css div popup end-->
</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
<td colspan='6' align='center'>%(status)s</td>
</tr>
""" # variables: (tid, Class, style, desc, status)
REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
""" # variables: (id, output)
# ------------------------------------------------------------------------
# ENDING
#
ENDING_TMPL = """<div id='ending'> </div>"""
# -------------------- The end of the Template class -------------------
class ClassInfoWrapper(object):
    # Lightweight descriptor used to group report rows by test class: it
    # records the class name and the module it lives in.
    # Deliberately left without a class docstring: HtmlOutput._generate_report
    # feeds cls.__doc__ into the class description line and expects None here.
    def __init__(self, name, mod):
        self.name = name
        self.mod = mod

    def __repr__(self):
        # Sections are keyed by str(wrapper), so repr is just the name.
        return str(self.name)
class HtmlOutput(testtools.TestResult):
    """Output test results in html.

    Counts pass/fail/error/skip results as they arrive, then renders the
    whole report into ``html_file`` when the test run stops.
    """

    def __init__(self, html_file='result.html'):
        super(HtmlOutput, self).__init__()
        self.success_count = 0
        self.failure_count = 0
        self.error_count = 0
        self.skip_count = 0
        # Entries are (code, test, description, traceback) tuples, where
        # code is 0=pass, 1=fail, 2=error, 3=skip.
        self.result = []
        self.html_file = html_file

    def addSuccess(self, test):
        self.success_count += 1
        output = test.shortDescription()
        if output is None:
            output = test.id()
        self.result.append((0, test, output, ''))

    def addSkip(self, test, err):
        output = test.shortDescription()
        if output is None:
            output = test.id()
        self.skip_count += 1
        self.result.append((3, test, output, ''))

    def addError(self, test, err):
        output = test.shortDescription()
        if output is None:
            output = test.id()
        # Skipped tests are handled by SkipTest Exceptions (see addSkip), so
        # anything reaching this method is a genuine error.
        # BUG FIX: these statements used to live in an 'else' left dangling
        # from the 'if output is None' check above (a leftover of a
        # commented-out SkipTest test), so errors from tests *without* a
        # shortDescription were silently dropped from the counts and report.
        self.error_count += 1
        _exc_str = self.formatErr(err)
        self.result.append((2, test, output, _exc_str))

    def addFailure(self, test, err):
        print(test)
        self.failure_count += 1
        _exc_str = self.formatErr(err)
        output = test.shortDescription()
        if output is None:
            output = test.id()
        self.result.append((1, test, output, _exc_str))

    def formatErr(self, err):
        """Format a sys.exc_info()-style tuple into a traceback string."""
        exctype, value, tb = err
        return ''.join(traceback.format_exception(exctype, value, tb))

    def stopTestRun(self):
        """Finish the run: render the report and write it to html_file."""
        super(HtmlOutput, self).stopTestRun()
        self.stopTime = datetime.datetime.now()
        report_attrs = self._getReportAttributes()
        generator = 'subunit2html %s' % __version__
        heading = self._generate_heading(report_attrs)
        report = self._generate_report()
        ending = self._generate_ending()
        output = TemplateData.HTML_TMPL % dict(
            title=saxutils.escape(TemplateData.DEFAULT_TITLE),
            generator=generator,
            stylesheet=TemplateData.STYLESHEET_TMPL,
            heading=heading,
            report=report,
            ending=ending,
        )
        if self.html_file:
            with open(self.html_file, 'wb') as html_file:
                html_file.write(output.encode('utf8'))

    def _getReportAttributes(self):
        """Return report attributes as a list of (name, value)."""
        status = []
        if self.success_count:
            status.append('Pass %s' % self.success_count)
        if self.failure_count:
            status.append('Failure %s' % self.failure_count)
        if self.error_count:
            status.append('Error %s' % self.error_count)
        if self.skip_count:
            status.append('Skip %s' % self.skip_count)
        if status:
            status = ' '.join(status)
        else:
            status = 'none'
        return [
            ('Status', status),
        ]

    def _generate_heading(self, report_attrs):
        """Render the heading section from the report attributes."""
        a_lines = []
        for name, value in report_attrs:
            line = TemplateData.HEADING_ATTRIBUTE_TMPL % dict(
                name=saxutils.escape(name),
                value=saxutils.escape(value),
            )
            a_lines.append(line)
        heading = TemplateData.HEADING_TMPL % dict(
            title=saxutils.escape(TemplateData.DEFAULT_TITLE),
            parameters=''.join(a_lines),
            description=saxutils.escape(TemplateData.DEFAULT_DESCRIPTION),
        )
        return heading

    def _generate_report(self):
        """Render the per-class summary rows followed by per-test rows."""
        rows = []
        sortedResult = self._sortResult(self.result)
        for cid, (cls, cls_results) in enumerate(sortedResult):
            # subtotal for a class
            np = nf = ne = ns = 0
            for n, t, o, e in cls_results:
                if n == 0:
                    np += 1
                elif n == 1:
                    nf += 1
                elif n == 2:
                    ne += 1
                else:
                    ns += 1
            # format class description (both branches are equivalent today
            # since cls.name is always a string)
            if cls.mod == "__main__":
                name = cls.name
            else:
                name = "%s" % (cls.name)
            doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
            desc = doc and '%s: %s' % (name, doc) or name
            row = TemplateData.REPORT_CLASS_TMPL % dict(
                style=(ne > 0 and 'errorClass' or nf > 0
                       and 'failClass' or 'passClass'),
                desc=desc,
                count=np + nf + ne + ns,
                Pass=np,
                fail=nf,
                error=ne,
                skip=ns,
                cid='c%s' % (cid + 1),
            )
            rows.append(row)
            for tid, (n, t, o, e) in enumerate(cls_results):
                self._generate_report_test(rows, cid, tid, n, t, o, e)
        report = TemplateData.REPORT_TMPL % dict(
            test_list=''.join(rows),
            count=str(self.success_count + self.failure_count +
                      self.error_count + self.skip_count),
            Pass=str(self.success_count),
            fail=str(self.failure_count),
            error=str(self.error_count),
            skip=str(self.skip_count),
        )
        return report

    def _sortResult(self, result_list):
        # unittest does not seems to run in any particular order.
        # Here at least we want to group them together by class.
        rmap = {}
        classes = []
        for n, t, o, e in result_list:
            if hasattr(t, '_tests'):
                for inner_test in t._tests:
                    self._add_cls(rmap, classes, inner_test,
                                  (n, inner_test, o, e))
            else:
                self._add_cls(rmap, classes, t, (n, t, o, e))
        classort = lambda s: str(s)
        sortedclasses = sorted(classes, key=classort)
        r = [(cls, rmap[str(cls)]) for cls in sortedclasses]
        return r

    def _add_cls(self, rmap, classes, test, data_tuple):
        """Register *data_tuple* under the ClassInfoWrapper for *test*."""
        if hasattr(test, 'test'):
            test = test.test
        if test.__class__ == subunit.RemotedTestCase:
            # Remote cases only carry a dotted description string; recover
            # the class and module names from it.
            cl = test._RemotedTestCase__description.rsplit('.', 1)[0]
            mod = cl.rsplit('.', 1)[0]
            cls = ClassInfoWrapper(cl, mod)
        else:
            cls = ClassInfoWrapper(str(test.__class__), str(test.__module__))
        if not str(cls) in rmap:
            rmap[str(cls)] = []
            classes.append(cls)
        rmap[str(cls)].append(data_tuple)

    def _generate_report_test(self, rows, cid, tid, n, t, o, e):
        """Append one detail row for a single test result."""
        # e.g. 'pt1.1', 'ft1.1', etc
        # ptx.x for passed/skipped tests and ftx.x for failed/errored tests.
        has_output = bool(o or e)
        tid = ((n == 0 or n == 3) and
               'p' or 'f') + 't%s.%s' % (cid + 1, tid + 1)
        name = t.id().split('.')[-1]
        # If shortDescription is not the function name, use it.
        # BUG FIX: shortDescription() may return None, which used to crash
        # the .find() call below with an AttributeError.
        short_desc = t.shortDescription()
        if short_desc and short_desc.find(name) == -1:
            doc = short_desc
        else:
            doc = None
        desc = doc and ('%s: %s' % (name, doc)) or name
        tmpl = (has_output and TemplateData.REPORT_TEST_WITH_OUTPUT_TMPL
                or TemplateData.REPORT_TEST_NO_OUTPUT_TMPL)
        script = TemplateData.REPORT_TEST_OUTPUT_TMPL % dict(
            id=tid,
            output=saxutils.escape(o + e),
        )
        row = tmpl % dict(
            tid=tid,
            Class=((n == 0 or n == 3) and 'hiddenRow' or 'none'),
            style=(n == 2 and 'errorCase' or
                   (n == 1 and 'failCase' or 'none')),
            desc=desc,
            script=script,
            status=TemplateData.STATUS[n],
        )
        rows.append(row)
        # (Removed a dead trailing 'if not has_output: return' -- it was the
        # last statement of the method and had no effect.)

    def _generate_ending(self):
        """Render the closing section of the report."""
        return TemplateData.ENDING_TMPL

    def startTestRun(self):
        super(HtmlOutput, self).startTestRun()
class FileAccumulator(testtools.StreamResult):
    # Collects the bytes of every 'stdout' file event in the stream,
    # bucketed by route code, so non-v2 content can be re-parsed later.

    def __init__(self):
        super(FileAccumulator, self).__init__()
        # One in-memory buffer per route code, created lazily on first use.
        self.route_codes = collections.defaultdict(io.BytesIO)

    def status(self, **kwargs):
        # Only captured stdout payloads are of interest here.
        if kwargs.get('file_name') != 'stdout':
            return
        payload = kwargs.get('file_bytes')
        if payload:
            self.route_codes[kwargs.get('route_code')].write(payload)
def main():
    """Convert a subunit log (argv[1]) into an HTML report (argv[2] or results.html)."""
    if len(sys.argv) < 2:
        print("Need at least one argument: path to subunit log.")
        # sys.exit() rather than the site-provided exit() builtin, which is
        # not guaranteed to exist (e.g. when running with 'python -S').
        sys.exit(1)
    subunit_file = sys.argv[1]
    if len(sys.argv) > 2:
        html_file = sys.argv[2]
    else:
        html_file = 'results.html'
    html_result = HtmlOutput(html_file)
    # Feed the subunit stream through both a V1 and V2 parser.
    # Depends on having the v2 capable libraries installed.
    # First V2.
    # Non-v2 content and captured non-test output will be presented as file
    # segments called stdout.
    # BUG FIX: open the log inside a context manager so the file handle is
    # closed even if parsing raises (it was previously leaked).
    with open(subunit_file, 'rb') as stream:
        suite = subunit.ByteStreamToStreamResult(stream, non_subunit_name='stdout')
        # The HTML output code is in legacy mode.
        result = testtools.StreamToExtendedDecorator(html_result)
        # Divert non-test output
        accumulator = FileAccumulator()
        result = testtools.StreamResultRouter(result)
        result.add_rule(accumulator, 'test_id', test_id=None)
        result.startTestRun()
        suite.run(result)
    # Now reprocess any found stdout content as V1 subunit
    for bytes_io in accumulator.route_codes.values():
        bytes_io.seek(0)
        suite = subunit.ProtocolTestCase(bytes_io)
        suite.run(html_result)
    # stopTestRun renders and writes the HTML report.
    result.stopTestRun()


if __name__ == '__main__':
    main()
| apache-2.0 |
kevclarx/ansible | lib/ansible/modules/windows/win_dotnet_ngen.py | 78 | 1924 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Peter Mounce <public@neverrunwithscissors.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_dotnet_ngen
version_added: "2.0"
short_description: Runs ngen to recompile DLLs after .NET updates
description:
- After .NET framework is installed/updated, Windows will probably want to recompile things to optimise for the host.
- This happens via scheduled task, usually at some inopportune time.
- This module allows you to run this task on your own schedule, so you incur the CPU hit at some more convenient and controlled time.
- "http://blogs.msdn.com/b/dotnet/archive/2013/08/06/wondering-why-mscorsvw-exe-has-high-cpu-usage-you-can-speed-it-up.aspx"
notes:
- there are in fact two scheduled tasks for ngen but they have no triggers so aren't a problem
- there's no way to test if they've been completed (?)
- the stdout is quite likely to be several megabytes
author: Peter Mounce
options: {}
'''
EXAMPLES = r'''
# Run ngen tasks
win_dotnet_ngen:
'''
| gpl-3.0 |
knifeyspoony/pyswf | swf/geom.py | 2 | 14457 | import math
# Default tolerance used by Vector3.equals() for approximate comparisons.
SNAP = 0.001
class Vector2(object):
    """Bare-bones 2D vector: just an (x, y) pair with float defaults."""

    def __init__(self, x=0.0, y=0.0):
        self.x = x
        self.y = y
class Vector3(object):
    """Mutable 3D vector.

    In-place operations (cross, sub, the scalar ops, normalize) update the
    vector and return ``self`` so that calls can be chained.
    """

    def __init__(self, x=0, y=0, z=0):
        self.x = x
        self.y = y
        self.z = z

    def clone(self):
        """Return an independent copy of this vector."""
        return Vector3(self.x, self.y, self.z)

    def cross(self, v1, v2):
        """Set this vector to the cross product ``v1 x v2``; return self."""
        self.x = v1.y * v2.z - v1.z * v2.y
        self.y = v1.z * v2.x - v1.x * v2.z
        self.z = v1.x * v2.y - v1.y * v2.x
        return self

    def distance(self, v):
        """Euclidean distance between this vector and *v*."""
        dx = self.x - v.x
        dy = self.y - v.y
        dz = self.z - v.z
        return math.sqrt(dx * dx + dy * dy + dz * dz)

    def distanceSq(self, v):
        """Squared Euclidean distance (avoids the sqrt of distance())."""
        dx = self.x - v.x
        dy = self.y - v.y
        dz = self.z - v.z
        return (dx * dx + dy * dy + dz * dz)

    def dot(self, v):
        """Dot product of this vector with *v*."""
        return self.x * v.x + self.y * v.y + self.z * v.z

    def length(self):
        """Euclidean norm of this vector."""
        return math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z)

    def lengthSq(self):
        """Squared Euclidean norm (avoids the sqrt of length())."""
        return (self.x * self.x + self.y * self.y + self.z * self.z)

    def addScalar(self, s):
        """Add *s* to every component in place; return self."""
        self.x += s
        self.y += s
        self.z += s
        return self

    def divScalar(self, s):
        """Divide every component by *s* in place; return self."""
        self.x /= s
        self.y /= s
        self.z /= s
        return self

    def multScalar(self, s):
        """Multiply every component by *s* in place; return self."""
        self.x *= s
        self.y *= s
        self.z *= s
        return self

    def sub(self, a, b):
        """Set this vector to the component-wise difference a - b; return self."""
        self.x = a.x - b.x
        self.y = a.y - b.y
        self.z = a.z - b.z
        return self

    def subScalar(self, s):
        """Subtract *s* from every component in place; return self."""
        self.x -= s
        self.y -= s
        self.z -= s
        return self

    def equals(self, v, e=None):
        """Return True if *v* lies strictly within +/- *e* of this vector on
        every axis. *e* defaults to the module-level SNAP tolerance."""
        e = SNAP if e is None else e
        # Chained comparisons replace the original six-clause if/else.
        return (self.x - e < v.x < self.x + e and
                self.y - e < v.y < self.y + e and
                self.z - e < v.z < self.z + e)

    def normalize(self):
        """Scale this vector to unit length in place (no-op if zero); return self."""
        # Renamed the local from 'len' to avoid shadowing the builtin.
        magnitude = self.length()
        if magnitude > 0.0:
            self.multScalar(1.0 / magnitude)
        return self

    def set(self, x, y, z):
        """Assign all three components at once (returns None)."""
        self.x = x
        self.y = y
        self.z = z

    def tostring(self):
        """Render as 'x y z' with three decimal places per component."""
        return "%0.3f %0.3f %0.3f" % (self.x, self.y, self.z)
class Matrix2(object):
    """
    Matrix2: a 2x3 affine transform (a, b, c, d, tx, ty), SWF-style:

        | a  c  tx |
        | b  d  ty |

    Points transform as x' = a*x + c*y + tx, y' = b*x + d*y + ty.
    Defaults to the identity transform.
    """
    def __init__(self, a=1.0, b=0.0, c=0.0, d=1.0, tx=0.0, ty=0.0):
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.tx = tx
        self.ty = ty

    def append(self, a, b, c, d, tx, ty):
        # Post-multiply this matrix by the given components, in place.
        a1 = self.a
        b1 = self.b
        c1 = self.c
        d1 = self.d
        self.a = a*a1+b*c1
        self.b = a*b1+b*d1
        self.c = c*a1+d*c1
        self.d = c*b1+d*d1
        self.tx = tx*a1+ty*c1+self.tx
        self.ty = tx*b1+ty*d1+self.ty

    def append_matrix(self, m):
        # Convenience wrapper over append() taking a Matrix2 instance.
        self.append(m.a, m.b, m.c, m.d, m.tx, m.ty)

    def multiply_point(self, vec):
        # Apply the transform to a 2-element point [x, y]; returns a new list.
        return [
            self.a*vec[0] + self.c*vec[1] + self.tx,
            self.b*vec[0] + self.d*vec[1] + self.ty
        ]

    def prepend(self, a, b, c, d, tx, ty):
        # Pre-multiply this matrix by the given components, in place.
        tx1 = self.tx
        # The linear part is only recombined when it is not the identity.
        if (a != 1.0 or b != 0.0 or c != 0.0 or d != 1.0):
            a1 = self.a
            c1 = self.c
            self.a = a1*a+self.b*c
            self.b = a1*b+self.b*d
            self.c = c1*a+self.d*c
            self.d = c1*b+self.d*d
        # NOTE(review): the translation update is applied unconditionally,
        # outside the identity check above -- confirm this matches intent.
        self.tx = tx1*a+self.ty*c+tx
        self.ty = tx1*b+self.ty*d+ty

    def prepend_matrix(self, m):
        # Convenience wrapper over prepend() taking a Matrix2 instance.
        self.prepend(m.a, m.b, m.c, m.d, m.tx, m.ty)

    def rotate(self, angle):
        # Rotate the transform by *angle* radians, in place.
        cos = math.cos(angle)
        sin = math.sin(angle)
        a1 = self.a
        c1 = self.c
        tx1 = self.tx
        self.a = a1*cos-self.b*sin
        self.b = a1*sin+self.b*cos
        self.c = c1*cos-self.d*sin
        self.d = c1*sin+self.d*cos
        self.tx = tx1*cos-self.ty*sin
        self.ty = tx1*sin+self.ty*cos

    def scale(self, x, y):
        # Scale the transform (translation included) by x and y, in place.
        self.a *= x;
        self.d *= y;
        self.tx *= x;
        self.ty *= y;

    def translate(self, x, y):
        # Add (x, y) to the translation components, in place.
        self.tx += x;
        self.ty += y;
class Matrix4(object):
    """
    Matrix4: a row-major 4x4 matrix with elements n11..n44 (nROWCOL).

    Defaults to the identity matrix when constructed without data.
    """
    def __init__(self, data=None):
        # Accept a flat row-major list of 16 values; anything else (including
        # None or a wrongly-sized list) yields the identity matrix.
        if not data is None and len(data) == 16:
            self.n11 = data[0]; self.n12 = data[1]; self.n13 = data[2]; self.n14 = data[3]
            self.n21 = data[4]; self.n22 = data[5]; self.n23 = data[6]; self.n24 = data[7]
            self.n31 = data[8]; self.n32 = data[9]; self.n33 = data[10]; self.n34 = data[11]
            self.n41 = data[12]; self.n42 = data[13]; self.n43 = data[14]; self.n44 = data[15]
        else:
            self.n11 = 1.0; self.n12 = 0.0; self.n13 = 0.0; self.n14 = 0.0
            self.n21 = 0.0; self.n22 = 1.0; self.n23 = 0.0; self.n24 = 0.0
            self.n31 = 0.0; self.n32 = 0.0; self.n33 = 1.0; self.n34 = 0.0
            self.n41 = 0.0; self.n42 = 0.0; self.n43 = 0.0; self.n44 = 1.0

    def clone(self):
        """Return an independent copy of this matrix."""
        return Matrix4(self.flatten())

    def flatten(self):
        """Return the 16 elements as a flat row-major list."""
        return [self.n11, self.n12, self.n13, self.n14, \
                self.n21, self.n22, self.n23, self.n24, \
                self.n31, self.n32, self.n33, self.n34, \
                self.n41, self.n42, self.n43, self.n44]

    def identity(self):
        """Reset this matrix to the identity; return self."""
        self.n11 = 1.0; self.n12 = 0.0; self.n13 = 0.0; self.n14 = 0.0
        self.n21 = 0.0; self.n22 = 1.0; self.n23 = 0.0; self.n24 = 0.0
        self.n31 = 0.0; self.n32 = 0.0; self.n33 = 1.0; self.n34 = 0.0
        self.n41 = 0.0; self.n42 = 0.0; self.n43 = 0.0; self.n44 = 1.0
        return self

    def multiply(self, a, b):
        """Set this matrix to the product a * b; return self."""
        a11 = a.n11; a12 = a.n12; a13 = a.n13; a14 = a.n14
        a21 = a.n21; a22 = a.n22; a23 = a.n23; a24 = a.n24
        a31 = a.n31; a32 = a.n32; a33 = a.n33; a34 = a.n34
        a41 = a.n41; a42 = a.n42; a43 = a.n43; a44 = a.n44
        b11 = b.n11; b12 = b.n12; b13 = b.n13; b14 = b.n14
        b21 = b.n21; b22 = b.n22; b23 = b.n23; b24 = b.n24
        b31 = b.n31; b32 = b.n32; b33 = b.n33; b34 = b.n34
        b41 = b.n41; b42 = b.n42; b43 = b.n43; b44 = b.n44
        self.n11 = a11 * b11 + a12 * b21 + a13 * b31 + a14 * b41
        self.n12 = a11 * b12 + a12 * b22 + a13 * b32 + a14 * b42
        self.n13 = a11 * b13 + a12 * b23 + a13 * b33 + a14 * b43
        self.n14 = a11 * b14 + a12 * b24 + a13 * b34 + a14 * b44
        self.n21 = a21 * b11 + a22 * b21 + a23 * b31 + a24 * b41
        self.n22 = a21 * b12 + a22 * b22 + a23 * b32 + a24 * b42
        self.n23 = a21 * b13 + a22 * b23 + a23 * b33 + a24 * b43
        self.n24 = a21 * b14 + a22 * b24 + a23 * b34 + a24 * b44
        self.n31 = a31 * b11 + a32 * b21 + a33 * b31 + a34 * b41
        self.n32 = a31 * b12 + a32 * b22 + a33 * b32 + a34 * b42
        self.n33 = a31 * b13 + a32 * b23 + a33 * b33 + a34 * b43
        self.n34 = a31 * b14 + a32 * b24 + a33 * b34 + a34 * b44
        self.n41 = a41 * b11 + a42 * b21 + a43 * b31 + a44 * b41
        self.n42 = a41 * b12 + a42 * b22 + a43 * b32 + a44 * b42
        self.n43 = a41 * b13 + a42 * b23 + a43 * b33 + a44 * b43
        self.n44 = a41 * b14 + a42 * b24 + a43 * b34 + a44 * b44
        return self

    def multiplyVector3(self, vec):
        """Transform a 3-element list with perspective divide; return a list."""
        vx = vec[0]
        vy = vec[1]
        vz = vec[2]
        d = 1.0 / (self.n41 * vx + self.n42 * vy + self.n43 * vz + self.n44)
        x = (self.n11 * vx + self.n12 * vy + self.n13 * vz + self.n14) * d
        y = (self.n21 * vx + self.n22 * vy + self.n23 * vz + self.n24) * d
        z = (self.n31 * vx + self.n32 * vy + self.n33 * vz + self.n34) * d
        return [x, y, z]

    def multiplyVec3(self, vec):
        """Transform a Vector3 with perspective divide; return a new Vector3."""
        vx = vec.x
        vy = vec.y
        vz = vec.z
        d = 1.0 / (self.n41 * vx + self.n42 * vy + self.n43 * vz + self.n44)
        x = (self.n11 * vx + self.n12 * vy + self.n13 * vz + self.n14) * d
        y = (self.n21 * vx + self.n22 * vy + self.n23 * vz + self.n24) * d
        z = (self.n31 * vx + self.n32 * vy + self.n33 * vz + self.n34) * d
        return Vector3(x, y, z)

    def multiplyVector4(self, v):
        """Transform a 4-element list (no perspective divide); return a list."""
        vx = v[0]; vy = v[1]; vz = v[2]; vw = v[3];
        x = self.n11 * vx + self.n12 * vy + self.n13 * vz + self.n14 * vw;
        y = self.n21 * vx + self.n22 * vy + self.n23 * vz + self.n24 * vw;
        z = self.n31 * vx + self.n32 * vy + self.n33 * vz + self.n34 * vw;
        w = self.n41 * vx + self.n42 * vy + self.n43 * vz + self.n44 * vw;
        return [x, y, z, w];

    def det(self):
        """Return the determinant of this matrix."""
        #( based on http://www.euclideanspace.com/maths/algebra/matrix/functions/inverse/fourD/index.htm )
        return (
            self.n14 * self.n23 * self.n32 * self.n41-
            self.n13 * self.n24 * self.n32 * self.n41-
            self.n14 * self.n22 * self.n33 * self.n41+
            self.n12 * self.n24 * self.n33 * self.n41+
            self.n13 * self.n22 * self.n34 * self.n41-
            self.n12 * self.n23 * self.n34 * self.n41-
            self.n14 * self.n23 * self.n31 * self.n42+
            self.n13 * self.n24 * self.n31 * self.n42+
            self.n14 * self.n21 * self.n33 * self.n42-
            self.n11 * self.n24 * self.n33 * self.n42-
            self.n13 * self.n21 * self.n34 * self.n42+
            self.n11 * self.n23 * self.n34 * self.n42+
            self.n14 * self.n22 * self.n31 * self.n43-
            self.n12 * self.n24 * self.n31 * self.n43-
            self.n14 * self.n21 * self.n32 * self.n43+
            self.n11 * self.n24 * self.n32 * self.n43+
            self.n12 * self.n21 * self.n34 * self.n43-
            self.n11 * self.n22 * self.n34 * self.n43-
            self.n13 * self.n22 * self.n31 * self.n44+
            self.n12 * self.n23 * self.n31 * self.n44+
            self.n13 * self.n21 * self.n32 * self.n44-
            self.n11 * self.n23 * self.n32 * self.n44-
            self.n12 * self.n21 * self.n33 * self.n44+
            self.n11 * self.n22 * self.n33 * self.n44)

    def lookAt(self, eye, center, up):
        """Build a view matrix looking from *eye* towards *center*; return self."""
        x = Vector3(); y = Vector3(); z = Vector3();
        # Orthonormal camera basis: z points from center to eye.
        z.sub(eye, center).normalize();
        x.cross(up, z).normalize();
        y.cross(z, x).normalize();
        #eye.normalize()
        self.n11 = x.x; self.n12 = x.y; self.n13 = x.z; self.n14 = -x.dot(eye);
        self.n21 = y.x; self.n22 = y.y; self.n23 = y.z; self.n24 = -y.dot(eye);
        self.n31 = z.x; self.n32 = z.y; self.n33 = z.z; self.n34 = -z.dot(eye);
        self.n41 = 0.0; self.n42 = 0.0; self.n43 = 0.0; self.n44 = 1.0;
        return self;

    def multiplyScalar(self, s):
        """Multiply every element by *s* in place; return self."""
        self.n11 *= s; self.n12 *= s; self.n13 *= s; self.n14 *= s;
        self.n21 *= s; self.n22 *= s; self.n23 *= s; self.n24 *= s;
        self.n31 *= s; self.n32 *= s; self.n33 *= s; self.n34 *= s;
        self.n41 *= s; self.n42 *= s; self.n43 *= s; self.n44 *= s;
        return self

    @classmethod
    def inverse(cls, m1):
        """Return a new Matrix4 that is the inverse of *m1* (adjugate / det)."""
        # TODO: make this more efficient
        #( based on http://www.euclideanspace.com/maths/algebra/matrix/functions/inverse/fourD/index.htm )
        m2 = Matrix4();
        m2.n11 = m1.n23*m1.n34*m1.n42 - m1.n24*m1.n33*m1.n42 + m1.n24*m1.n32*m1.n43 - m1.n22*m1.n34*m1.n43 - m1.n23*m1.n32*m1.n44 + m1.n22*m1.n33*m1.n44;
        m2.n12 = m1.n14*m1.n33*m1.n42 - m1.n13*m1.n34*m1.n42 - m1.n14*m1.n32*m1.n43 + m1.n12*m1.n34*m1.n43 + m1.n13*m1.n32*m1.n44 - m1.n12*m1.n33*m1.n44;
        m2.n13 = m1.n13*m1.n24*m1.n42 - m1.n14*m1.n23*m1.n42 + m1.n14*m1.n22*m1.n43 - m1.n12*m1.n24*m1.n43 - m1.n13*m1.n22*m1.n44 + m1.n12*m1.n23*m1.n44;
        m2.n14 = m1.n14*m1.n23*m1.n32 - m1.n13*m1.n24*m1.n32 - m1.n14*m1.n22*m1.n33 + m1.n12*m1.n24*m1.n33 + m1.n13*m1.n22*m1.n34 - m1.n12*m1.n23*m1.n34;
        m2.n21 = m1.n24*m1.n33*m1.n41 - m1.n23*m1.n34*m1.n41 - m1.n24*m1.n31*m1.n43 + m1.n21*m1.n34*m1.n43 + m1.n23*m1.n31*m1.n44 - m1.n21*m1.n33*m1.n44;
        m2.n22 = m1.n13*m1.n34*m1.n41 - m1.n14*m1.n33*m1.n41 + m1.n14*m1.n31*m1.n43 - m1.n11*m1.n34*m1.n43 - m1.n13*m1.n31*m1.n44 + m1.n11*m1.n33*m1.n44;
        m2.n23 = m1.n14*m1.n23*m1.n41 - m1.n13*m1.n24*m1.n41 - m1.n14*m1.n21*m1.n43 + m1.n11*m1.n24*m1.n43 + m1.n13*m1.n21*m1.n44 - m1.n11*m1.n23*m1.n44;
        m2.n24 = m1.n13*m1.n24*m1.n31 - m1.n14*m1.n23*m1.n31 + m1.n14*m1.n21*m1.n33 - m1.n11*m1.n24*m1.n33 - m1.n13*m1.n21*m1.n34 + m1.n11*m1.n23*m1.n34;
        m2.n31 = m1.n22*m1.n34*m1.n41 - m1.n24*m1.n32*m1.n41 + m1.n24*m1.n31*m1.n42 - m1.n21*m1.n34*m1.n42 - m1.n22*m1.n31*m1.n44 + m1.n21*m1.n32*m1.n44;
        m2.n32 = m1.n14*m1.n32*m1.n41 - m1.n12*m1.n34*m1.n41 - m1.n14*m1.n31*m1.n42 + m1.n11*m1.n34*m1.n42 + m1.n12*m1.n31*m1.n44 - m1.n11*m1.n32*m1.n44;
        m2.n33 = m1.n13*m1.n24*m1.n41 - m1.n14*m1.n22*m1.n41 + m1.n14*m1.n21*m1.n42 - m1.n11*m1.n24*m1.n42 - m1.n12*m1.n21*m1.n44 + m1.n11*m1.n22*m1.n44;
        m2.n34 = m1.n14*m1.n22*m1.n31 - m1.n12*m1.n24*m1.n31 - m1.n14*m1.n21*m1.n32 + m1.n11*m1.n24*m1.n32 + m1.n12*m1.n21*m1.n34 - m1.n11*m1.n22*m1.n34;
        m2.n41 = m1.n23*m1.n32*m1.n41 - m1.n22*m1.n33*m1.n41 - m1.n23*m1.n31*m1.n42 + m1.n21*m1.n33*m1.n42 + m1.n22*m1.n31*m1.n43 - m1.n21*m1.n32*m1.n43;
        m2.n42 = m1.n12*m1.n33*m1.n41 - m1.n13*m1.n32*m1.n41 + m1.n13*m1.n31*m1.n42 - m1.n11*m1.n33*m1.n42 - m1.n12*m1.n31*m1.n43 + m1.n11*m1.n32*m1.n43;
        m2.n43 = m1.n13*m1.n22*m1.n41 - m1.n12*m1.n23*m1.n41 - m1.n13*m1.n21*m1.n42 + m1.n11*m1.n23*m1.n42 + m1.n12*m1.n21*m1.n43 - m1.n11*m1.n22*m1.n43;
        m2.n44 = m1.n12*m1.n23*m1.n31 - m1.n13*m1.n22*m1.n31 + m1.n13*m1.n21*m1.n32 - m1.n11*m1.n23*m1.n32 - m1.n12*m1.n21*m1.n33 + m1.n11*m1.n22*m1.n33;
        m2.multiplyScalar(1.0 / m1.det());
        return m2;

    @classmethod
    def rotationMatrix(cls, x, y, z, angle):
        """Return a rotation of *angle* radians about the axis (x, y, z)."""
        rot = Matrix4()
        c = math.cos(angle)
        s = math.sin(angle)
        t = 1 - c
        rot.n11 = t * x * x + c
        rot.n12 = t * x * y - s * z
        rot.n13 = t * x * z + s * y
        rot.n21 = t * x * y + s * z
        rot.n22 = t * y * y + c
        rot.n23 = t * y * z - s * x
        rot.n31 = t * x * z - s * y
        rot.n32 = t * y * z + s * x
        rot.n33 = t * z * z + c
        return rot

    @classmethod
    def scaleMatrix(cls, x, y, z):
        """Return a scale matrix with factors x, y, z on the diagonal."""
        m = Matrix4()
        m.n11 = x
        m.n22 = y
        m.n33 = z
        return m

    @classmethod
    def translationMatrix(cls, x, y, z):
        """Return a translation matrix moving points by (x, y, z)."""
        m = Matrix4()
        m.n14 = x
        m.n24 = y
        m.n34 = z
        return m
| mit |
YOTOV-LIMITED/mysql-server | storage/ndb/mcc/config_parser.py | 32 | 4606 | # Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" Tools for parsing and extracting information from cluster config.ini files. """
import StringIO
import ConfigParser
def parse_cluster_config_ini(path):
    """Parse the cluster config.ini file at *path*.

    Returns a list of {'name': section_name, 'options': {key: value}} dicts
    in file order (see the underscore-suffixed helper for the line format).
    """
    with open(path) as ini:
        return parse_cluster_conifg_ini_(ini)
def parse_cluster_conifg_ini_(ini):
    """Parse an iterable of config.ini lines.

    Blank lines and '#' comments are skipped; '[name]' starts a new section;
    every other line is split on the first '=' into a key/value pair of the
    current section. Returns a list of {'name': ..., 'options': {...}} dicts.
    """
    sections = []
    current = None
    for raw in ini:
        line = raw.rstrip()
        if not line or line.startswith('#'):
            continue
        if line.startswith('['):
            current = {'name': line[1:-1], 'options': {}}
            sections.append(current)
        else:
            key, value = line.split('=', 1)
            current['options'][key] = value
    return sections
def write_cluster_config_ini(c):
    """Serialize a list of {'name', 'options'} section dicts back to ini text.

    Inverse of parse_cluster_config_ini: each section renders as '[name]'
    followed by 'key=value' lines, sections separated by a blank line.
    """
    rendered = []
    for section in c:
        opts = section['options']
        body = '\n'.join('{0}={1}'.format(k, opts[k]) for k in opts.keys())
        rendered.append('[{0}]\n{1}\n'.format(section['name'], body))
    return '\n'.join(rendered)
def parse_cluster_config_ini_x(path):
    """Parse config.ini into a dict keyed by section header line, or by the
    tuple (header, NodeId) once a NodeId option has been seen.

    NOTE(review): 'opts' is created once and shared across every section, so
    all keys end up referencing the same accumulated options dict; also, a
    section is only flushed into 'c' when the *next* '[' line is seen, so the
    final section of the file is never added. Both look like latent bugs --
    confirm against callers before relying on this function.
    """
    c = {}
    with open(path) as ini:
        key = None
        opts = {}
        for l in map(str.rstrip, ini):
            # Skip blanks and comments.
            if l == '' or l.startswith('#'):
                continue
            # New section header: flush the previous section (if any).
            if l.startswith('['):
                if key is not None:
                    c[key] = opts
                key = l
                continue
            (k,v) = l.split('=', 1)
            opts[k] = v
            # Once the NodeId is known, re-key the section by (header, NodeId).
            if k == 'NodeId':
                key = (key, v)
    return c
# Below is deprecated
def parse_config_ini(path):
    """Parse a cluster config.ini into a ConfigParser (Python 2 only).

    Cluster config files may repeat section names (one per node), which
    ConfigParser cannot hold; each non-DEFAULT section is therefore renamed
    to '<name>_<n>' with a per-name counter before feeding the rewritten
    text to ConfigParser.

    NOTE: relies on dict.has_key, the StringIO module and
    ConfigParser.readfp, so this function runs only under Python 2.
    """
    ini = open(path)
    buf = StringIO.StringIO()
    sections = {}
    for l in map(str.rstrip, ini):
        # A section header line '[NAME]'; DEFAULT sections pass through
        # untouched because ConfigParser treats them specially.
        if not 'DEFAULT' in l and len(l) > 2 and l[0] == '[' and l[-1] == ']':
            section = l[1:-1]
            n = 0
            if sections.has_key(section):
                n = sections[section] + 1
            sections[section] = n
            buf.write('[' + section + '_' + repr(n) + ']\n')
        else:
            buf.write(l+'\n')
    ini.close()
    buf.seek(0)
    cp = ConfigParser.ConfigParser()
    # Keep option names case-sensitive (the default lower-cases them).
    cp.optionxform = str
    cp.readfp(buf)
    buf.close()
    return cp
def get_option_value_set(cp, option):
    """Return the set of distinct values *option* takes across all sections of *cp*."""
    values = set()
    for section in cp.sections():
        if cp.has_option(section, option):
            values.add(cp.get(section, option))
    return values
def get_node_dicts(cp, portbase):
    """Return one options dict per non-API node section of *cp*.

    Each dict merges the options of the matching '<TYPE> DEFAULT' section
    (if present) with the section's own options, plus a synthesised
    '_NodeType' entry and, for mysqld nodes, consecutive '_MysqlPort'
    values starting at *portbase*.
    """
    node_sections = filter(lambda s: cp.has_option(s, 'NodeId') and not 'API' in s, cp.sections())
    ndicts = []
    for ns in node_sections:
        # Strip the '_<n>' uniquifier appended by parse_config_ini().
        t = '_'.join(ns.split('_')[:-1])
        nalist = [('_NodeType', t.lower())]
        if t == 'MYSQLD':
            nalist += [('_MysqlPort', portbase)]
            portbase += 1
        if cp.has_section(t+' DEFAULT'):
            nalist += cp.items(t+' DEFAULT')
        # Section-specific options come last so they override the defaults
        # when the list is turned into a dict.
        nalist += cp.items(ns)
        ndicts.append(dict(nalist))
    return ndicts
def get_actual_section(s):
    """Map a uniquified section name ('NDBD_0') back to its original name.

    DEFAULT sections are returned unchanged since parse_config_ini never
    renames them.
    """
    if 'DEFAULT' in s:
        return s
    parts = s.split('_')
    return '_'.join(parts[:-1])
def get_proct1(s):
    """Return the lower-cased process type for section name *s*.

    'NDB_MGMD DEFAULT' -> 'ndb_mgmd'; 'MYSQLD_3' -> 'mysqld'.

    BUG FIX: the original used s.rstrip(' DEFAULT'), but str.rstrip strips a
    *character set*, not a suffix -- e.g. 'NDBD DEFAULT' became 'ndb' because
    the trailing 'D' of 'NDBD' is in the set. The literal ' DEFAULT' suffix is
    now removed instead.
    """
    if 'DEFAULT' in s:
        if s.endswith(' DEFAULT'):
            s = s[:-len(' DEFAULT')]
    else:
        # Strip the '_<n>' uniquifier appended by parse_config_ini().
        s = '_'.join(s.split('_')[:-1])
    return s.lower()
def get_pid1(cp, s):
    """Return the NodeId value of section *s*, or None when it has none."""
    return cp.get(s, 'NodeId') if cp.has_option(s, 'NodeId') else None
def get_ndbconnecturl(cp):
    """Build the management-server connect string 'host:port[,host:port...]'
    from every NDB_MGMD_* section of *cp*, in section order."""
    urls = []
    for section in cp.sections():
        if 'NDB_MGMD_' in section:
            urls.append('{0}:{1}'.format(cp.get(section, 'HostName'),
                                         cp.get(section, 'PortNumber')))
    return ','.join(urls)
def get_configvalues(cp):
    """Flatten every (section, key, value) triple of *cp* into row dicts.

    The proct2/pid2 slots are always None; proct1/pid1 come from the
    section name via the helpers above.
    """
    rows = []
    for s in cp.sections():
        for (k, v) in cp.items(s):
            rows.append({
                'section': get_actual_section(s),
                'key': k,
                'proct1': get_proct1(s),
                'pid1': get_pid1(cp, s),
                'proct2': None,
                'pid2': None,
                'val': v,
            })
    return rows
def get_processes(cp):
    """Build one process-descriptor dict per non-API node section of *cp*.

    A section qualifies when it has a NodeId option and its name does not
    contain 'API'. Every descriptor shares the same ndbconnecturl string.
    """
    return [ {'desired_process_status': 0,
              'restartlevel': 0,
              'xtraoptions': 0,
              'processname': get_proct1(s),
              'internalid': get_pid1(cp,s),
              'package': {},
              'hostaddress': cp.get(s, 'HostName'),
              'configfilepath':None,
              'ndbconnecturl': get_ndbconnecturl(cp)} for s in filter(lambda s: cp.has_option(s, 'NodeId') and not 'API' in s, cp.sections()) ]
| gpl-2.0 |
ARM-software/lisa | external/devlib/devlib/connection.py | 3 | 15639 | # Copyright 2019 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC, abstractmethod
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from weakref import WeakSet
from shlex import quote
from time import monotonic
import os
import signal
import socket
import subprocess
import threading
import time
import logging
from devlib.utils.misc import InitCheckpoint
_KILL_TIMEOUT = 3
def _kill_pgid_cmd(pgid, sig):
return 'kill -{} -{}'.format(sig.value, pgid)
class ConnectionBase(InitCheckpoint):
    """
    Base class for all connections.
    """
    def __init__(self):
        # WeakSet: background commands that finish and get garbage-collected
        # drop out automatically instead of accumulating here.
        self._current_bg_cmds = WeakSet()
        self._closed = False
        self._close_lock = threading.Lock()
        # Populated externally by users of the connection; not set here.
        self.busybox = None

    def cancel_running_command(self):
        """Cancel every background command currently tracked by this connection."""
        # Snapshot first so the WeakSet cannot change size mid-iteration.
        bg_cmds = set(self._current_bg_cmds)
        for bg_cmd in bg_cmds:
            bg_cmd.cancel()

    @abstractmethod
    def _close(self):
        """
        Close the connection.

        The public :meth:`close` method makes sure that :meth:`_close` will
        only be called once, and will serialize accesses to it if it happens to
        be called from multiple threads at once.
        """

    def close(self):
        """Close the connection exactly once, safely from any thread."""
        # Locking the closing allows any thread to safely call close() as long
        # as the connection can be closed from a thread that is not the one it
        # started its life in.
        with self._close_lock:
            if not self._closed:
                self._close()
                self._closed = True

    # Ideally, that should not be relied upon but that will improve the chances
    # of the connection being properly cleaned up when it's not in use anymore.
    def __del__(self):
        # Since __del__ will be called if an exception is raised in __init__
        # (e.g. we cannot connect), we only run close() when we are sure
        # __init__ has completed successfully ('initialized' is provided by
        # the InitCheckpoint base class).
        if self.initialized:
            self.close()
class BackgroundCommand(ABC):
    """
    Allows managing a running background command using a subset of the
    :class:`subprocess.Popen` API.

    Instances of this class can be used as context managers, with the same
    semantic as :class:`subprocess.Popen`: leaving the ``with`` block calls
    :meth:`close`.
    """

    @abstractmethod
    def send_signal(self, sig):
        """
        Send a POSIX signal to the background command's process group ID
        (PGID).

        :param signal: Signal to send.
        :type signal: signal.Signals
        """

    def kill(self):
        """
        Send SIGKILL to the background command.
        """
        self.send_signal(signal.SIGKILL)

    def cancel(self, kill_timeout=_KILL_TIMEOUT):
        """
        Try to gracefully terminate the process by sending ``SIGTERM``, then
        waiting for ``kill_timeout`` to send ``SIGKILL``.
        """
        # Only attempt cancellation while the command is still running
        # (poll() returns None until the command has exited).
        if self.poll() is None:
            self._cancel(kill_timeout=kill_timeout)

    @abstractmethod
    def _cancel(self, kill_timeout):
        """
        Method to override in subclasses to implement :meth:`cancel`.
        """
        pass

    @abstractmethod
    def wait(self):
        """
        Block until the background command completes, and return its exit code.
        """

    @abstractmethod
    def poll(self):
        """
        Return exit code if the command has exited, None otherwise.
        """

    @property
    @abstractmethod
    def stdin(self):
        """
        File-like object connected to the background command's stdin.
        """

    @property
    @abstractmethod
    def stdout(self):
        """
        File-like object connected to the background command's stdout.
        """

    @property
    @abstractmethod
    def stderr(self):
        """
        File-like object connected to the background command's stderr.
        """

    @property
    @abstractmethod
    def pid(self):
        """
        Process Group ID (PGID) of the background command.

        Since the command is usually wrapped in shell processes for IO
        redirections, sudo etc, the PID cannot be assumed to be the actual PID
        of the command passed by the user. It is guaranteed to be a PGID
        instead, which means signals sent to it as such will target all
        subprocesses involved in executing that command.
        """

    @abstractmethod
    def close(self):
        """
        Close all opened streams and then wait for command completion.

        :returns: Exit code of the command.

        .. note:: If the command is writing to its stdout/stderr, it might be
            blocked on that and die when the streams are closed.
        """

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
class PopenBackgroundCommand(BackgroundCommand):
    """
    :class:`subprocess.Popen`-based background command.
    """

    def __init__(self, popen):
        # Wrapped Popen object; its pid is assumed to be a PGID, as per the
        # BackgroundCommand.pid contract.
        self.popen = popen

    def send_signal(self, sig):
        # popen.pid is guaranteed to be a PGID (see BackgroundCommand.pid),
        # so it can be passed to killpg() directly to signal the whole group.
        return os.killpg(self.popen.pid, sig)

    @property
    def stdin(self):
        return self.popen.stdin

    @property
    def stdout(self):
        return self.popen.stdout

    @property
    def stderr(self):
        return self.popen.stderr

    @property
    def pid(self):
        return self.popen.pid

    def wait(self):
        return self.popen.wait()

    def poll(self):
        return self.popen.poll()

    def _cancel(self, kill_timeout):
        # Route signals through send_signal() for consistency with the other
        # BackgroundCommand implementations (the previous inline
        # os.killpg(os.getpgid(pid), ...) targets the same group, since pid
        # is documented to be a PGID).
        self.send_signal(signal.SIGTERM)
        try:
            self.popen.wait(timeout=kill_timeout)
        except subprocess.TimeoutExpired:
            # Graceful termination did not happen in time: force the issue.
            self.send_signal(signal.SIGKILL)

    def close(self):
        """
        Close the Popen streams and wait for completion.

        :returns: Exit code of the command.
        """
        self.popen.__exit__(None, None, None)
        return self.popen.returncode

    def __enter__(self):
        self.popen.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        self.popen.__exit__(*args, **kwargs)
class ParamikoBackgroundCommand(BackgroundCommand):
    """
    :mod:`paramiko`-based background command.
    """
    def __init__(self, conn, chan, pid, as_root, stdin, stdout, stderr, redirect_thread):
        # paramiko Channel the command runs on.
        self.chan = chan
        # Whether signals must be delivered with elevated privileges.
        self.as_root = as_root
        # Connection object used to execute the out-of-band kill commands.
        self.conn = conn
        # PGID of the remote command (see BackgroundCommand.pid contract).
        self._pid = pid
        self._stdin = stdin
        self._stdout = stdout
        self._stderr = stderr
        # Thread forwarding channel data to the stdout/stderr streams; joined
        # in close() so all output has been drained before returning.
        self.redirect_thread = redirect_thread

    def send_signal(self, sig):
        # If the command has already completed, we don't want to send a signal
        # to another process that might have gotten that PID in the meantime.
        if self.poll() is not None:
            return
        # Use -PGID to target a process group rather than just the process
        # itself
        cmd = _kill_pgid_cmd(self.pid, sig)
        self.conn.execute(cmd, as_root=self.as_root)

    @property
    def pid(self):
        return self._pid

    def wait(self):
        # Blocks until the remote command exits and the server reports its
        # status on the channel.
        return self.chan.recv_exit_status()

    def poll(self):
        # Non-blocking: only consume the exit status if it is already there.
        if self.chan.exit_status_ready():
            return self.wait()
        else:
            return None

    def _cancel(self, kill_timeout):
        self.send_signal(signal.SIGTERM)
        # Check if the command terminated quickly
        time.sleep(10e-3)
        # Otherwise wait for the full timeout and kill it
        if self.poll() is None:
            # NOTE(review): this sleeps the whole kill_timeout rather than
            # polling for early exit — presumably acceptable for a cancel
            # path; confirm if responsiveness matters.
            time.sleep(kill_timeout)
            self.send_signal(signal.SIGKILL)
            self.wait()

    @property
    def stdin(self):
        return self._stdin

    @property
    def stdout(self):
        return self._stdout

    @property
    def stderr(self):
        return self._stderr

    def close(self):
        """
        Close the streams, wait for the command and join the redirect thread.

        :returns: Exit code of the command.
        """
        # Closing the streams first may unblock (and kill) a command that is
        # writing to them, per the BackgroundCommand.close() note.
        for x in (self.stdin, self.stdout, self.stderr):
            if x is not None:
                x.close()

        exit_code = self.wait()
        thread = self.redirect_thread
        if thread:
            thread.join()

        return exit_code
class AdbBackgroundCommand(BackgroundCommand):
    """
    ``adb``-based background command.
    """

    def __init__(self, conn, adb_popen, pid, as_root):
        # Connection used to run the out-of-band kill commands on the device.
        self.conn = conn
        # Whether signals must be delivered with elevated privileges.
        self.as_root = as_root
        # Popen object driving the local "adb" process.
        self.adb_popen = adb_popen
        # PGID of the command on the device (see BackgroundCommand.pid).
        self._pid = pid

    def send_signal(self, sig):
        # If the command has already completed, we don't want to send a
        # signal to another process that might have been given that PGID in
        # the meantime (same protection as ParamikoBackgroundCommand).
        if self.poll() is not None:
            return
        self.conn.execute(
            _kill_pgid_cmd(self.pid, sig),
            as_root=self.as_root,
        )

    @property
    def stdin(self):
        return self.adb_popen.stdin

    @property
    def stdout(self):
        return self.adb_popen.stdout

    @property
    def stderr(self):
        return self.adb_popen.stderr

    @property
    def pid(self):
        return self._pid

    def wait(self):
        return self.adb_popen.wait()

    def poll(self):
        return self.adb_popen.poll()

    def _cancel(self, kill_timeout):
        self.send_signal(signal.SIGTERM)
        try:
            self.adb_popen.wait(timeout=kill_timeout)
        except subprocess.TimeoutExpired:
            # Kill the remote command group, then the local adb process.
            self.send_signal(signal.SIGKILL)
            self.adb_popen.kill()

    def close(self):
        """
        Close the adb streams and wait for completion.

        :returns: Exit code of the command.
        """
        self.adb_popen.__exit__(None, None, None)
        return self.adb_popen.returncode

    def __enter__(self):
        self.adb_popen.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        self.adb_popen.__exit__(*args, **kwargs)
class TransferManagerBase(ABC):
    """
    Monitor an ongoing file transfer in a background thread, cancelling it if
    it becomes inactive or exceeds a total timeout.
    """

    def _pull_dest_size(self, dest):
        """
        Return the current size in bytes of the local destination of a pull
        (summing all files if it is a directory), or 0 if it does not exist
        yet.
        """
        if os.path.isdir(dest):
            return sum(
                os.stat(os.path.join(dirpath, f)).st_size
                for dirpath, _, fnames in os.walk(dest)
                for f in fnames
            )
        elif os.path.exists(dest):
            return os.stat(dest).st_size
        # Destination has not been created yet. The original trailing
        # "return 0" was unreachable and os.stat() would have raised instead.
        return 0

    def _push_dest_size(self, dest):
        """
        Return the current size of the remote destination of a push as
        reported by busybox ``du -s``, or 0 if the output cannot be parsed.
        """
        cmd = '{} du -s {}'.format(quote(self.conn.busybox), quote(dest))
        out = self.conn.execute(cmd)
        try:
            return int(out.split()[0])
        # IndexError covers empty command output (e.g. dest missing).
        except (IndexError, ValueError):
            return 0

    def __init__(self, conn, poll_period, start_transfer_poll_delay, total_timeout):
        self.conn = conn
        # Seconds between activity polls once monitoring has started.
        self.poll_period = poll_period
        # Hard cap on the whole transfer duration, in seconds.
        self.total_timeout = total_timeout
        # Grace period before the first activity poll.
        self.start_transfer_poll_delay = start_transfer_poll_delay

        self.logger = logging.getLogger('FileTransfer')
        self.managing = threading.Event()
        self.transfer_started = threading.Event()
        self.transfer_completed = threading.Event()
        self.transfer_aborted = threading.Event()

        self.monitor_thread = None
        self.sources = None
        self.dest = None
        self.direction = None

    @abstractmethod
    def _cancel(self):
        """Subclass hook doing the actual cancellation work."""
        pass

    def cancel(self, reason=None):
        """
        Abort the transfer, logging ``reason`` if given.
        """
        msg = 'Cancelling file transfer {} -> {}'.format(self.sources, self.dest)
        if reason is not None:
            msg += ' due to \'{}\''.format(reason)
        self.logger.warning(msg)
        self.transfer_aborted.set()
        self._cancel()

    @abstractmethod
    def isactive(self):
        """Return True if the transfer made progress since the last poll."""
        pass

    @contextmanager
    def manage(self, sources, dest, direction):
        """
        Context manager running the monitor thread for the duration of a
        transfer. ``direction`` is either ``'push'`` or ``'pull'``.
        """
        # Guard against the thread never having been created if an exception
        # fires early, which would make the join() below raise NameError.
        m_thread = None
        try:
            self.sources, self.dest, self.direction = sources, dest, direction
            m_thread = threading.Thread(target=self._monitor)

            self.transfer_completed.clear()
            self.transfer_aborted.clear()
            self.transfer_started.set()

            m_thread.start()
            yield self
        except BaseException:
            self.cancel(reason='exception during transfer')
            raise
        finally:
            # Signal the monitor to stop, then reset all events for reuse.
            self.transfer_completed.set()
            self.transfer_started.set()
            if m_thread is not None:
                m_thread.join()
            self.transfer_started.clear()
            self.transfer_completed.clear()
            self.transfer_aborted.clear()

    def _monitor(self):
        """Background loop cancelling inactive or overlong transfers."""
        start_t = monotonic()
        # Wait out the grace period before the first poll (returns early if
        # the transfer completes first).
        self.transfer_completed.wait(self.start_transfer_poll_delay)
        while not self.transfer_completed.wait(self.poll_period):
            if not self.isactive():
                self.cancel(reason='transfer inactive')
            elif monotonic() - start_t > self.total_timeout:
                self.cancel(reason='transfer timed out')
class PopenTransferManager(TransferManagerBase):
    """
    Transfer manager for :class:`subprocess.Popen`-based transfers, detecting
    activity by sampling the size of the destination between polls.
    """

    def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
        super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
        self.transfer = None
        self.last_sample = None

    def _cancel(self):
        transfer = self.transfer
        if transfer:
            transfer.cancel()
        # Reset state so the manager can be reused for another transfer.
        self.transfer = None
        self.last_sample = None

    def isactive(self):
        # Pick the size probe matching the transfer direction.
        if self.direction == 'push':
            curr_size = self._push_dest_size(self.dest)
        else:
            curr_size = self._pull_dest_size(self.dest)
        self.logger.debug('Polled file transfer, destination size {}'.format(curr_size))
        # The first poll has no baseline, so it always counts as active.
        if self.last_sample is None:
            active = True
        else:
            active = curr_size > self.last_sample
        self.last_sample = curr_size
        return active

    def set_transfer_and_wait(self, popen_bg_cmd):
        """
        Register a background command as the current transfer and block until
        it finishes, raising on failure or abort.
        """
        self.transfer = popen_bg_cmd
        self.last_sample = None
        exit_code = self.transfer.wait()

        # An abort always wins over the exit code, mirroring the original
        # if/elif ordering.
        if self.transfer_aborted.is_set():
            raise TimeoutError(self.transfer.popen.args)
        if exit_code:
            raise subprocess.CalledProcessError(exit_code, self.transfer.popen.args)
class SSHTransferManager(TransferManagerBase):
    """
    Transfer manager for paramiko SFTP/SCP transfers, detecting activity via
    the client's progress callback.
    """

    def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
        super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
        self.transferer = None
        # Set by progress_cb(), consumed (and reset) by isactive().
        self.progressed = False
        self.transferred = None
        self.to_transfer = None

    def _cancel(self):
        self.transferer.close()

    def isactive(self):
        progressed = self.progressed
        self.progressed = False
        # progress_cb() may not have fired yet (e.g. a transfer stalled from
        # the start), in which case there are no byte counters to report and
        # the percentage computation would raise from the monitor thread.
        if self.to_transfer:
            msg = 'Polled transfer: {}% [{}B/{}B]'
            pc = format((self.transferred / self.to_transfer) * 100, '.2f')
            self.logger.debug(msg.format(pc, self.transferred, self.to_transfer))
        return progressed

    @contextmanager
    def manage(self, sources, dest, direction, transferer):
        """
        Context manager wrapping the base monitoring with SFTP/SCP specific
        error translation: a socket error caused by our own cancellation is
        reported as a TimeoutError.
        """
        with super().manage(sources, dest, direction):
            try:
                self.progressed = False
                self.transferer = transferer  # SFTPClient or SCPClient
                yield self
            except socket.error as e:
                if self.transfer_aborted.is_set():
                    self.transfer_aborted.clear()
                    method = 'SCP' if self.conn.use_scp else 'SFTP'
                    raise TimeoutError('{} {}: {} -> {}'.format(method, self.direction, sources, self.dest))
                else:
                    raise e

    def progress_cb(self, *args):
        """
        Progress callback compatible with both SCPClient (3 args) and
        SFTPClient (2 args) callback signatures.
        """
        if self.transfer_started.is_set():
            self.progressed = True
            if len(args) == 3:  # For SCPClient callbacks
                self.transferred = args[2]
                self.to_transfer = args[1]
            elif len(args) == 2:  # For SFTPClient callbacks
                self.transferred = args[0]
                self.to_transfer = args[1]
| apache-2.0 |
Vimos/scikit-learn | benchmarks/bench_covertype.py | 57 | 7378 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
    """Load the data, then cache and memmap the train/test split"""
    ######################################################################
    # Load dataset
    print("Loading dataset...")
    data = fetch_covtype(download_if_missing=True, shuffle=True,
                         random_state=random_state)
    X = check_array(data['data'], dtype=dtype, order=order)
    # Binarize the labels: class 1 (spruce/fir) vs the rest.
    # NOTE: the original used np.int, an alias for the builtin int that was
    # removed in NumPy 1.24; the builtin is the exact equivalent.
    y = (data['target'] != 1).astype(int)

    # Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]

    # Standardize first 10 features (the numerical ones)
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
# Candidate estimators, keyed by the names accepted on the command line.
# NOTE(review): `loss="l2"` (LinearSVC) and `n_iter` (SGDClassifier) are
# legacy parameter spellings from older scikit-learn releases — confirm
# against the installed version.
ESTIMATORS = {
    'GBRT': GradientBoostingClassifier(n_estimators=250),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
    'RandomForest': RandomForestClassifier(n_estimators=20),
    'CART': DecisionTreeClassifier(min_samples_split=5),
    'SGD': SGDClassifier(alpha=0.001, n_iter=2),
    'GaussianNB': GaussianNB(),
    'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
                           tol=1e-3),
    'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
    # Command-line interface: choose classifiers, parallelism, memory layout
    # and the RNG seed shared by data shuffling and the estimators.
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                             "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                             "data")
    parser.add_argument('--random-seed', nargs="?", default=13, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())

    print(__doc__)

    X_train, X_test, y_train, y_test = load_data(
        order=args["order"], random_state=args["random_seed"])

    # Summarize the dataset before training.
    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of train samples:".ljust(25),
             X_train.shape[0], np.sum(y_train == 1),
             np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of test samples:".ljust(25),
             X_test.shape[0], np.sum(y_test == 1),
             np.sum(y_test == 0), int(X_test.nbytes / 1e6)))

    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        estimator_params = estimator.get_params()

        # Seed every *_random_state parameter the estimator exposes.
        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})

        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])

        # Time training and prediction separately.
        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start

        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start

        error[name] = zero_one_loss(y_test, y_pred)

        print("done")

    # Report results sorted by error rate (best first).
    print()
    print("Classification performance:")
    print("===========================")
    print("%s %s %s %s"
          % ("Classifier ", "train-time", "test-time", "error-rate"))
    print("-" * 44)
    for name in sorted(args["classifiers"], key=error.get):
        print("%s %s %s %s" % (name.ljust(12),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % error[name]).center(10)))

    print()
| bsd-3-clause |
gennad/Django-nonrel-stub-for-Google-App-Engine | django/contrib/comments/views/comments.py | 306 | 5359 | from django import http
from django.conf import settings
from utils import next_redirect, confirmation_view
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.html import escape
from django.views.decorators.http import require_POST
from django.contrib import comments
from django.contrib.comments import signals
from django.views.decorators.csrf import csrf_protect
class CommentPostBadRequest(http.HttpResponseBadRequest):
    """
    Response returned when a comment post is invalid. If ``DEBUG`` is on a
    nice-ish error message will be displayed (for debugging purposes), but in
    production mode a simple opaque 400 page will be displayed.
    """
    def __init__(self, why):
        super(CommentPostBadRequest, self).__init__()
        # Only expose the failure reason when debugging; the template renders
        # it into a human-readable page.
        if settings.DEBUG:
            self.content = render_to_string("comments/400-debug.html", {"why": why})
@csrf_protect
@require_POST
def post_comment(request, next=None, using=None):
    """
    Post a comment.

    HTTP POST is required. If ``POST['submit'] == "preview"`` or if there are
    errors a preview template, ``comments/preview.html``, will be rendered.
    """
    # Fill out some initial data fields from an authenticated user, if present
    data = request.POST.copy()
    if request.user.is_authenticated():
        if not data.get('name', ''):
            data["name"] = request.user.get_full_name() or request.user.username
        if not data.get('email', ''):
            data["email"] = request.user.email

    # Check to see if the POST data overrides the view's next argument.
    next = data.get("next", next)

    # Look up the object we're trying to comment about
    ctype = data.get("content_type")
    object_pk = data.get("object_pk")
    if ctype is None or object_pk is None:
        return CommentPostBadRequest("Missing content_type or object_pk field.")
    try:
        model = models.get_model(*ctype.split(".", 1))
        target = model._default_manager.using(using).get(pk=object_pk)
    except TypeError:
        # get_model() got a malformed "app.model" string.
        return CommentPostBadRequest(
            "Invalid content_type value: %r" % escape(ctype))
    except AttributeError:
        # get_model() returned None, so _default_manager lookup failed.
        return CommentPostBadRequest(
            "The given content-type %r does not resolve to a valid model." % \
                escape(ctype))
    except ObjectDoesNotExist:
        return CommentPostBadRequest(
            "No object matching content-type %r and object PK %r exists." % \
                (escape(ctype), escape(object_pk)))
    except (ValueError, ValidationError), e:
        return CommentPostBadRequest(
            "Attempting go get content-type %r and object PK %r exists raised %s" % \
                (escape(ctype), escape(object_pk), e.__class__.__name__))

    # Do we want to preview the comment?
    preview = "preview" in data

    # Construct the comment form
    form = comments.get_form()(target, data=data)

    # Check security information
    if form.security_errors():
        return CommentPostBadRequest(
            "The comment form failed security verification: %s" % \
                escape(str(form.security_errors())))

    # If there are errors or if we requested a preview show the comment
    if form.errors or preview:
        template_list = [
            # These first two exist for purely historical reasons.
            # Django v1.0 and v1.1 allowed the underscore format for
            # preview templates, so we have to preserve that format.
            "comments/%s_%s_preview.html" % (model._meta.app_label, model._meta.module_name),
            "comments/%s_preview.html" % model._meta.app_label,
            # Now the usual directory based template hierarchy.
            "comments/%s/%s/preview.html" % (model._meta.app_label, model._meta.module_name),
            "comments/%s/preview.html" % model._meta.app_label,
            "comments/preview.html",
        ]
        return render_to_response(
            template_list, {
                "comment" : form.data.get("comment", ""),
                "form" : form,
                "next": next,
            },
            RequestContext(request, {})
        )

    # Otherwise create the comment
    comment = form.get_comment_object()
    comment.ip_address = request.META.get("REMOTE_ADDR", None)
    if request.user.is_authenticated():
        comment.user = request.user

    # Signal that the comment is about to be saved
    responses = signals.comment_will_be_posted.send(
        sender  = comment.__class__,
        comment = comment,
        request = request
    )

    # Any receiver returning False vetoes the comment.
    for (receiver, response) in responses:
        if response == False:
            return CommentPostBadRequest(
                "comment_will_be_posted receiver %r killed the comment" % receiver.__name__)

    # Save the comment and signal that it was saved
    comment.save()
    signals.comment_was_posted.send(
        sender  = comment.__class__,
        comment = comment,
        request = request
    )

    return next_redirect(data, next, comment_done, c=comment._get_pk_val())
# Shared success view rendered after next_redirect() from post_comment().
comment_done = confirmation_view(
    template = "comments/posted.html",
    doc = """Display a "comment was posted" success page."""
)
| bsd-3-clause |
potatolondon/django-nonrel-1-4 | django/contrib/formtools/tests/wizard/forms.py | 90 | 7471 | from django import forms, http
from django.conf import settings
from django.test import TestCase
from django.template.response import TemplateResponse
from django.utils.importlib import import_module
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import (WizardView,
SessionWizardView,
CookieWizardView)
class DummyRequest(http.HttpRequest):
    """
    Minimal request stub used to exercise wizard views without the full
    request/response machinery.
    """
    def __init__(self, POST=None):
        super(DummyRequest, self).__init__()
        # Conditional expression instead of the error-prone "and-or" idiom.
        # Truthiness is preserved deliberately: an empty dict still means GET.
        self.method = "POST" if POST else "GET"
        if POST is not None:
            self.POST.update(POST)
        self.session = {}
        # Bypass CSRF enforcement in tests.
        self._dont_enforce_csrf_checks = True
def get_request(*args, **kwargs):
    """Build a DummyRequest wired up with a fresh session store."""
    request = DummyRequest(*args, **kwargs)
    session_engine = import_module(settings.SESSION_ENGINE)
    request.session = session_engine.SessionStore(None)
    return request
class Step1(forms.Form):
    # Single-field form used as the first wizard step in the tests.
    name = forms.CharField()


class Step2(forms.Form):
    # Second step; intentionally identical in shape to Step1.
    name = forms.CharField()


class Step3(forms.Form):
    # Third step, with a differently named field.
    data = forms.CharField()
class CustomKwargsStep1(Step1):
    """Step1 variant accepting an extra ``test`` keyword argument."""
    def __init__(self, test=None, *args, **kwargs):
        self.test = test
        # __init__ must return None; the previous "return super().__init__"
        # only worked because Form.__init__ itself returns None.
        super(CustomKwargsStep1, self).__init__(*args, **kwargs)
class UserForm(forms.ModelForm):
    """ModelForm over the auth User model (all fields)."""
    class Meta:
        model = User


# Formset building on UserForm, with two extra blank forms.
UserFormSet = forms.models.modelformset_factory(User, form=UserForm, extra=2)
class TestWizard(WizardView):
    """
    WizardView subclass instrumented for testing: ``dispatch`` also returns
    the view instance so tests can inspect its internal state.
    """
    storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'

    def dispatch(self, request, *args, **kwargs):
        response = super(TestWizard, self).dispatch(request, *args, **kwargs)
        # Return the instance alongside the response for white-box assertions.
        return response, self

    def get_form_kwargs(self, step, *args, **kwargs):
        kwargs = super(TestWizard, self).get_form_kwargs(step, *args, **kwargs)
        # Inject an extra kwarg for the step exercising custom form kwargs.
        if step == 'kwargs_test':
            kwargs['test'] = True
        return kwargs
class FormTests(TestCase):
    """Unit tests for WizardView configuration and form handling."""

    def test_form_init(self):
        # get_initkwargs should normalise the form list into a dict keyed by
        # step name, auto-numbering unnamed steps.
        testform = TestWizard.get_initkwargs([Step1, Step2])
        self.assertEqual(testform['form_list'], {u'0': Step1, u'1': Step2})

        testform = TestWizard.get_initkwargs([('start', Step1), ('step2', Step2)])
        self.assertEqual(
            testform['form_list'], {u'start': Step1, u'step2': Step2})

        testform = TestWizard.get_initkwargs([Step1, Step2, ('finish', Step3)])
        self.assertEqual(
            testform['form_list'], {u'0': Step1, u'1': Step2, u'finish': Step3})

    def test_first_step(self):
        # A fresh wizard starts on the first step, whatever its name.
        request = get_request()

        testform = TestWizard.as_view([Step1, Step2])
        response, instance = testform(request)
        self.assertEqual(instance.steps.current, u'0')

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)

        self.assertEqual(instance.steps.current, 'start')

    def test_persistence(self):
        # The current step survives across separate view instantiations via
        # the session-backed storage.
        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        request = get_request({'test_wizard-current_step': 'start',
                               'name': 'data1'})
        response, instance = testform(request)
        self.assertEqual(instance.steps.current, 'start')

        instance.storage.current_step = 'step2'

        testform2 = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        request.POST = {'test_wizard-current_step': 'step2'}
        response, instance = testform2(request)
        self.assertEqual(instance.steps.current, 'step2')

    def test_form_condition(self):
        # Steps whose condition evaluates falsy are skipped.
        request = get_request()

        testform = TestWizard.as_view(
            [('start', Step1), ('step2', Step2), ('step3', Step3)],
            condition_dict={'step2': True})
        response, instance = testform(request)
        self.assertEqual(instance.get_next_step(), 'step2')

        testform = TestWizard.as_view(
            [('start', Step1), ('step2', Step2), ('step3', Step3)],
            condition_dict={'step2': False})
        response, instance = testform(request)
        self.assertEqual(instance.get_next_step(), 'step3')

    def test_form_kwargs(self):
        # TestWizard.get_form_kwargs injects 'test' only for 'kwargs_test'.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1),
            ('kwargs_test', CustomKwargsStep1)])
        response, instance = testform(request)

        self.assertEqual(instance.get_form_kwargs('start'), {})
        self.assertEqual(instance.get_form_kwargs('kwargs_test'), {'test': True})
        self.assertEqual(instance.get_form('kwargs_test').test, True)

    def test_form_prefix(self):
        # The form prefix defaults to the current step name.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)

        self.assertEqual(instance.get_form_prefix(), 'start')
        self.assertEqual(instance.get_form_prefix('another'), 'another')

    def test_form_initial(self):
        # initial_dict supplies per-step initial data.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)],
            initial_dict={'start': {'name': 'value1'}})
        response, instance = testform(request)

        self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
        self.assertEqual(instance.get_form_initial('step2'), {})

    def test_form_instance(self):
        # instance_dict supplies model instances to ModelForm steps.
        request = get_request()
        the_instance = User()
        testform = TestWizard.as_view([('start', UserForm), ('step2', Step2)],
            instance_dict={'start': the_instance})
        response, instance = testform(request)

        self.assertEqual(
            instance.get_form_instance('start'),
            the_instance)
        self.assertEqual(
            instance.get_form_instance('non_exist_instance'),
            None)

    def test_formset_instance(self):
        # instance_dict may also hold a queryset for formset steps.
        request = get_request()
        the_instance1, created = User.objects.get_or_create(
            username='testuser1')
        the_instance2, created = User.objects.get_or_create(
            username='testuser2')
        testform = TestWizard.as_view([('start', UserFormSet), ('step2', Step2)],
            instance_dict={'start': User.objects.filter(username='testuser1')})
        response, instance = testform(request)

        self.assertEqual(list(instance.get_form_instance('start')), [the_instance1])
        self.assertEqual(instance.get_form_instance('non_exist_instance'), None)

        self.assertEqual(instance.get_form().initial_form_count(), 1)

    def test_done(self):
        # The base class leaves done() to be implemented by subclasses.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)

        self.assertRaises(NotImplementedError, instance.done, None)

    def test_revalidation(self):
        # render_done re-validates all steps and rewinds to the first failure.
        request = get_request()

        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)
        instance.render_done(None)
        self.assertEqual(instance.storage.current_step, 'start')
class SessionFormTests(TestCase):
    def test_init(self):
        # The session-backed wizard should render directly on GET.
        request = get_request()
        testform = SessionWizardView.as_view([('start', Step1)])
        self.assertTrue(isinstance(testform(request), TemplateResponse))


class CookieFormTests(TestCase):
    def test_init(self):
        # The cookie-backed wizard should behave identically on GET.
        request = get_request()
        testform = CookieWizardView.as_view([('start', Step1)])
        self.assertTrue(isinstance(testform(request), TemplateResponse))
| bsd-3-clause |
jymannob/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screencast.py | 12 | 4228 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
compat_parse_qs,
compat_urllib_request,
)
class ScreencastIE(InfoExtractor):
    # Extractor for www.screencast.com video pages; the video id is the token
    # after /t/.
    _VALID_URL = r'https?://www\.screencast\.com/t/(?P<id>[a-zA-Z0-9]+)'
    _TESTS = [{
        'url': 'http://www.screencast.com/t/3ZEjQXlT',
        'md5': '917df1c13798a3e96211dd1561fded83',
        'info_dict': {
            'id': '3ZEjQXlT',
            'ext': 'm4v',
            'title': 'Color Measurement with Ocean Optics Spectrometers',
            'description': 'md5:240369cde69d8bed61349a199c5fb153',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
        }
    }, {
        'url': 'http://www.screencast.com/t/V2uXehPJa1ZI',
        'md5': 'e8e4b375a7660a9e7e35c33973410d34',
        'info_dict': {
            'id': 'V2uXehPJa1ZI',
            'ext': 'mov',
            'title': 'The Amadeus Spectrometer',
            'description': 're:^In this video, our friends at.*To learn more about Amadeus, visit',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
        }
    }, {
        'url': 'http://www.screencast.com/t/aAB3iowa',
        'md5': 'dedb2734ed00c9755761ccaee88527cd',
        'info_dict': {
            'id': 'aAB3iowa',
            'ext': 'mp4',
            'title': 'Google Earth Export',
            'description': 'Provides a demo of a CommunityViz export to Google Earth, one of the 3D viewing options.',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
        }
    }, {
        'url': 'http://www.screencast.com/t/X3ddTrYh',
        'md5': '669ee55ff9c51988b4ebc0877cc8b159',
        'info_dict': {
            'id': 'X3ddTrYh',
            'ext': 'wmv',
            'title': 'Toolkit 6 User Group Webinar (2014-03-04) - Default Judgment and First Impression',
            'description': 'md5:7b9f393bc92af02326a5c5889639eab0',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
        }
    },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        # Try the page's embed variants in order, falling back at each step.
        video_url = self._html_search_regex(
            r'<embed name="Video".*?src="([^"]+)"', webpage,
            'QuickTime embed', default=None)

        if video_url is None:
            flash_vars_s = self._html_search_regex(
                r'<param name="flashVars" value="([^"]+)"', webpage, 'flash vars',
                default=None)
            if not flash_vars_s:
                flash_vars_s = self._html_search_regex(
                    r'<param name="initParams" value="([^"]+)"', webpage, 'flash vars',
                    default=None)
                if flash_vars_s:
                    # initParams uses commas where flashVars uses ampersands.
                    flash_vars_s = flash_vars_s.replace(',', '&')
            if flash_vars_s:
                flash_vars = compat_parse_qs(flash_vars_s)
                # NOTE(review): quote() is a urllib/urllib.parse function
                # re-exported here through the compat layer — verify
                # compat_urllib_request actually provides it.
                video_url_raw = compat_urllib_request.quote(
                    flash_vars['content'][0])
                # Undo the quoting of the scheme separator only.
                video_url = video_url_raw.replace('http%3A', 'http:')

        if video_url is None:
            video_meta = self._html_search_meta(
                'og:video', webpage, default=None)
            if video_meta:
                video_url = self._search_regex(
                    r'src=(.*?)(?:$|&)', video_meta,
                    'meta tag video URL', default=None)

        if video_url is None:
            raise ExtractorError('Cannot find video')

        # Title/description also have several fallbacks, newest format first.
        title = self._og_search_title(webpage, default=None)
        if title is None:
            title = self._html_search_regex(
                [r'<b>Title:</b> ([^<]*)</div>',
                 r'class="tabSeperator">></span><span class="tabText">(.*?)<'],
                webpage, 'title')
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._og_search_description(webpage, default=None)
        if description is None:
            description = self._html_search_meta('description', webpage)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
| gpl-3.0 |
abzaloid/maps | django-project/bin/activate_this.py | 1076 | 1137 | """By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
# This script is meant to be exec'd (execfile) by an existing interpreter to
# activate the surrounding virtualenv; it relies on __file__ being provided
# by the caller to locate the environment.
try:
    __file__
except NameError:
    raise AssertionError(
        "You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os

# Put the virtualenv's bin directory first on PATH.
old_os_path = os.environ.get('PATH', '')
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# site-packages lives in a platform-dependent location.
if sys.platform == 'win32':
    site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
    site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
    if item not in prev_sys_path:
        new_sys_path.append(item)
        sys.path.remove(item)
sys.path[:0] = new_sys_path
| mit |
shinken-debian-modules/shinken-mod-perfdata-host | module/module.py | 2 | 4305 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is a plugin for the Shinken Broker. It is in charge
# to brok information of the service perfdata into the file
# var/service-perfdata
# So it just manage the service_check_return
# Maybe one day host data will be useful too
# It will need just a new file, and a new manager :)
import codecs
from shinken.basemodule import BaseModule
from shinken.log import logger
# Shinken module registration metadata: which daemons may load this
# module, its configuration type name, and the phases it runs in.
properties = {
    'daemons': ['broker'],
    'type': 'host_perfdata',
    'phases': ['running'],
}
# Entry point used by the Shinken module manager to build a broker instance.
def get_instance(plugin):
    """Create a Host_perfdata_broker from the module configuration.

    ``path`` is mandatory; ``mode`` falls back to append and ``template``
    to the classic Nagios host-perfdata line format.
    """
    logger.info("[Host Perfdata Broker] Get a Host Perfdata broker for plugin %s" % plugin.get_name())
    path = plugin.path
    mode = getattr(plugin, 'mode', 'a')
    template = getattr(plugin, 'template',
                       "$LASTHOSTCHECK$\t$HOSTNAME$\t$HOSTOUTPUT$\t$HOSTSTATE$\t$HOSTPERFDATA$\n")
    return Host_perfdata_broker(plugin, path, mode, template)
# Broker module class: appends one templated line per host check result
# to the host-perfdata flat file.  (The previous comment here mentioned
# the Merlin DB broker by mistake; this class is unrelated to Merlin.)
class Host_perfdata_broker(BaseModule):
    def __init__(self, modconf, path, mode, template):
        # path: output file; mode: open() mode ('a' by default);
        # template: Nagios-style line template with $MACRO$ placeholders.
        BaseModule.__init__(self, modconf)
        self.path = path
        self.mode = mode
        self.template = template
        # Make some raw change: turn the literal '\t' / '\n' escape
        # sequences coming from the configuration file into real
        # tab / newline characters.
        self.template = self.template.replace(r'\t', '\t')
        self.template = self.template.replace(r'\n', '\n')
        # In Nagios it's said to force a return in line
        if not self.template.endswith('\n'):
            self.template += '\n'
    # Called by Broker so we can do init stuff
    # TODO: add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        """Open the perfdata output file (utf-8) with the configured mode."""
        logger.info("[Host Perfdata broker] I open the host-perfdata file '%s'" % self.path)
        self.file = codecs.open(self.path, self.mode, "utf-8")
    # We've got a 0, 1, 2 or 3 (or something else? ->3
    # And want a real OK, WARNING, CRITICAL, etc...
    def resolve_host_state(self, state):
        """Map a numeric host state id to 'UP'/'DOWN'/'UNKNOWN'."""
        states = {0: 'UP', 1: 'DOWN', 2: 'DOWN', 3: 'UNKNOWN'}
        if state in states:
            return states[state]
        else:
            return 'UNKNOWN'
    # A service check have just arrived, we UPDATE data info with this
    def manage_host_check_result_brok(self, b):
        """Render the template with the brok's host data and append the line."""
        data = b.data
        # The original model
        # "$TIMET\t$HOSTNAME\t$OUTPUT\t$SERVICESTATE\t$PERFDATA\n"
        current_state = self.resolve_host_state(data['state_id'])
        macros = {
            '$LASTHOSTCHECK$': int(data['last_chk']),
            '$HOSTNAME$': data['host_name'],
            '$HOSTOUTPUT$': data['output'],
            '$HOSTSTATE$': current_state,
            '$HOSTPERFDATA$': data['perf_data'],
            '$LASTHOSTSTATE$': data['last_state'],
        }
        s = self.template
        for m in macros:
            # NOTE: 'unicode' is the Python 2 builtin; this module predates
            # Python 3.
            s = s.replace(m, unicode(macros[m]))
        #s = "%s\t%s\t%s\t%s\t%s\n" % (int(data['last_chk']),data['host_name'], \
        #    data['output'], \
        #    current_state, data['perf_data'] )
        self.file.write(s)
        self.file.flush()
| agpl-3.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py | 1009 | 2281 | import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
# Grab the raw utf-8 StreamWriter factory; codecs.lookup returns a
# 4-tuple (encoder, decoder, streamreader, streamwriter).
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
    """Return a random 32-character hex string usable as a MIME boundary.

    A minimal stand-in for the old ``mimetools.choose_boundary``.
    """
    boundary = uuid4()
    return boundary.hex
def iter_field_objects(fields):
    """
    Iterate over fields.
    Supports list of (k, v) tuples and dicts, and lists of
    :class:`~urllib3.fields.RequestField`.
    """
    source = six.iteritems(fields) if isinstance(fields, dict) else iter(fields)
    for item in source:
        if isinstance(item, RequestField):
            yield item
        else:
            # Plain (key, value[, ...]) tuple: promote it to a RequestField.
            yield RequestField.from_tuples(*item)
def iter_fields(fields):
    """
    .. deprecated:: 1.6
    Iterate over fields.
    Superseded by :func:`iter_field_objects`, which yields
    :class:`~urllib3.fields.RequestField` objects instead of raw tuples.
    Supports list of (k, v) tuples and dicts.
    """
    if isinstance(fields, dict):
        return ((key, value) for key, value in six.iteritems(fields))
    return ((key, value) for key, value in fields)
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    """
    buf = BytesIO()
    if boundary is None:
        boundary = choose_boundary()
    # Delimiter written before every field part.
    delimiter = b('--%s\r\n' % (boundary))
    for field in iter_field_objects(fields):
        buf.write(delimiter)
        writer(buf).write(field.render_headers())
        payload = field.data
        if isinstance(payload, int):
            payload = str(payload)  # Backwards compatibility
        if isinstance(payload, six.text_type):
            # Text goes through the utf-8 stream writer; bytes go in raw.
            writer(buf).write(payload)
        else:
            buf.write(payload)
        buf.write(b'\r\n')
    # Closing boundary marker.
    buf.write(b('--%s--\r\n' % (boundary)))
    content_type = str('multipart/form-data; boundary=%s' % boundary)
    return buf.getvalue(), content_type
| gpl-2.0 |
vtorshyn/voltdb-shardit-src | voltdb-3.7/lib/python/XMLUtils.py | 2 | 1080 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2013 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
from xml.etree import ElementTree
from xml.dom import minidom
# To create a human readable xml file
def prettify(elem):
    """Return *elem* serialized as an indented, human-readable XML string."""
    raw = ElementTree.tostring(elem, 'utf-8')
    dom = minidom.parseString(raw)
    return dom.toprettyxml(indent="  ")
| apache-2.0 |
paweljasinski/ironpython3 | Src/StdLib/Lib/unittest/test/test_setups.py | 81 | 16426 | import io
import sys
import unittest
def resultFactory(*_):
    """Return a brand-new ``unittest.TestResult``, ignoring all arguments."""
    result = unittest.TestResult()
    return result
class TestSetups(unittest.TestCase):
    """Tests for class- and module-level fixtures.

    Covers setUpClass/tearDownClass and setUpModule/tearDownModule:
    call counts, error propagation, SkipTest handling, and fixture
    ordering across classes and modules.
    """
    def getRunner(self):
        # Runner that swallows output and records results in a plain TestResult.
        return unittest.TextTestRunner(resultclass=resultFactory,
                                       stream=io.StringIO())
    def runTests(self, *cases):
        # Load all tests from the given TestCase classes and run them,
        # deliberately wrapped in nested and empty suites.
        suite = unittest.TestSuite()
        for case in cases:
            tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
            suite.addTests(tests)
        runner = self.getRunner()
        # creating a nested suite exposes some potential bugs
        realSuite = unittest.TestSuite()
        realSuite.addTest(suite)
        # adding empty suites to the end exposes potential bugs
        suite.addTest(unittest.TestSuite())
        realSuite.addTest(unittest.TestSuite())
        return runner.run(realSuite)
    # setUpClass must run exactly once per class, regardless of test count.
    def test_setup_class(self):
        class Test(unittest.TestCase):
            setUpCalled = 0
            @classmethod
            def setUpClass(cls):
                Test.setUpCalled += 1
                unittest.TestCase.setUpClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(Test.setUpCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    # tearDownClass must also run exactly once per class.
    def test_teardown_class(self):
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    # Each class gets its own tearDownClass call.
    def test_teardown_class_two_classes(self):
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test, Test2)
        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(Test2.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 0)
    # An exception in setUpClass skips the class's tests and is reported
    # as a single error named after the class.
    def test_error_in_setupclass(self):
        class BrokenTest(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(BrokenTest)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'setUpClass (%s.BrokenTest)' % __name__)
    # An exception in tearDownClass is reported but tests still run.
    def test_error_in_teardown_class(self):
        class Test(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test, Test2)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 2)
        self.assertEqual(Test.tornDown, 1)
        self.assertEqual(Test2.tornDown, 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'tearDownClass (%s.Test)' % __name__)
    # If setUpClass fails, tearDownClass must not be called at all.
    def test_class_not_torndown_when_setup_fails(self):
        class Test(unittest.TestCase):
            tornDown = False
            @classmethod
            def setUpClass(cls):
                raise TypeError
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
                raise TypeError('foo')
            def test_one(self):
                pass
        self.runTests(Test)
        self.assertFalse(Test.tornDown)
    # A skipped class runs neither setUpClass nor tearDownClass.
    def test_class_not_setup_or_torndown_when_skipped(self):
        class Test(unittest.TestCase):
            classSetUp = False
            tornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
            def test_one(self):
                pass
        Test = unittest.skip("hop")(Test)
        self.runTests(Test)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.tornDown)
    # Module and class fixtures must fire in the right order even when
    # tests from the same class/module are scattered across sub-suites.
    def test_setup_teardown_order_with_pathological_suite(self):
        results = []
        class Module1(object):
            @staticmethod
            def setUpModule():
                results.append('Module1.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module1.tearDownModule')
        class Module2(object):
            @staticmethod
            def setUpModule():
                results.append('Module2.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module2.tearDownModule')
        class Test1(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 1')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 1')
            def testOne(self):
                results.append('Test1.testOne')
            def testTwo(self):
                results.append('Test1.testTwo')
        class Test2(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 2')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 2')
            def testOne(self):
                results.append('Test2.testOne')
            def testTwo(self):
                results.append('Test2.testTwo')
        class Test3(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 3')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 3')
            def testOne(self):
                results.append('Test3.testOne')
            def testTwo(self):
                results.append('Test3.testTwo')
        Test1.__module__ = Test2.__module__ = 'Module'
        Test3.__module__ = 'Module2'
        sys.modules['Module'] = Module1
        sys.modules['Module2'] = Module2
        first = unittest.TestSuite((Test1('testOne'),))
        second = unittest.TestSuite((Test1('testTwo'),))
        third = unittest.TestSuite((Test2('testOne'),))
        fourth = unittest.TestSuite((Test2('testTwo'),))
        fifth = unittest.TestSuite((Test3('testOne'),))
        sixth = unittest.TestSuite((Test3('testTwo'),))
        suite = unittest.TestSuite((first, second, third, fourth, fifth, sixth))
        runner = self.getRunner()
        result = runner.run(suite)
        self.assertEqual(result.testsRun, 6)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(results,
                         ['Module1.setUpModule', 'setup 1',
                          'Test1.testOne', 'Test1.testTwo', 'teardown 1',
                          'setup 2', 'Test2.testOne', 'Test2.testTwo',
                          'teardown 2', 'Module1.tearDownModule',
                          'Module2.setUpModule', 'setup 3',
                          'Test3.testOne', 'Test3.testTwo',
                          'teardown 3', 'Module2.tearDownModule'])
    # setUpModule runs once for the whole module.
    def test_setup_module(self):
        class Module(object):
            moduleSetup = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    # A failing setUpModule skips all the module's tests and class fixtures,
    # and tearDownModule must not run.
    def test_error_in_setup_module(self):
        class Module(object):
            moduleSetup = 0
            moduleTornDown = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1
                raise TypeError('foo')
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
        class Test(unittest.TestCase):
            classSetUp = False
            classTornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.classTornDown = True
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        Test2.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test, Test2)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(Module.moduleTornDown, 0)
        self.assertEqual(result.testsRun, 0)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.classTornDown)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error), 'setUpModule (Module)')
    # Tests still run when their declared module cannot be found.
    def test_testcase_with_missing_module(self):
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules.pop('Module', None)
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 2)
    # tearDownModule runs once after all the module's tests.
    def test_teardown_module(self):
        class Module(object):
            moduleTornDown = 0
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(Module.moduleTornDown, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    # An exception in tearDownModule is reported as one error; all tests
    # and class fixtures still run.
    def test_error_in_teardown_module(self):
        class Module(object):
            moduleTornDown = 0
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
                raise TypeError('foo')
        class Test(unittest.TestCase):
            classSetUp = False
            classTornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.classTornDown = True
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        Test2.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test, Test2)
        self.assertEqual(Module.moduleTornDown, 1)
        self.assertEqual(result.testsRun, 4)
        self.assertTrue(Test.classSetUp)
        self.assertTrue(Test.classTornDown)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error), 'tearDownModule (Module)')
    # SkipTest raised in setUpClass marks the class skipped, not errored.
    def test_skiptest_in_setupclass(self):
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise unittest.SkipTest('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
    # SkipTest raised in setUpModule marks the module skipped, not errored.
    def test_skiptest_in_setupmodule(self):
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                raise unittest.SkipTest('foo')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpModule (Module)')
    # suite.debug() must execute the same fixtures as a normal run.
    def test_suite_debug_executes_setups_and_teardowns(self):
        ordering = []
        class Module(object):
            @staticmethod
            def setUpModule():
                ordering.append('setUpModule')
            @staticmethod
            def tearDownModule():
                ordering.append('tearDownModule')
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                ordering.append('setUpClass')
            @classmethod
            def tearDownClass(cls):
                ordering.append('tearDownClass')
            def test_something(self):
                ordering.append('test_something')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
        suite.debug()
        expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
        self.assertEqual(ordering, expectedOrder)
    # In debug mode fixture exceptions propagate to the caller instead of
    # being collected; the 'phase' variable selects which fixture raises.
    def test_suite_debug_propagates_exceptions(self):
        class Module(object):
            @staticmethod
            def setUpModule():
                if phase == 0:
                    raise Exception('setUpModule')
            @staticmethod
            def tearDownModule():
                if phase == 1:
                    raise Exception('tearDownModule')
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                if phase == 2:
                    raise Exception('setUpClass')
            @classmethod
            def tearDownClass(cls):
                if phase == 3:
                    raise Exception('tearDownClass')
            def test_something(self):
                if phase == 4:
                    raise Exception('test_something')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
        for phase, msg in enumerate(messages):
            _suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
            suite = unittest.TestSuite([_suite])
            with self.assertRaisesRegex(Exception, msg):
                suite.debug()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
Judystudy/gooderp_addons | auto_backup/models/backup_scheduler.py | 6 | 7744 | # -*- coding: utf-8 -*-
##############################################################################
#
# odoo, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# Copyright (C) 2012-2015 Mrshelly@gmail.com upgrade to 7.0
# Copyright (C) 2014 JianJian@osbzr.com upgrade to 8.0
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import fields, models, api
import xmlrpclib
import socket
import os
import time
import base64
import logging
import pytz
import datetime
from odoo import tools
from odoo import netsvc
from odoo.exceptions import UserError
# Module-level logger named after this module.
_logger = logging.getLogger(__name__)
# Thin XML-RPC helper: invoke ``method`` on ``connector`` with ``args``.
# Network-level socket errors are re-raised to the caller; note the
# Python 2 ``except ..., e`` syntax (this module predates Python 3).
def execute(connector, method, *args):
    res = False
    try:
        res = getattr(connector, method)(*args)
    except socket.error, e:
        raise e
    return res
# Default backup directory: ~/DBbackups (HOMEPATH is the Windows
# fallback when HOME is unset).
addons_path = '%s/DBbackups' % (os.environ.get('HOME', '')
                                or os.environ.get('HOMEPATH', ''))
class DbBackup(models.Model):
    """Scheduled database backups, intended to be driven by ir.cron jobs."""
    _name = 'db.backup'
    _description = u'数据库自动备份'
    @api.model
    def get_db_list(self, host='localhost', port='8069'):
        """Return the database names exposed by the server's /xmlrpc/db endpoint."""
        uri = 'http://' + host + ':' + port
        conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
        db_list = execute(conn, 'list')
        return db_list
    # Connection / target configuration fields.
    host = fields.Char('Host', size=100, required='True', default='localhost')
    port = fields.Char('Port', size=10, required='True', default='8888')
    name = fields.Char('Database', size=100, required='True',
                       help='Database you want to schedule backups for')
    bkp_dir = fields.Char('Backup Directory', size=100,
                          help='Absolute path for storing the backups', required='True', default=addons_path)
    company_id = fields.Many2one(
        'res.company',
        string=u'公司',
        change_default=True,
        default=lambda self: self.env['res.company']._company_default_get())
    @api.constrains('name')
    def _check_db_exist(self):
        """Constraint: the configured database must exist on the target server."""
        # NOTE(review): self.browse() with no ids yields an empty recordset,
        # so this loop body never executes; it presumably should iterate
        # over ``self`` -- confirm before relying on this constraint.
        for rec in self.browse():
            db_list = self.get_db_list(rec.host, rec.port)
            if not rec.name in db_list:
                raise UserError('Error ! No such database exist.')
    @api.model
    def schedule_backup(self):
        """Cron entry point: dump each configured database over XML-RPC into a zip file."""
        confs = self.search([])
        # The XML-RPC 'dump' endpoint requires the master (admin) password.
        master_pass = tools.config.get('admin_passwd', False)
        res_user_obj = self.env.get('res.users')
        if not master_pass:
            # NOTE(review): bare ``raise`` outside an except block fails at
            # runtime; an explicit exception would be clearer.
            raise
        for rec in confs:
            db_list = self.get_db_list(rec.host, rec.port)
            # Get UTC time
            curtime = datetime.datetime.strptime(
                datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
            res_user_res = res_user_obj.browse(1)
            # user's partner timezone
            tz = pytz.timezone(
                res_user_res.tz) if res_user_res.tz else pytz.utc
            # Set to user's localtime
            curtime = pytz.utc.localize(curtime).astimezone(tz)
            #curtime = curtime.astimezone(pytz.utc)
            if rec.name in db_list:
                try:
                    if not os.path.isdir(rec.bkp_dir):
                        os.makedirs(rec.bkp_dir)
                except:
                    raise
                # File name pattern: <db>_<YYYYmmdd_H_M_S>.zip
                bkp_file = '%s_%s.zip' % (
                    rec.name, curtime.strftime('%Y%m%d_%H_%M_%S'))
                file_path = os.path.join(rec.bkp_dir, bkp_file)
                uri = 'http://' + rec.host + ':' + rec.port
                conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
                bkp = ''
                try:
                    bkp = execute(conn, 'dump', master_pass, rec.name, 'zip')
                except:
                    _logger.info(
                        'backup', "Could'nt backup database %s. Bad database administrator password for server running at http://%s:%s" % (rec.name, rec.host, rec.port))
                    continue
                # The dump comes back base64-encoded; decode before writing.
                bkp = base64.decodestring(bkp)
                fp = open(file_path, 'wb')
                fp.write(bkp)
                fp.close()
            else:
                _logger.info('backup', "database %s doesn't exist on http://%s:%s" %
                             (rec.name, rec.host, rec.port))
        return True
    @api.model
    def schedule_backup_pgtool(self):
        """Cron entry point: dump each configured database locally via pg_dump."""
        confs = self.search([])
        master_pass = tools.config.get('admin_passwd', False)
        res_user_obj = self.env.get('res.users')
        if not master_pass:
            raise
        for rec in confs:
            db_list = self.get_db_list(rec.host, rec.port)
            # Get UTC time
            curtime = datetime.datetime.strptime(
                datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
            # NOTE(review): browse() here has no id (the zip variant uses
            # browse(1)); res_user_res.tz is then read off an empty recordset.
            res_user_res = res_user_obj.browse()
            # user's partner timezone
            tz = pytz.timezone(
                res_user_res.tz) if res_user_res.tz else pytz.utc
            # Set to user's localtime
            curtime = pytz.utc.localize(curtime).astimezone(tz)
            #curtime = curtime.astimezone(pytz.utc)
            if rec.name in db_list:
                try:
                    if not os.path.isdir(rec.bkp_dir):
                        os.makedirs(rec.bkp_dir)
                except:
                    raise
                bkp_file = '%s_%s.sql' % (
                    rec.name, curtime.strftime('%Y%m%d_%H_%M_%S'))
                file_path = os.path.join(rec.bkp_dir, bkp_file)
                bkp = ''
                try:
                    self._db_pg_dump(rec.name, file_path)
                except Exception, ex:
                    _logger.warn('auto_backup DUMP DB except: ' + str(ex))
                    continue
        return True
    @api.model
    def _db_pg_dump(self, db_name, db_filename):
        """Run pg_dump for ``db_name`` into ``db_filename``; returns base64 of pg_dump's stdout."""
        _logger.info('auto_backup DUMP DB!')
        pg_passwd = os.environ.get(
            'PGPASSWORD') or tools.config['db_password'] or False
        data = ''
        if not pg_passwd:
            _logger.error(
                'DUMP DB: %s failed! Please verify the configuration of the database password on the server. '
                'You may need to create a .pgpass file for authentication, or specify `db_password` in the '
                'server configuration file.\n %s', db_name, data)
            raise Exception, "Couldn't dump database"
        os.environ['PGPASSWORD'] = pg_passwd
        # Custom-format dump written directly to db_filename by pg_dump.
        cmd = ['pg_dump', '--format=c', '--no-owner']
        if tools.config['db_user']:
            cmd.append('--username=' + tools.config['db_user'])
        if tools.config['db_host']:
            cmd.append('--host=' + tools.config['db_host'])
        if tools.config['db_port']:
            cmd.append('--port=' + str(tools.config['db_port']))
        cmd.append('--file=' + db_filename)
        cmd.append(db_name)
        stdin, stdout = tools.exec_pg_command_pipe(*tuple(cmd))
        stdin.close()
        data = stdout.read()
        res = stdout.close()
        return base64.encodestring(data)
| agpl-3.0 |
Lasagne/Recipes | modelzoo/resnet50.py | 1 | 6996 | # ResNet-50, network from the paper:
# "Deep Residual Learning for Image Recognition"
# http://arxiv.org/pdf/1512.03385v1.pdf
# License: see https://github.com/KaimingHe/deep-residual-networks/blob/master/LICENSE
# Download pretrained weights from:
# https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/resnet50.pkl
import lasagne
from lasagne.layers import InputLayer
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers import BatchNormLayer
from lasagne.layers import Pool2DLayer as PoolLayer
from lasagne.layers import NonlinearityLayer
from lasagne.layers import ElemwiseSumLayer
from lasagne.layers import DenseLayer
from lasagne.nonlinearities import rectify, softmax
def build_simple_block(incoming_layer, names,
                       num_filters, filter_size, stride, pad,
                       use_bias=False, nonlin=rectify):
    """Stack Conv -> BatchNorm -> (Nonlinearity) on top of ``incoming_layer``.

    Parameters
    ----------
    incoming_layer : Lasagne layer
        Parent layer the block is attached to.
    names : list of str
        Names for the convolution, batch-norm and nonlinearity layers.
    num_filters, filter_size, stride, pad : int
        Convolution hyper-parameters.
    use_bias : bool
        Whether the convolution layer keeps its bias term.
    nonlin : callable or None
        Nonlinearity applied after batch-norm; skipped when ``None``.

    Returns
    -------
    (dict, str)
        Mapping of layer name -> layer, and the name of the last layer.
    """
    if use_bias:
        conv = ConvLayer(incoming_layer, num_filters, filter_size, stride, pad,
                         flip_filters=False, nonlinearity=None)
    else:
        # Bias is dropped (b=None): BatchNorm supplies the shift anyway.
        conv = ConvLayer(incoming_layer, num_filters, filter_size, stride, pad, b=None,
                         flip_filters=False, nonlinearity=None)
    stacked = [(names[0], conv)]
    stacked.append((names[1], BatchNormLayer(stacked[-1][1])))
    if nonlin is not None:
        stacked.append((names[2], NonlinearityLayer(stacked[-1][1], nonlinearity=nonlin)))
    return dict(stacked), stacked[-1][0]
def build_residual_block(incoming_layer, ratio_n_filter=1.0, ratio_size=1.0, has_left_branch=False,
                         upscale_factor=4, ix=''):
    """Creates two-branch residual block
    Parameters:
    ----------
    incoming_layer : instance of Lasagne layer
        Parent layer
    ratio_n_filter : float
        Scale factor of filter bank at the input of residual block
    ratio_size : float
        Scale factor of filter size
    has_left_branch : bool
        if True, then left branch contains simple block
    upscale_factor : float
        Scale factor of filter bank at the output of residual block
    ix : int
        Id of residual block
    Returns
    -------
    tuple: (net, last_layer_name)
    net : dict
        Dictionary with stacked layers
    last_layer_name : string
        Last layer name
    """
    # Name templates for the conv, batch-norm and relu layers of each branch.
    simple_block_name_pattern = ['res%s_branch%i%s', 'bn%s_branch%i%s', 'res%s_branch%i%s_relu']
    net = {}
    # right branch
    # 1x1 conv shrinking the filter count (and spatial size when ratio_size < 1).
    net_tmp, last_layer_name = build_simple_block(
        incoming_layer, list(map(lambda s: s % (ix, 2, 'a'), simple_block_name_pattern)),
        int(lasagne.layers.get_output_shape(incoming_layer)[1]*ratio_n_filter), 1, int(1.0/ratio_size), 0)
    net.update(net_tmp)
    # 3x3 conv at the bottleneck width.
    net_tmp, last_layer_name = build_simple_block(
        net[last_layer_name], list(map(lambda s: s % (ix, 2, 'b'), simple_block_name_pattern)),
        lasagne.layers.get_output_shape(net[last_layer_name])[1], 3, 1, 1)
    net.update(net_tmp)
    # 1x1 conv expanding back by upscale_factor; no nonlinearity before the sum.
    net_tmp, last_layer_name = build_simple_block(
        net[last_layer_name], list(map(lambda s: s % (ix, 2, 'c'), simple_block_name_pattern)),
        lasagne.layers.get_output_shape(net[last_layer_name])[1]*upscale_factor, 1, 1, 0,
        nonlin=None)
    net.update(net_tmp)
    right_tail = net[last_layer_name]
    left_tail = incoming_layer
    # left branch
    # Projection shortcut (1x1 conv), used when the block changes shape.
    if has_left_branch:
        net_tmp, last_layer_name = build_simple_block(
            incoming_layer, list(map(lambda s: s % (ix, 1, ''), simple_block_name_pattern)),
            int(lasagne.layers.get_output_shape(incoming_layer)[1]*4*ratio_n_filter), 1, int(1.0/ratio_size), 0,
            nonlin=None)
        net.update(net_tmp)
        left_tail = net[last_layer_name]
    # Residual sum followed by the block's output ReLU.
    net['res%s' % ix] = ElemwiseSumLayer([left_tail, right_tail], coeffs=1)
    net['res%s_relu' % ix] = NonlinearityLayer(net['res%s' % ix], nonlinearity=rectify)
    return net, 'res%s_relu' % ix
def build_model():
    """Assemble the full ResNet-50 network.

    Returns a dict mapping layer names to Lasagne layers; 'prob' is the
    softmax output over the 1000 ImageNet classes.
    """
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    # Stem: 7x7/2 convolution followed by 3x3/2 max pooling.
    sub_net, parent_layer_name = build_simple_block(
        net['input'], ['conv1', 'bn_conv1', 'conv1_relu'],
        64, 7, 2, 3, use_bias=True)
    net.update(sub_net)
    net['pool1'] = PoolLayer(net[parent_layer_name], pool_size=3, stride=2, pad=0, mode='max', ignore_border=False)
    # Stage 2: three residual blocks; 'a' uses a projection shortcut.
    block_size = list('abc')
    parent_layer_name = 'pool1'
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1, 1, True, 4, ix='2%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='2%s' % c)
        net.update(sub_net)
    # Stage 3: four residual blocks; 'a' halves the spatial resolution.
    block_size = list('abcd')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='3%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='3%s' % c)
        net.update(sub_net)
    # Stage 4: six residual blocks.
    block_size = list('abcdef')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='4%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='4%s' % c)
        net.update(sub_net)
    # Stage 5: three residual blocks.
    block_size = list('abc')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='5%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='5%s' % c)
        net.update(sub_net)
    # Global average pooling + 1000-way linear classifier + softmax.
    net['pool5'] = PoolLayer(net[parent_layer_name], pool_size=7, stride=1, pad=0,
                             mode='average_exc_pad', ignore_border=False)
    net['fc1000'] = DenseLayer(net['pool5'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc1000'], nonlinearity=softmax)
    return net
| mit |
centwave/jg82ksgvqkuan | django/conf/locale/id/formats.py | 355 | 1818 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i.s"
TIME_FORMAT = 'G.i.s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i.s'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d-%m-%y', '%d/%m/%y', # '25-10-09', '25/10/09'
    '%d-%m-%Y', '%d/%m/%Y', # '25-10-2009', '25/10/2009'
    '%d %b %Y', # '25 Oct 2006'
    '%d %B %Y', # '25 October 2006'
)
TIME_INPUT_FORMATS = (
    '%H.%M.%S', # '14.30.59'
    '%H.%M', # '14.30'
)
DATETIME_INPUT_FORMATS = (
    '%d-%m-%Y %H.%M.%S', # '25-10-2009 14.30.59'
    '%d-%m-%Y %H.%M', # '25-10-2009 14.30'
    '%d-%m-%Y', # '25-10-2009'
    '%d-%m-%y %H.%M.%S', # '25-10-09 14.30.59'
    '%d-%m-%y %H.%M', # '25-10-09 14.30'
    '%d-%m-%y', # '25-10-09'
    '%m/%d/%y %H.%M.%S', # '10/25/06 14.30.59'
    '%m/%d/%y %H.%M', # '10/25/06 14.30'
    '%m/%d/%y', # '10/25/06'
    '%m/%d/%Y %H.%M.%S', # '10/25/2009 14.30.59'
    '%m/%d/%Y %H.%M', # '10/25/2009 14.30'
    '%m/%d/%Y', # '10/25/2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
astaff/ansible | lib/ansible/module_utils/urls.py | 125 | 31558 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com>, 2015
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The match_hostname function and supporting code is under the terms and
# conditions of the Python Software Foundation License. They were taken from
# the Python3 standard library and adapted for use in Python2. See comments in the
# source for which code precisely is under this License. PSF License text
# follows:
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
# retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
# Feature detection: these modules can be missing on very old Python 2
# interpreters, so record availability in HAS_* flags instead of failing
# at import time.  Callers check the flags before using each feature.
try:
    import urllib2
    HAS_URLLIB2 = True
except:
    HAS_URLLIB2 = False
try:
    import urlparse
    HAS_URLPARSE = True
except:
    HAS_URLPARSE = False
try:
    import ssl
    HAS_SSL = True
except:
    HAS_SSL = False
try:
    # SNI Handling needs python2.7.9's SSLContext
    from ssl import create_default_context, SSLContext
    HAS_SSLCONTEXT = True
except ImportError:
    HAS_SSLCONTEXT = False
# Select a protocol that includes all secure tls protocols
# Exclude insecure ssl protocols if possible
if HAS_SSL:
    # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
    PROTOCOL = ssl.PROTOCOL_TLSv1
if not HAS_SSLCONTEXT and HAS_SSL:
    # No SSLContext available: probe the system OpenSSL directly via ctypes
    # to see whether it supports TLS 1.1/1.2 before widening the protocol.
    try:
        import ctypes, ctypes.util
    except ImportError:
        # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
        pass
    else:
        libssl_name = ctypes.util.find_library('ssl')
        libssl = ctypes.CDLL(libssl_name)
        for method in ('TLSv1_1_method', 'TLSv1_2_method'):
            try:
                libssl[method]
                # Found something - we'll let openssl autonegotiate and hope
                # the server has disabled sslv2 and 3. best we can do.
                PROTOCOL = ssl.PROTOCOL_SSLv23
                break
            except AttributeError:
                pass
        del libssl
# Try to obtain match_hostname/CertificateError from the stdlib or the
# backports package; if neither is available, fall back to the inlined
# PSF-licensed implementation below.
HAS_MATCH_HOSTNAME = True
try:
    from ssl import match_hostname, CertificateError
except ImportError:
    try:
        from backports.ssl_match_hostname import match_hostname, CertificateError
    except ImportError:
        HAS_MATCH_HOSTNAME = False
if not HAS_MATCH_HOSTNAME:
    ###
    ### The following block of code is under the terms and conditions of the
    ### Python Software Foundation License
    ###
    """The match_hostname() function from Python 3.4, essential when using SSL."""
    import re

    class CertificateError(ValueError):
        """Raised when a certificate does not match the expected hostname."""
        pass
    def _dnsname_match(dn, hostname, max_wildcards=1):
        """Matching according to RFC 6125, section 6.4.3

        http://tools.ietf.org/html/rfc6125#section-6.4.3

        Returns a truthy value (regex match object or True-like bool) when
        the certificate DNS name *dn* matches *hostname*; raises
        CertificateError when *dn* contains more than *max_wildcards*
        wildcard characters.
        """
        pats = []
        if not dn:
            return False
        # Ported from python3-syntax:
        # leftmost, *remainder = dn.split(r'.')
        parts = dn.split(r'.')
        leftmost = parts[0]
        remainder = parts[1:]
        wildcards = leftmost.count('*')
        if wildcards > max_wildcards:
            # Issue #17980: avoid denials of service by refusing more
            # than one wildcard per fragment. A survey of established
            # policy among SSL implementations showed it to be a
            # reasonable choice.
            raise CertificateError(
                "too many wildcards in certificate DNS name: " + repr(dn))
        # speed up common case w/o wildcards
        if not wildcards:
            return dn.lower() == hostname.lower()
        # RFC 6125, section 6.4.3, subitem 1.
        # The client SHOULD NOT attempt to match a presented identifier in which
        # the wildcard character comprises a label other than the left-most label.
        if leftmost == '*':
            # When '*' is a fragment by itself, it matches a non-empty dotless
            # fragment.
            pats.append('[^.]+')
        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
            # RFC 6125, section 6.4.3, subitem 3.
            # The client SHOULD NOT attempt to match a presented identifier
            # where the wildcard character is embedded within an A-label or
            # U-label of an internationalized domain name.
            pats.append(re.escape(leftmost))
        else:
            # Otherwise, '*' matches any dotless string, e.g. www*
            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
        # add the remaining fragments, ignore any wildcards
        for frag in remainder:
            pats.append(re.escape(frag))
        # anchor the whole pattern; IGNORECASE because DNS is case-insensitive
        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
        return pat.match(hostname)
    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
        rules are followed, but IP addresses are not accepted for *hostname*.

        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate")
        dnsnames = []
        # prefer subjectAltName DNS entries over the subject commonName
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_match(value, hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_match(value, hostname):
                            return
                        dnsnames.append(value)
        # nothing matched -- raise with a message describing what was tried
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r "
                "doesn't match either of %s"
                % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r "
                "doesn't match %r"
                % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or "
                "subjectAltName fields were found")
###
### End of Python Software Foundation Licensed code
###
HAS_MATCH_HOSTNAME = True
import httplib
import os
import re
import sys
import socket
import platform
import tempfile
import base64
# This is a dummy cacert provided for Mac OS since you need at least 1
# ca cert, regardless of validity, for Python on Mac OS to use the
# keychain functionality in OpenSSL for validating SSL certificates.
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
DUMMY_CA_CERT = """-----BEGIN CERTIFICATE-----
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
-----END CERTIFICATE-----
"""
#
# Exceptions
#
# Hierarchy: ConnectionError is the root; ProxyError and SSLValidationError
# specialize it; NoSSLError specializes SSLValidationError.  Callers can
# catch ConnectionError to handle all of them.
class ConnectionError(Exception):
    """Failed to connect to the server"""
    pass

class ProxyError(ConnectionError):
    """Failure to connect because of a proxy"""
    pass

class SSLValidationError(ConnectionError):
    """Failure to connect due to SSL validation failing"""
    pass

class NoSSLError(SSLValidationError):
    """Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
    pass
class CustomHTTPSConnection(httplib.HTTPSConnection):
    """HTTPSConnection that uses a modern, validating SSLContext (with SNI
    support) when available (python >= 2.7.9), falling back to plain
    ssl.wrap_socket otherwise."""

    def __init__(self, *args, **kwargs):
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)
        if HAS_SSLCONTEXT:
            # create_default_context() enables certificate and hostname
            # validation; attach a client cert/key pair when one was given
            self.context = create_default_context()
            if self.cert_file:
                self.context.load_cert_chain(self.cert_file, self.key_file)

    def connect(self):
        "Connect to a host on a given (SSL) port."
        # source_address only exists on python >= 2.7
        if hasattr(self, 'source_address'):
            sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
        else:
            sock = socket.create_connection((self.host, self.port), self.timeout)
        if self._tunnel_host:
            # tunneling through an HTTP CONNECT proxy
            self.sock = sock
            self._tunnel()
        if HAS_SSLCONTEXT:
            # SSLContext path validates the certificate and hostname (SNI-aware)
            self.sock = self.context.wrap_socket(sock, server_hostname=self.host)
        else:
            self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
class CustomHTTPSHandler(urllib2.HTTPSHandler):
    """urllib2 handler that opens HTTPS connections via CustomHTTPSConnection."""

    def https_open(self, req):
        return self.do_open(CustomHTTPSConnection, req)

    # reuse the standard request preprocessing from AbstractHTTPHandler
    https_request = urllib2.AbstractHTTPHandler.do_request_
def generic_urlparse(parts):
    '''
    Returns a dictionary of url parts as parsed by urlparse,
    but accounts for the fact that older versions of that
    library do not support named attributes (ie. .netloc).

    :param parts: the result of ``urlparse.urlparse()`` -- either a
        named-attribute object (newer pythons) or a plain 6-tuple.
    :returns: dict with keys scheme, netloc, path, params, query,
        fragment, username, password, hostname and port; the last four
        are ``None`` when they cannot be determined.
    '''
    generic_parts = dict()
    if hasattr(parts, 'netloc'):
        # urlparse is newer, just read the fields straight
        # from the parts object
        generic_parts['scheme'] = parts.scheme
        generic_parts['netloc'] = parts.netloc
        generic_parts['path'] = parts.path
        generic_parts['params'] = parts.params
        generic_parts['query'] = parts.query
        generic_parts['fragment'] = parts.fragment
        generic_parts['username'] = parts.username
        generic_parts['password'] = parts.password
        generic_parts['hostname'] = parts.hostname
        generic_parts['port'] = parts.port
    else:
        # we have to use indexes, and then parse out
        # the other parts not supported by indexing
        generic_parts['scheme'] = parts[0]
        generic_parts['netloc'] = parts[1]
        generic_parts['path'] = parts[2]
        generic_parts['params'] = parts[3]
        generic_parts['query'] = parts[4]
        generic_parts['fragment'] = parts[5]
        # get the username, password, etc.
        try:
            netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
            # BUG FIX: the original unpacked the match object itself instead
            # of calling .groups(), which always raised and silently left
            # every derived field as None.
            (auth, hostname, port) = netloc_re.match(parts[1]).groups()
            if port:
                # the capture group for the port will include the ':',
                # so remove it and convert the port to an integer
                port = int(port[1:])
            if auth:
                # the capture group above includes the @, so remove it
                # and then split it up based on the first ':' found
                auth = auth[:-1]
                username, password = auth.split(':', 1)
                generic_parts['username'] = username
                generic_parts['password'] = password
            else:
                # no credentials embedded in the netloc; still populate the
                # keys so callers can rely on them being present
                generic_parts['username'] = None
                generic_parts['password'] = None
            generic_parts['hostname'] = hostname
            generic_parts['port'] = port
        except:
            # unparsable netloc (regex mismatch, bad port, ...): degrade to
            # all-None derived fields, matching the attribute-based branch
            generic_parts['username'] = None
            generic_parts['password'] = None
            generic_parts['hostname'] = None
            generic_parts['port'] = None
    return generic_parts
class RequestWithMethod(urllib2.Request):
    '''
    Workaround for using DELETE/PUT/etc with urllib2
    Originally contained in library/net_infrastructure/dnsmadeeasy
    '''

    def __init__(self, url, method, data=None, headers=None):
        # BUG FIX: the original used a mutable default argument
        # (headers={}), which is shared between every call; use None as
        # the sentinel and substitute a fresh dict per instance.
        self._method = method
        urllib2.Request.__init__(self, url, data, headers or {})

    def get_method(self):
        # honor the explicit method; otherwise defer to urllib2's default
        # (GET, or POST when data is present)
        if self._method:
            return self._method
        else:
            return urllib2.Request.get_method(self)
class SSLValidationHandler(urllib2.BaseHandler):
'''
A custom handler class for SSL validation.
Based on:
http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
http://techknack.net/python-urllib2-handlers/
'''
CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"
def __init__(self, hostname, port):
self.hostname = hostname
self.port = port
def get_ca_certs(self):
# tries to find a valid CA cert in one of the
# standard locations for the current distribution
ca_certs = []
paths_checked = []
system = platform.system()
# build a list of paths to check for .crt/.pem files
# based on the platform type
paths_checked.append('/etc/ssl/certs')
if system == 'Linux':
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
paths_checked.append('/etc/pki/tls/certs')
paths_checked.append('/usr/share/ca-certificates/cacert.org')
elif system == 'FreeBSD':
paths_checked.append('/usr/local/share/certs')
elif system == 'OpenBSD':
paths_checked.append('/etc/ssl')
elif system == 'NetBSD':
ca_certs.append('/etc/openssl/certs')
elif system == 'SunOS':
paths_checked.append('/opt/local/etc/openssl/certs')
# fall back to a user-deployed cert in a standard
# location if the OS platform one is not available
paths_checked.append('/etc/ansible')
tmp_fd, tmp_path = tempfile.mkstemp()
# Write the dummy ca cert if we are running on Mac OS X
if system == 'Darwin':
os.write(tmp_fd, DUMMY_CA_CERT)
# Default Homebrew path for OpenSSL certs
paths_checked.append('/usr/local/etc/openssl')
# for all of the paths, find any .crt or .pem files
# and compile them into single temp file for use
# in the ssl check to speed up the test
for path in paths_checked:
if os.path.exists(path) and os.path.isdir(path):
dir_contents = os.listdir(path)
for f in dir_contents:
full_path = os.path.join(path, f)
if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'):
try:
cert_file = open(full_path, 'r')
os.write(tmp_fd, cert_file.read())
os.write(tmp_fd, '\n')
cert_file.close()
except:
pass
return (tmp_path, paths_checked)
def validate_proxy_response(self, response, valid_codes=[200]):
'''
make sure we get back a valid code from the proxy
'''
try:
(http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
if int(resp_code) not in valid_codes:
raise Exception
except:
raise ProxyError('Connection to proxy failed')
def detect_no_proxy(self, url):
'''
Detect if the 'no_proxy' environment variable is set and honor those locations.
'''
env_no_proxy = os.environ.get('no_proxy')
if env_no_proxy:
env_no_proxy = env_no_proxy.split(',')
netloc = urlparse.urlparse(url).netloc
for host in env_no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# Our requested URL matches something in no_proxy, so don't
# use the proxy for this
return False
return True
def _make_context(self, tmp_ca_cert_path):
context = create_default_context()
context.load_verify_locations(tmp_ca_cert_path)
return context
def http_request(self, req):
tmp_ca_cert_path, paths_checked = self.get_ca_certs()
https_proxy = os.environ.get('https_proxy')
context = None
if HAS_SSLCONTEXT:
context = self._make_context(tmp_ca_cert_path)
# Detect if 'no_proxy' environment variable is set and if our URL is included
use_proxy = self.detect_no_proxy(req.get_full_url())
if not use_proxy:
# ignore proxy settings for this host request
return req
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if https_proxy:
proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy))
s.connect((proxy_parts.get('hostname'), proxy_parts.get('port')))
if proxy_parts.get('scheme') == 'http':
s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
if proxy_parts.get('username'):
credentials = "%s:%s" % (proxy_parts.get('username',''), proxy_parts.get('password',''))
s.sendall('Proxy-Authorization: Basic %s\r\n' % credentials.encode('base64').strip())
s.sendall('\r\n')
connect_result = s.recv(4096)
self.validate_proxy_response(connect_result)
if context:
ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname'))
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
else:
raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
else:
s.connect((self.hostname, self.port))
if context:
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
# close the ssl connection
#ssl_s.unwrap()
s.close()
except (ssl.SSLError, socket.error), e:
# fail if we tried all of the certs but none worked
if 'connection refused' in str(e).lower():
raise ConnectionError('Failed to connect to %s:%s.' % (self.hostname, self.port))
else:
raise SSLValidationError('Failed to validate the SSL certificate for %s:%s.'
' Make sure your managed systems have a valid CA'
' certificate installed. If the website serving the url'
' uses SNI you need python >= 2.7.9 on your managed'
' machine. You can use validate_certs=False if you do'
' not need to confirm the server\s identity but this is'
' unsafe and not recommended'
' Paths checked for this platform: %s' % (self.hostname, self.port, ", ".join(paths_checked))
)
except CertificateError:
raise SSLValidationError("SSL Certificate does not belong to %s. Make sure the url has a certificate that belongs to it or use validate_certs=False (insecure)" % self.hostname)
try:
# cleanup the temp file created, don't worry
# if it fails for some reason
os.remove(tmp_ca_cert_path)
except:
pass
return req
https_request = http_request
# Rewrite of fetch_url to not require the module environment
def open_url(url, data=None, headers=None, method=None, use_proxy=True,
        force=False, last_mod_time=None, timeout=10, validate_certs=True,
        url_username=None, url_password=None, http_agent=None, force_basic_auth=False):
    '''
    Fetches a file from an HTTP/FTP server using urllib2.

    Returns the urllib2 response object.  SSL certificates are validated
    first (via SSLValidationHandler) unless validate_certs is False.
    Credentials may come from url_username/url_password or be embedded in
    the url itself (user:pass@host); force_basic_auth sends them
    preemptively instead of waiting for a 401 challenge.
    '''
    handlers = []
    # FIXME: change the following to use the generic_urlparse function
    # to remove the indexed references for 'parsed'
    parsed = urlparse.urlparse(url)
    if parsed[0] == 'https' and validate_certs:
        if not HAS_SSL:
            raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False, however this is unsafe and not recommended')
        # do the cert validation
        netloc = parsed[1]
        if '@' in netloc:
            # strip embedded credentials before extracting host/port
            netloc = netloc.split('@', 1)[1]
        if ':' in netloc:
            hostname, port = netloc.split(':', 1)
            port = int(port)
        else:
            hostname = netloc
            port = 443
        # create the SSL validation handler and
        # add it to the list of handlers
        ssl_handler = SSLValidationHandler(hostname, port)
        handlers.append(ssl_handler)
    if parsed[0] != 'ftp':
        username = url_username
        if username:
            password = url_password
            netloc = parsed[1]
        elif '@' in parsed[1]:
            # credentials embedded in the url: pull them out and rebuild
            # the url without them
            credentials, netloc = parsed[1].split('@', 1)
            if ':' in credentials:
                username, password = credentials.split(':', 1)
            else:
                username = credentials
                password = ''
            parsed = list(parsed)
            parsed[1] = netloc
            # reconstruct url without credentials
            url = urlparse.urlunparse(parsed)
        if username and not force_basic_auth:
            passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
            # this creates a password manager
            passman.add_password(None, netloc, username, password)
            # because we have put None at the start it will always
            # use this username/password combination for urls
            # for which `theurl` is a super-url
            authhandler = urllib2.HTTPBasicAuthHandler(passman)
            # create the AuthHandler
            handlers.append(authhandler)
        elif username and force_basic_auth:
            # send the Authorization header unconditionally (some servers
            # never issue a 401 challenge)
            if headers is None:
                headers = {}
            headers["Authorization"] = "Basic %s" % base64.b64encode("%s:%s" % (username, password))
    if not use_proxy:
        # an empty ProxyHandler disables environment proxy settings
        proxyhandler = urllib2.ProxyHandler({})
        handlers.append(proxyhandler)
    # pre-2.6 versions of python cannot use the custom https
    # handler, since the socket class is lacking this method
    if hasattr(socket, 'create_connection'):
        handlers.append(CustomHTTPSHandler)
    opener = urllib2.build_opener(*handlers)
    urllib2.install_opener(opener)
    if method:
        if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'):
            raise ConnectionError('invalid HTTP request method; %s' % method.upper())
        request = RequestWithMethod(url, method.upper(), data)
    else:
        request = urllib2.Request(url, data)
    # add the custom agent header, to help prevent issues
    # with sites that block the default urllib agent string
    request.add_header('User-agent', http_agent)
    # if we're ok with getting a 304, set the timestamp in the
    # header, otherwise make sure we don't get a cached copy
    if last_mod_time and not force:
        tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
        request.add_header('If-Modified-Since', tstamp)
    else:
        request.add_header('cache-control', 'no-cache')
    # user defined headers now, which may override things we've set above
    if headers:
        if not isinstance(headers, dict):
            raise ValueError("headers provided to fetch_url() must be a dict")
        for header in headers:
            request.add_header(header, headers[header])
    urlopen_args = [request, None]
    if sys.version_info >= (2,6,0):
        # urlopen in python prior to 2.6.0 did not
        # have a timeout parameter
        urlopen_args.append(timeout)
    if HAS_SSLCONTEXT and not validate_certs:
        # In 2.7.9, the default context validates certificates
        # so build an explicitly non-validating context instead
        context = SSLContext(ssl.PROTOCOL_SSLv23)
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        context.verify_mode = ssl.CERT_NONE
        context.check_hostname = False
        # positional filler for cafile/capath/cadefault before context
        urlopen_args += (None, None, None, context)
    r = urllib2.urlopen(*urlopen_args)
    return r
#
# Module-related functions
#
def url_argument_spec():
    '''
    Build the shared argument spec for any module that fetches content
    via urllib/urllib2: url plus the common fetch-behavior options
    (force, proxy usage, certificate validation and credentials).
    '''
    return {
        'url': {},
        'force': {'default': 'no', 'aliases': ['thirsty'], 'type': 'bool'},
        'http_agent': {'default': 'ansible-httpget'},
        'use_proxy': {'default': 'yes', 'type': 'bool'},
        'validate_certs': {'default': 'yes', 'type': 'bool'},
        'url_username': {'required': False},
        'url_password': {'required': False},
        'force_basic_auth': {'required': False, 'type': 'bool', 'default': 'no'},
    }
def fetch_url(module, url, data=None, headers=None, method=None,
        use_proxy=True, force=False, last_mod_time=None, timeout=10):
    '''
    Fetches a file from an HTTP/FTP server using urllib2. Requires the module environment.

    Returns (response, info) where response is the urllib2 response object
    (or None on failure) and info is a dict with at least 'url', 'msg' and
    'status' describing the outcome.  Fetch options (validate_certs,
    credentials, agent) are read from module.params.
    '''
    if not HAS_URLLIB2:
        module.fail_json(msg='urllib2 is not installed')
    elif not HAS_URLPARSE:
        module.fail_json(msg='urlparse is not installed')
    # Get validate_certs from the module params
    validate_certs = module.params.get('validate_certs', True)
    username = module.params.get('url_username', '')
    password = module.params.get('url_password', '')
    http_agent = module.params.get('http_agent', None)
    force_basic_auth = module.params.get('force_basic_auth', '')
    r = None
    info = dict(url=url)
    try:
        r = open_url(url, data=data, headers=headers, method=method,
                use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
                validate_certs=validate_certs, url_username=username,
                url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth)
        info.update(r.info())
        info['url'] = r.geturl()  # The URL goes in too, because of redirects.
        info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
    except NoSSLError, e:
        # NOTE(review): get_distribution is not defined in this view -- it is
        # presumably provided elsewhere in module_utils; confirm before reuse.
        # Also note: on non-RedHat distributions this branch fails nothing
        # and falls through, returning (None, {'url': url}).
        distribution = get_distribution()
        if distribution.lower() == 'redhat':
            module.fail_json(msg='%s. You can also install python-ssl from EPEL' % str(e))
    except (ConnectionError, ValueError), e:
        module.fail_json(msg=str(e))
    except urllib2.HTTPError, e:
        # HTTP-level errors still carry a status code for the caller
        info.update(dict(msg=str(e), status=e.code))
    except urllib2.URLError, e:
        code = int(getattr(e, 'code', -1))
        info.update(dict(msg="Request failed: %s" % str(e), status=code))
    except socket.error, e:
        info.update(dict(msg="Connection failure: %s" % str(e), status=-1))
    except Exception, e:
        info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1))
    return r, info
| gpl-3.0 |
nmrao/robotframework | src/robot/utils/compress.py | 25 | 1512 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import sys
def compress_text(text):
    """UTF-8 encode ``text``, compress it, and return the result as
    Base64-encoded data."""
    raw = text.encode('UTF-8')
    return base64.b64encode(_compress(raw))
# Platform split: CPython uses the stdlib zlib; Jython uses a hand-rolled
# Deflater loop (see note below).  Both produce raw zlib-compatible output.
if not sys.platform.startswith('java'):
    import zlib

    def _compress(text):
        # level 9 = maximum compression; output size matters more than speed
        return zlib.compress(text, 9)

else:
    # Custom compress implementation was originally used to avoid memory leak
    # (http://bugs.jython.org/issue1775). Kept around still because it is a bit
    # faster than Jython's standard zlib.compress.
    from java.util.zip import Deflater
    import jarray

    # NOTE(review): a single module-level Deflater is shared by all calls,
    # so _compress is not safe for concurrent use -- confirm callers are
    # single-threaded.
    _DEFLATOR = Deflater(9, False)

    def _compress(text):
        _DEFLATOR.setInput(text)
        _DEFLATOR.finish()
        buf = jarray.zeros(1024, 'b')
        compressed = []
        # drain the deflater 1 KiB at a time until all input is consumed
        while not _DEFLATOR.finished():
            length = _DEFLATOR.deflate(buf, 0, 1024)
            compressed.append(buf[:length].tostring())
        # reset so the shared deflater can be reused by the next call
        _DEFLATOR.reset()
        return ''.join(compressed)
| apache-2.0 |
felipenaselva/felipe.repository | plugin.video.neptune/resources/lib/modules/playcount.py | 5 | 8985 | # -*- coding: utf-8 -*-
'''
Neptune Rising Add-on
Copyright (C) 2016 Poseidon
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import json
from resources.lib.modules import control
from resources.lib.modules import trakt
def getMovieIndicators(refresh=False):
    """Return the watched-status indicator source for movies.

    Uses the local metahandlers database when trakt indicators are
    disabled, otherwise a (possibly cached) trakt sync.  Returns None when
    neither source works -- every failure is deliberately swallowed so
    indicator lookup never breaks listing.
    """
    try:
        # metahandlers path: only when trakt indicators are turned off
        if trakt.getTraktIndicatorsInfo() == True: raise Exception()
        from metahandler import metahandlers
        indicators = metahandlers.MetaData(preparezip=False)
        return indicators
    except:
        pass
    try:
        # trakt path: only when trakt indicators are turned on
        if trakt.getTraktIndicatorsInfo() == False: raise Exception()
        # use the long cache timeout unless trakt reports watched activity
        # newer than the last sync
        if refresh == False: timeout = 720
        elif trakt.getWatchedActivity() < trakt.timeoutsyncMovies(): timeout = 720
        else: timeout = 0
        indicators = trakt.cachesyncMovies(timeout=timeout)
        return indicators
    except:
        pass
def getTVShowIndicators(refresh=False):
    """Return the watched-status indicator source for TV shows.

    Mirrors getMovieIndicators: metahandlers when trakt indicators are
    disabled, otherwise a cached trakt sync.  Returns None on failure
    (all errors swallowed, best effort).
    """
    try:
        # metahandlers path: only when trakt indicators are turned off
        if trakt.getTraktIndicatorsInfo() == True: raise Exception()
        from metahandler import metahandlers
        indicators = metahandlers.MetaData(preparezip=False)
        return indicators
    except:
        pass
    try:
        # trakt path: only when trakt indicators are turned on
        if trakt.getTraktIndicatorsInfo() == False: raise Exception()
        # use the long cache timeout unless trakt reports watched activity
        # newer than the last sync
        if refresh == False: timeout = 720
        elif trakt.getWatchedActivity() < trakt.timeoutsyncTVShows(): timeout = 720
        else: timeout = 0
        indicators = trakt.cachesyncTVShows(timeout=timeout)
        return indicators
    except:
        pass
def getSeasonIndicators(imdb):
    """Return per-season watched indicators for the show *imdb* from
    trakt, or None when trakt indicators are disabled or the lookup
    fails (errors swallowed, best effort)."""
    try:
        if trakt.getTraktIndicatorsInfo() == False: raise Exception()
        indicators = trakt.syncSeason(imdb)
        return indicators
    except:
        pass
def getMovieOverlay(indicators, imdb):
    """Return the overlay code for a movie: '7' when watched, '6' otherwise.

    ``indicators`` is either a metahandlers object (queried via
    _get_watched) or an iterable of watched imdb ids; any failure yields
    the unwatched code '6'.
    """
    try:
        try:
            # metahandlers-style source
            return str(indicators._get_watched('movie', imdb, '', ''))
        except:
            # trakt-style source: a plain collection of watched imdb ids
            matched = [entry for entry in indicators if entry == imdb]
            return '7' if matched else '6'
    except:
        return '6'
def getTVShowOverlay(indicators, tvdb):
    """Return the overlay code for a show: '7' when fully watched, else '6'.

    Each indicator entry is a (tvdb_id, episode_count, watched_episodes)
    triple; the show counts as watched when at least episode_count
    episodes have been seen.  Any error yields '6'.
    """
    try:
        fully_watched = [entry[0] for entry in indicators
                         if entry[0] == tvdb and len(entry[2]) >= int(entry[1])]
        return '7' if fully_watched else '6'
    except:
        return '6'
def getEpisodeOverlay(indicators, imdb, tvdb, season, episode):
    """Return the overlay code for an episode: '7' watched, '6' unwatched.

    ``indicators`` is either a metahandlers object (queried via
    _get_watched_episode) or an iterable of (tvdb_id, count, episodes)
    triples whose third element lists watched (season, episode) pairs.
    Any error yields '6'.
    """
    try:
        try:
            # metahandlers-style source
            watched = indicators._get_watched_episode(
                {'imdb_id': imdb, 'season': season, 'episode': episode, 'premiered': ''})
            return str(watched)
        except:
            # trakt-style source: find this show's watched-episode list
            per_show = [entry[2] for entry in indicators if entry[0] == tvdb]
            seen = per_show[0] if per_show else []
            hits = [pair for pair in seen
                    if int(season) == int(pair[0]) and int(episode) == int(pair[1])]
            return '7' if hits else '6'
    except:
        return '6'
def markMovieDuringPlayback(imdb, watched):
    """Persist a movie's watched flag (7=watched, 6=unwatched) after
    playback, to trakt and/or the local metahandlers cache.  Failures are
    swallowed: marking is best effort and must never break playback."""
    try:
        if trakt.getTraktIndicatorsInfo() == False: raise Exception()

        if int(watched) == 7: trakt.markMovieAsWatched(imdb)
        else: trakt.markMovieAsNotWatched(imdb)
        trakt.cachesyncMovies()

        # when the trakt addon also scrobbles, undo our mark so the movie
        # is not double-counted there
        if trakt.getTraktAddonMovieInfo() == True:
            trakt.markMovieAsNotWatched(imdb)
    except:
        pass
    try:
        # always mirror the state into the local metahandlers database
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        metaget.get_meta('movie', name='', imdb_id=imdb)
        metaget.change_watched('movie', name='', imdb_id=imdb, watched=int(watched))
    except:
        pass
def markEpisodeDuringPlayback(imdb, tvdb, season, episode, watched):
    """Persist an episode's watched flag (7=watched, 6=unwatched) after
    playback, to trakt and/or the local metahandlers cache.  Failures are
    swallowed: marking is best effort and must never break playback."""
    try:
        if trakt.getTraktIndicatorsInfo() == False: raise Exception()

        if int(watched) == 7: trakt.markEpisodeAsWatched(tvdb, season, episode)
        else: trakt.markEpisodeAsNotWatched(tvdb, season, episode)
        trakt.cachesyncTVShows()

        # when the trakt addon also scrobbles, undo our mark so the episode
        # is not double-counted there
        if trakt.getTraktAddonEpisodeInfo() == True:
            trakt.markEpisodeAsNotWatched(tvdb, season, episode)
    except:
        pass
    try:
        # always mirror the state into the local metahandlers database
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        metaget.get_meta('tvshow', name='', imdb_id=imdb)
        metaget.get_episode_meta('', imdb_id=imdb, season=season, episode=episode)
        metaget.change_watched('episode', '', imdb_id=imdb, season=season, episode=episode, watched=int(watched))
    except:
        pass
def movies(imdb, watched):
    """Context-menu handler: mark a movie watched (7) or unwatched (6)
    in trakt and/or metahandlers, then refresh the listing."""
    control.busy()
    try:
        if trakt.getTraktIndicatorsInfo() == False: raise Exception()
        if int(watched) == 7: trakt.markMovieAsWatched(imdb)
        else: trakt.markMovieAsNotWatched(imdb)
        trakt.cachesyncMovies()
        control.refresh()
    except:
        pass
    try:
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        metaget.get_meta('movie', name='', imdb_id=imdb)
        metaget.change_watched('movie', name='', imdb_id=imdb, watched=int(watched))
        # trakt path already refreshed above; only refresh here otherwise
        if trakt.getTraktIndicatorsInfo() == False: control.refresh()
    except:
        pass
def episodes(imdb, tvdb, season, episode, watched):
    """Context-menu handler: mark one episode watched (7) or unwatched (6)
    in trakt and/or metahandlers, then refresh the listing."""
    control.busy()
    try:
        if trakt.getTraktIndicatorsInfo() == False: raise Exception()
        if int(watched) == 7: trakt.markEpisodeAsWatched(tvdb, season, episode)
        else: trakt.markEpisodeAsNotWatched(tvdb, season, episode)
        trakt.cachesyncTVShows()
        control.refresh()
    except:
        pass
    try:
        from metahandler import metahandlers
        metaget = metahandlers.MetaData(preparezip=False)
        metaget.get_meta('tvshow', name='', imdb_id=imdb)
        metaget.get_episode_meta('', imdb_id=imdb, season=season, episode=episode)
        metaget.change_watched('episode', '', imdb_id=imdb, season=season, episode=episode, watched=int(watched))
        # trakt path already refreshed above; only refresh here otherwise
        if trakt.getTraktIndicatorsInfo() == False: control.refresh()
    except:
        pass
def tvshows(tvshowtitle, imdb, tvdb, season, watched):
    """Context-menu handler: mark a whole show (or one season when
    ``season`` is truthy) watched (7) or unwatched (6).

    The metahandlers branch walks every episode with a background progress
    dialog; the trakt branch marks episodes (per season) or the entire
    show in one call.  All failures are swallowed; the listing is
    refreshed at the end either way.
    """
    control.busy()
    try:
        import sys,xbmc
        # metahandlers branch: only when trakt indicators are disabled
        if not trakt.getTraktIndicatorsInfo() == False: raise Exception()
        from metahandler import metahandlers
        from resources.lib.indexers import episodes
        metaget = metahandlers.MetaData(preparezip=False)
        name = control.addonInfo('name')
        dialog = control.progressDialogBG
        dialog.create(str(name), str(tvshowtitle))
        dialog.update(0, str(name), str(tvshowtitle))
        metaget.get_meta('tvshow', name='', imdb_id=imdb)
        items = episodes.episodes().get(tvshowtitle, '0', imdb, tvdb, '0', idx=False)
        # when a season was given, restrict to that season's episodes
        try: items = [i for i in items if int('%01d' % int(season)) == int('%01d' % int(i['season']))]
        except: pass
        items = [{'label': '%s S%02dE%02d' % (tvshowtitle, int(i['season']), int(i['episode'])), 'season': int('%01d' % int(i['season'])), 'episode': int('%01d' % int(i['episode']))} for i in items]
        for i in range(len(items)):
            # bail out cleanly if Kodi is shutting down
            # NOTE(review): xbmc.abortRequested is the legacy abort flag --
            # assumed available in the targeted Kodi versions
            if xbmc.abortRequested == True: return sys.exit()
            dialog.update(int((100 / float(len(items))) * i), str(name), str(items[i]['label']))
            season, episode = items[i]['season'], items[i]['episode']
            metaget.get_episode_meta('', imdb_id=imdb, season=season, episode=episode)
            metaget.change_watched('episode', '', imdb_id=imdb, season=season, episode=episode, watched=int(watched))
        try: dialog.close()
        except: pass
    except:
        try: dialog.close()
        except: pass
    try:
        # trakt branch: only when trakt indicators are enabled
        if trakt.getTraktIndicatorsInfo() == False: raise Exception()
        if season:
            # mark every episode of the requested season individually
            from resources.lib.indexers import episodes
            items = episodes.episodes().get(tvshowtitle, '0', imdb, tvdb, season, idx=False)
            items = [(int(i['season']), int(i['episode'])) for i in items]
            items = [i[1] for i in items if int('%01d' % int(season)) == int('%01d' % i[0])]
            for i in items:
                if int(watched) == 7: trakt.markEpisodeAsWatched(tvdb, season, i)
                else: trakt.markEpisodeAsNotWatched(tvdb, season, i)
        else:
            # no season given: mark the whole show in one call
            if int(watched) == 7: trakt.markTVShowAsWatched(tvdb)
            else: trakt.markTVShowAsNotWatched(tvdb)
        trakt.cachesyncTVShows()
    except:
        pass
    control.refresh()
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/test/test_pow.py | 133 | 4623 | import test.test_support, unittest
class PowTest(unittest.TestCase):
    """Tests for the built-in pow() across int, long and float.

    Python 2 only: relies on the `long` type and `3L` literals.
    """

    def powtest(self, type):
        # Shared driver, parameterized by numeric type.
        if type != float:
            # identities: x**0 == 1 and x**1 == x
            for i in range(-1000, 1000):
                self.assertEqual(pow(type(i), 0), 1)
                self.assertEqual(pow(type(i), 1), type(i))
                self.assertEqual(pow(type(0), 1), type(0))
                self.assertEqual(pow(type(1), 1), type(1))

            # cubes match repeated multiplication
            for i in range(-100, 100):
                self.assertEqual(pow(type(i), 3), i*i*i)

            # powers of two up to 2**30
            pow2 = 1
            for i in range(0,31):
                self.assertEqual(pow(2, i), pow2)
                if i != 30 : pow2 = pow2*2

            # negative exponents must not crash (result is float division)
            for othertype in int, long:
                for i in range(-10, 0) + range(1, 10):
                    ii = type(i)
                    for j in range(1, 11):
                        jj = -othertype(j)
                        pow(ii, jj)

        # 0 ** negative must raise ZeroDivisionError
        for othertype in int, long, float:
            for i in range(1, 100):
                zero = type(0)
                exp = -othertype(i/10.0)
                if exp == 0:
                    continue
                self.assertRaises(ZeroDivisionError, pow, zero, exp)

        # three-argument pow(x, y, z) vs pow(x, y) % z over a small grid;
        # bounds are narrowed per type to stay within valid domains
        il, ih = -20, 20
        jl, jh = -5, 5
        kl, kh = -10, 10
        asseq = self.assertEqual
        if type == float:
            il = 1
            asseq = self.assertAlmostEqual
        elif type == int:
            jl = 0
        elif type == long:
            jl, jh = 0, 15

        for i in range(il, ih+1):
            for j in range(jl, jh+1):
                for k in range(kl, kh+1):
                    if k != 0:
                        if type == float or j < 0:
                            # 3-arg pow rejects float bases and negative
                            # exponents
                            self.assertRaises(TypeError, pow, type(i), j, k)
                            continue
                        asseq(
                            pow(type(i),j,k),
                            pow(type(i),j)% type(k)
                        )

    def test_powint(self):
        self.powtest(int)

    def test_powlong(self):
        self.powtest(long)

    def test_powfloat(self):
        self.powtest(float)

    def test_other(self):
        # Other tests-- not very systematic
        self.assertEqual(pow(3,3) % 8, pow(3,3,8))
        self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
        self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
        self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
        self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
        self.assertEqual(pow(5,2) % -8, pow(5,2,-8))

        # same checks with long operands
        self.assertEqual(pow(3L,3L) % 8, pow(3L,3L,8))
        self.assertEqual(pow(3L,3L) % -8, pow(3L,3L,-8))
        self.assertEqual(pow(3L,2) % -2, pow(3L,2,-2))
        self.assertEqual(pow(-3L,3L) % 8, pow(-3L,3L,8))
        self.assertEqual(pow(-3L,3L) % -8, pow(-3L,3L,-8))
        self.assertEqual(pow(5L,2) % -8, pow(5L,2,-8))

        for i in range(-10, 11):
            for j in range(0, 6):
                for k in range(-7, 11):
                    if j >= 0 and k != 0:
                        self.assertEqual(
                            pow(i,j) % k,
                            pow(i,j,k)
                        )
                    if j >= 0 and k != 0:
                        self.assertEqual(
                            pow(long(i),j) % k,
                            pow(long(i),j,k)
                        )

    def test_bug643260(self):
        class TestRpow:
            def __rpow__(self, other):
                return None
        None ** TestRpow() # Won't fail when __rpow__ invoked. SF bug #643260.

    def test_bug705231(self):
        # -1.0 raised to an integer should never blow up. It did if the
        # platform pow() was buggy, and Python didn't worm around it.
        eq = self.assertEqual
        a = -1.0
        # The next two tests can still fail if the platform floor()
        # function doesn't treat all large inputs as integers
        # test_math should also fail if that is happening
        eq(pow(a, 1.23e167), 1.0)
        eq(pow(a, -1.23e167), 1.0)
        for b in range(-10, 11):
            eq(pow(a, float(b)), b & 1 and -1.0 or 1.0)
        for n in range(0, 100):
            fiveto = float(5 ** n)
            # For small n, fiveto will be odd. Eventually we run out of
            # mantissa bits, though, and thereafer fiveto will be even.
            expected = fiveto % 2.0 and -1.0 or 1.0
            eq(pow(a, fiveto), expected)
            eq(pow(a, -fiveto), expected)
        eq(expected, 1.0) # else we didn't push fiveto to evenness
def test_main():
    """Entry point used by regrtest: run the PowTest suite."""
    run = test.test_support.run_unittest
    run(PowTest)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_main()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.