repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
diplomacy/research | diplomacy_research/models/value/base_value_model.py | 1 | 8725 | # ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Base Value model
- Contains the base value model, which is used by all value models.
"""
from collections import OrderedDict
import logging
import re
from diplomacy_research.models.base_model import BaseModel
from diplomacy_research.settings import NO_PRESS_VALUE_ALL_DATASET
# Constants
LOGGER = logging.getLogger(__name__)  # module-level logger for this file
# Name tags attached to model outputs must match "tag/value/v<3 digits>_<lowercase id>",
# e.g. "tag/value/v001_my_model"; enforced by BaseValueModel._validate below.
VALID_TAG = re.compile('^tag/value/v[0-9]{3}_[a-z0-9_]+$')
def load_args():
    """ Load possible arguments
        Each entry is a tuple of (arg_type, arg_name, arg_default_value, arg_desc)
        consumed by the framework's hyper-parameter/argument parser.
        :return: A list of tuple (arg_type, arg_name, arg_value, arg_desc)
    """
    return [
        ('str', 'model_type', 'order_based', 'Policy model family. "token_based", "order_based".'),
        ('int', 'value_model_id', -1, 'The model ID of the value function.'),
        ('str', 'dataset', NO_PRESS_VALUE_ALL_DATASET, 'The dataset builder to use for supervised learning'),
        ('bool', 'stop_gradient_value', False, 'Do not propagate the value loss in the policy network'),
        ('float', 'dropout_rate', 0.5, 'Default dropout rate %.'),
        ('float', 'learning_rate', 1e-3, 'Initial learning rate.'),
        ('float', 'lr_decay_factor', 0.93, 'Learning rate decay factor.'),
        ('float', 'max_gradient_norm', 5.0, 'Maximum gradient norm.'),
        ('float', 'value_coeff', 1.0, 'The coefficient to apply to the value loss')
    ]
class BaseValueModel(BaseModel):
    """ Base Value Model"""

    def __init__(self, parent_model, dataset, hparams):
        """ Initialization
            :param parent_model: A `base_model` to which we are adding features
            :param dataset: The dataset that is used to iterate over the data.
            :param hparams: A dictionary of hyper parameters with their values
            :type parent_model: diplomacy_research.models.base_model.BaseModel
            :type dataset: diplomacy_research.models.datasets.supervised_dataset.SupervisedDataset
            :type dataset: diplomacy_research.models.datasets.queue_dataset.QueueDataset
        """
        BaseModel.__init__(self, parent_model, dataset, hparams)
        # Build the value head immediately on top of the parent model's graph.
        self.build_value()

    @property
    def _nb_evaluation_loops(self):
        """ Contains the number of different evaluation tags we want to compute
            This also represent the number of loops we should do over the validation set
            Some model wants to calculate different statistics and require multiple pass to do that
            A value of 1 indicates to only run in the main validation loop
            A value > 1 indicates to run additional loops only for this model.
        """
        return 1

    @property
    def _evaluation_tags(self):
        """ List of evaluation tags (1 list of evaluation tag for each evaluation loop)
            e.g. [['Acc_1', 'Acc_5', 'Acc_Tokens'], ['Gr_1', 'Gr_5', 'Gr_Tokens']]
        """
        return [['[Value]L2_Loss']]

    @property
    def _early_stopping_tags(self):
        """ List of tags to use to detect early stopping
            The tags are a tuple of 1) 'min' or 'max' and 2) the tag's name
            e.g. [('max', '[Gr]Acc_1'), ('min', '[TF]Perplexity')]
        """
        return [('min', '[Value]L2_Loss')]

    @property
    def _placeholders(self):
        """ Return a dictionary of all placeholders needed by the model """
        # Imported lazily so TensorFlow is only loaded when the graph is built.
        from diplomacy_research.utils.tensorflow import tf, get_placeholder_with_default
        return {
            'stop_gradient_all': get_placeholder_with_default('stop_gradient_all', False, shape=(), dtype=tf.bool)
        }

    def _get_board_value(self, board_state, current_power, name='board_state_value', reuse=None):
        """ Computes the estimated value of a board state
            :param board_state: The board state - (batch, NB_NODES, NB_FEATURES)
            :param current_power: The power for which we want the board value - (batch,)
            :param name: The name to use for the operaton
            :param reuse: Whether to reuse or not the weights from another operation
            :return: The value of the board state for the specified power - (batch,)
        """
        raise NotImplementedError()

    def _build_value_initial(self):
        """ Builds the value model (initial step) """
        raise NotImplementedError()

    def _build_value_final(self):
        """ Builds the value model (final step) - no-op by default; subclasses may override. """

    def _get_session_args(self, decode=False, eval_loop_ix=None):
        """ Returns a dict of kwargs to feed to session.run
            Expected format: {fetches, feed_dict=None}
            :param decode: Boolean that indicates to only decode the batch (no optimizer step)
            :param eval_loop_ix: The index of the current evaluation loop (None during training)
        """
        # Detecting if we are doing validation
        in_validation, our_validation = False, False
        if eval_loop_ix is not None:
            in_validation = True
            our_validation = eval_loop_ix in self.my_eval_loop_ixs

        # --------- Fetches ---------------
        train_fetches = {'optimizer_op': self.outputs['optimizer_op'],
                         'value_loss': self.outputs['value_loss']}
        eval_fetches = {'value_loss': self.outputs['value_loss']}

        # --------- Feed dict --------------
        # Building feed dict
        feed_dict = {self.placeholders['stop_gradient_all']: False}

        # --------- Validation Loop --------------
        # Validation Loop - Running one of our validation loops
        if our_validation:
            return {'fetches': eval_fetches, 'feed_dict': feed_dict}

        # Validation Loop - Running someone else validation loop
        # (only provide the feed dict; another model supplies the fetches)
        if in_validation:
            return {'feed_dict': feed_dict}

        # --------- Training Loop --------------
        # Training Loop - We want to decode the specific batch to display stats
        if decode:
            return {'fetches': eval_fetches, 'feed_dict': feed_dict}

        # Training Loop - Training the model
        return {'fetches': train_fetches, 'feed_dict': feed_dict}

    def _validate(self):
        """ Validates the built model """
        # Making sure all the required outputs are present
        assert 'value_target' in self.features
        assert 'state_value' in self.outputs
        assert 'value_loss' in self.outputs
        # state_value must be a flat (batch,) tensor
        assert len(self.outputs['state_value'].shape) == 1

        # Making sure we have a name tag
        # for/else: the else clause only runs if no output matched VALID_TAG.
        for tag in self.outputs:
            if VALID_TAG.match(tag):
                break
        else:
            raise RuntimeError('Unable to find a name tag. Format: "tag/value/v000_xxxxxx".')

    @staticmethod
    def _decode(**fetches):
        """ Performs decoding on the output (value model)
            :param fetches: A dictionary of fetches from the model.
            :return: A dictionary of decoded results, including various keys for evaluation
        """
        # Nothing to decode if the value loss was not fetched (e.g. someone else's loop)
        if 'value_loss' not in fetches:
            return {}
        return {'value_loss': fetches['value_loss']}

    def _evaluate(self, decoded_results, feed_dict, eval_loop_ix, incl_detailed):
        """ Calculates the accuracy of the model
            :param decoded_results: The decoded results (output of _decode() function)
            :param feed_dict: The feed dictionary that was given to session.run()
            :param eval_loop_ix: The current evaluation loop index
            :param incl_detailed: is true if training is over, more statistics can be computed
            :return: A tuple consisting of:
                1) An ordered dictionary with result_name as key and (weight, value) as value  (Regular results)
                2) An ordered dictionary with result_name as key and a list of result values (Detailed results)
        """
        # Detecting if it's our evaluation or not
        # eval_loop_ix == -1 means "always evaluate" (e.g. training-time decode)
        if eval_loop_ix == -1:
            pass
        else:
            our_validation = eval_loop_ix in self.my_eval_loop_ixs
            if not our_validation:
                return OrderedDict(), OrderedDict()

        # Returning evaluation results (weight of 1 for the L2 loss; no detailed results)
        return OrderedDict({'[Value]L2_Loss': (1, decoded_results['value_loss'])}), OrderedDict()
| mit |
verilylifesciences/classifaedes | classifaedes/hparams_lib.py | 1 | 2741 | # Copyright 2019 Verily Life Sciences LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for initializing, loading, and saving HParams."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
import time
import tensorflow.compat.v1 as tf
from tensorflow.contrib import training as contrib_training
# Convenience aliases for TensorFlow's flag module and parsed flag values.
flags = tf.app.flags
FLAGS = flags.FLAGS
def defaults():
    """Returns default HParams instance.

    These values are the baseline configuration; load_from_file() parses a
    saved JSON on top of them, so any key absent from the file keeps the
    default below.
    """
    hps = contrib_training.HParams(
        batch_size=16,
        # Learning rate params.
        lr_init=0.01,
        lr_decay_steps=200,
        # Supported architectures:
        #   * inception_v3a: InceptionV3 with corresponding arg-scope.
        #   * inception_v3b: InceptionV3 without corresponding arg-scope (notably
        #       excludes batch-norm).
        arch='inception_v3a',
        clip_gradient_norm=0,  # Disabled if zero.
        adam_epsilon=0.1,
        p_dropout=0.1,
        filter_depth_mul=0.3,
        use_global_objective_recall_at_precision=False,
        target_precision=0.9997,
    )
    return hps
def write_to_file(hps, model_dir):
    """Writes HParams instance values to a JSON file under model_dir.

    The file layout is the inverse of hps.parse_json(), so load_from_file()
    can round-trip it.

    Args:
      hps: HParams.
      model_dir: Model directory.
    """
    destination = _hps_path(model_dir)
    tf.logging.info('Recording HParams to path %s.', destination)
    with tf.gfile.Open(destination, 'w') as out_file:
        out_file.write(_human_serialize(hps))
def load_from_file(model_dir):
    """Load HParams from `model_dir`.

    Blocks (polling every 10 seconds) until the hparams JSON file exists,
    then parses it on top of the default values from defaults().

    Args:
      model_dir: Model directory

    Returns:
      tf.HParams loaded from a JSON file under `model_dir`.
    """
    source_path = _hps_path(model_dir)
    # Wait for the training job to write the file before reading it.
    while not tf.gfile.Exists(source_path):
        tf.logging.info('Waiting for HParams file to exist at %s.', source_path)
        time.sleep(10)
    tf.logging.info('Loading HParams from path %s...', source_path)
    loaded = defaults()
    with tf.gfile.Open(source_path) as json_file:
        loaded.parse_json(json_file.read())
    tf.logging.info('HParams values: \n%s', _human_serialize(loaded))
    return loaded
#
# Private functions.
#
def _hps_path(model_dir):
return os.path.join(model_dir, 'hparams.json')
def _human_serialize(hps):
return json.dumps(hps.values(), indent=2)
| apache-2.0 |
asanfilippo7/osf.io | tests/test_test_utils.py | 32 | 1995 | # -*- coding: utf-8 -*-
import mock
from urlparse import urlparse
from nose.tools import * # flake8: noqa
import unittest
from framework.auth import Auth
from website.models import Node, NodeLog
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from tests import utils as test_utils
class TestUtilsTests(OsfTestCase):
    # Exercises the assert_logs / assert_not_logs test decorators from tests.utils,
    # which wrap a test function and verify whether a NodeLog entry was (or was
    # not) added to the named model attribute ('node' here) during the call.

    def setUp(self):
        super(TestUtilsTests, self).setUp()
        self.node = ProjectFactory()
        self.user = self.node.creator
        self.auth = Auth(self.user)

    def test_assert_logs(self):
        # Decorated function that does add the expected log -> wrapper passes.
        def add_log(self):
            self.node.add_log(NodeLog.UPDATED_FIELDS, {}, auth=self.auth)
        wrapped = test_utils.assert_logs(NodeLog.UPDATED_FIELDS, 'node')(add_log)
        wrapped(self)

    def test_assert_logs_fail(self):
        # Decorated function that adds no log -> wrapper must raise AssertionError.
        def dont_add_log(self):
            pass
        wrapped = test_utils.assert_logs(NodeLog.UPDATED_FIELDS, 'node')(dont_add_log)
        assert_raises(AssertionError, lambda: wrapped(self))

    def test_assert_logs_stacked(self):
        # Two stacked decorators check the last two logs (indices -2 and -1).
        def add_log(self):
            self.node.add_log(NodeLog.UPDATED_FIELDS, {}, auth=self.auth)
        def add_two_logs(self):
            add_log(self)
            self.node.add_log(NodeLog.CONTRIB_ADDED, {}, auth=self.auth)
        wrapped = test_utils.assert_logs(NodeLog.UPDATED_FIELDS, 'node', -2)(
            test_utils.assert_logs(NodeLog.CONTRIB_ADDED, 'node')(add_two_logs)
        )
        wrapped(self)

    def test_assert_not_logs_pass(self):
        # No log added -> assert_not_logs wrapper passes.
        def dont_add_log(self):
            pass
        wrapped = test_utils.assert_not_logs(NodeLog.UPDATED_FIELDS, 'node')(dont_add_log)
        wrapped(self)

    def test_assert_not_logs_fail(self):
        # Log added -> assert_not_logs wrapper must raise AssertionError.
        def add_log(self):
            self.node.add_log(NodeLog.UPDATED_FIELDS, {}, auth=self.auth)
        wrapped = test_utils.assert_not_logs(NodeLog.UPDATED_FIELDS, 'node')(add_log)
        assert_raises(AssertionError, lambda: wrapped(self))
| apache-2.0 |
bastik/youtube-dl | youtube_dl/extractor/thesixtyone.py | 109 | 3196 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import unified_strdate
class TheSixtyOneIE(InfoExtractor):
    # Extractor for song pages on thesixtyone.com.
    # Matches "/s/<id>", "/song/<id>" and "/song/comments/list/<id>" URLs.
    _VALID_URL = r'''(?x)https?://(?:www\.)?thesixtyone\.com/
        (?:.*?/)*
        (?:
            s|
            song/comments/list|
            song
        )/(?P<id>[A-Za-z0-9]+)/?$'''
    _SONG_URL_TEMPLATE = 'http://thesixtyone.com/s/{0:}'
    # Formatted with the reversed/decoded key as positional arg 0 and the
    # song_data dict (for audio_server) as keyword args.
    _SONG_FILE_URL_TEMPLATE = 'http://{audio_server:}/thesixtyone_production/audio/{0:}_stream'
    _THUMBNAIL_URL_TEMPLATE = '{photo_base_url:}_desktop'
    _TESTS = [
        {
            'url': 'http://www.thesixtyone.com/s/SrE3zD7s1jt/',
            'md5': '821cc43b0530d3222e3e2b70bb4622ea',
            'info_dict': {
                'id': 'SrE3zD7s1jt',
                'ext': 'mp3',
                'title': 'CASIO - Unicorn War Mixtape',
                'thumbnail': 're:^https?://.*_desktop$',
                'upload_date': '20071217',
                'duration': 3208,
            }
        },
        {
            'url': 'http://www.thesixtyone.com/song/comments/list/SrE3zD7s1jt',
            'only_matching': True,
        },
        {
            'url': 'http://www.thesixtyone.com/s/ULoiyjuJWli#/s/SrE3zD7s1jt/',
            'only_matching': True,
        },
        {
            'url': 'http://www.thesixtyone.com/#/s/SrE3zD7s1jt/',
            'only_matching': True,
        },
        {
            'url': 'http://www.thesixtyone.com/song/SrE3zD7s1jt/',
            'only_matching': True,
        },
    ]

    # Character-substitution table used to decode the obfuscated audio "key"
    # embedded in the page (applied per character, then the result is reversed).
    _DECODE_MAP = {
        "x": "a",
        "m": "b",
        "w": "c",
        "q": "d",
        "n": "e",
        "p": "f",
        "a": "0",
        "h": "1",
        "e": "2",
        "u": "3",
        "s": "4",
        "i": "5",
        "o": "6",
        "y": "7",
        "r": "8",
        "c": "9"
    }

    def _real_extract(self, url):
        song_id = self._match_id(url)

        webpage = self._download_webpage(
            self._SONG_URL_TEMPLATE.format(song_id), song_id)

        # The page embeds per-song metadata as a JSON object keyed by the song id.
        song_data = self._parse_json(self._search_regex(
            r'"%s":\s(\{.*?\})' % song_id, webpage, 'song_data'), song_id)

        # Choose the audio host: S3 if the page sets the s3_audio_load marker,
        # otherwise the site's own audio server subdomain.
        if self._search_regex(r'(t61\.s3_audio_load\s*=\s*1\.0;)', webpage, 's3_audio_load marker', default=None):
            song_data['audio_server'] = 's3.amazonaws.com'
        else:
            song_data['audio_server'] = song_data['audio_server'] + '.thesixtyone.com'

        # Decode each key character via _DECODE_MAP (unknown chars pass through),
        # then reverse the whole string to obtain the stream path component.
        keys = [self._DECODE_MAP.get(s, s) for s in song_data['key']]
        url = self._SONG_FILE_URL_TEMPLATE.format(
            "".join(reversed(keys)), **song_data)

        formats = [{
            'format_id': 'sd',
            'url': url,
            'ext': 'mp3',
        }]

        return {
            'id': song_id,
            'title': '{artist:} - {name:}'.format(**song_data),
            'formats': formats,
            'comment_count': song_data.get('comments_count'),
            'duration': song_data.get('play_time'),
            'like_count': song_data.get('score'),
            'thumbnail': self._THUMBNAIL_URL_TEMPLATE.format(**song_data),
            'upload_date': unified_strdate(song_data.get('publish_date')),
        }
| unlicense |
pynamodb/PynamoDB | pynamodb/pagination.py | 2 | 7759 | import time
from typing import Any, Callable, Dict, Iterable, Iterator, TypeVar, Optional
from pynamodb.constants import (CAMEL_COUNT, ITEMS, LAST_EVALUATED_KEY, SCANNED_COUNT,
CONSUMED_CAPACITY, TOTAL, CAPACITY_UNITS)
from pynamodb.settings import OperationSettings
# Generic item type yielded by the iterator classes below.
_T = TypeVar('_T')
class RateLimiter:
    """
    Throttles operations to a pre-set rate of units/second.

    Usage:
        Create a RateLimiter with the desired rate:
            rate_limiter = RateLimiter(rate_limit)
        Call acquire() before each operation:
            rate_limiter.acquire()
        After each operation, record how many units it consumed:
            rate_limiter.consume(units)
    """
    def __init__(self, rate_limit: float, time_module: Optional[Any] = None) -> None:
        """
        Initializes a RateLimiter object

        :param rate_limit: The desired rate (units per second); must be positive.
        :param time_module: Optional replacement for the `time` module, intended
            for testing (must provide time() and sleep()).
        """
        if rate_limit <= 0:
            raise ValueError("rate_limit must be greater than zero")
        self._rate_limit = rate_limit
        self._consumed = 0
        self._time_of_last_acquire = 0.0
        # Fall back to the real time module when no fake clock is injected.
        self._time_module: Any = time_module or time

    def consume(self, units: int) -> None:
        """
        Records the amount of units consumed.

        :param units: Number of units consumed
        :return: None
        """
        self._consumed = self._consumed + units

    def acquire(self) -> None:
        """
        Sleeps just long enough so that the recorded consumption stays within
        the configured rate, then resets the consumption counter.

        :return: None
        """
        elapsed = self._time_module.time() - self._time_of_last_acquire
        required = self._consumed / float(self.rate_limit)
        # Never sleep a negative amount; enough time may already have passed.
        self._time_module.sleep(max(0, required - elapsed))
        self._consumed = 0
        self._time_of_last_acquire = self._time_module.time()

    @property
    def rate_limit(self) -> float:
        """
        A limit of units per seconds
        """
        return self._rate_limit

    @rate_limit.setter
    def rate_limit(self, rate_limit: float):
        if rate_limit <= 0:
            raise ValueError("rate_limit must be greater than zero")
        self._rate_limit = rate_limit
class PageIterator(Iterator[_T]):
    """
    PageIterator handles Query and Scan result pagination.

    Each __next__ call performs one Query/Scan request and yields the raw
    response page; iteration stops when DynamoDB no longer returns a
    LastEvaluatedKey.

    http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination
    http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination
    """
    def __init__(
        self,
        operation: Callable,
        args: Any,
        kwargs: Dict[str, Any],
        rate_limit: Optional[float] = None,
        settings: OperationSettings = OperationSettings.default,
    ) -> None:
        self._operation = operation
        self._args = args
        self._kwargs = kwargs
        self._first_iteration = True
        # Resume from a caller-supplied exclusive_start_key, if any.
        self._last_evaluated_key = kwargs.get('exclusive_start_key')
        self._total_scanned_count = 0
        self._rate_limiter = None
        if rate_limit:
            self._rate_limiter = RateLimiter(rate_limit)
        self._settings = settings

    def __iter__(self) -> Iterator[_T]:
        return self

    def __next__(self) -> _T:
        # A missing last_evaluated_key after the first request means DynamoDB
        # reported no more pages.
        if self._last_evaluated_key is None and not self._first_iteration:
            raise StopIteration()

        self._first_iteration = False
        self._kwargs['exclusive_start_key'] = self._last_evaluated_key

        if self._rate_limiter:
            # Wait as needed, and ask DynamoDB to report consumed capacity so
            # we can account for it after the call.
            self._rate_limiter.acquire()
            self._kwargs['return_consumed_capacity'] = TOTAL
        page = self._operation(*self._args, settings=self._settings, **self._kwargs)
        self._last_evaluated_key = page.get(LAST_EVALUATED_KEY)
        self._total_scanned_count += page[SCANNED_COUNT]

        if self._rate_limiter:
            consumed_capacity = page.get(CONSUMED_CAPACITY, {}).get(CAPACITY_UNITS, 0)
            self._rate_limiter.consume(consumed_capacity)

        return page

    def next(self) -> _T:
        # Python 2-style alias for __next__.
        return self.__next__()

    @property
    def key_names(self) -> Iterable[str]:
        # If the current page has a last_evaluated_key, use it to determine key attributes
        if self._last_evaluated_key:
            return self._last_evaluated_key.keys()

        # Use the table meta data to determine the key attributes
        # (self._operation is a bound method; __self__ is the connection/table object)
        table_meta = self._operation.__self__.get_meta_table()  # type: ignore
        return table_meta.get_key_names(self._kwargs.get('index_name'))

    @property
    def page_size(self) -> Optional[int]:
        return self._kwargs.get('limit')

    @page_size.setter
    def page_size(self, page_size: int) -> None:
        self._kwargs['limit'] = page_size

    @property
    def last_evaluated_key(self) -> Optional[Dict[str, Dict[str, Any]]]:
        return self._last_evaluated_key

    @property
    def total_scanned_count(self) -> int:
        return self._total_scanned_count
class ResultIterator(Iterator[_T]):
    """
    ResultIterator handles Query and Scan item pagination.

    Wraps a PageIterator and yields individual items, optionally mapped
    through `map_fn` and capped at `limit` items in total.

    http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination
    http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination
    """
    def __init__(
        self,
        operation: Callable,
        args: Any,
        kwargs: Dict[str, Any],
        map_fn: Optional[Callable] = None,
        limit: Optional[int] = None,
        rate_limit: Optional[float] = None,
        settings: OperationSettings = OperationSettings.default,
    ) -> None:
        self.page_iter: PageIterator = PageIterator(operation, args, kwargs, rate_limit, settings)
        self._first_iteration = True
        self._map_fn = map_fn
        self._limit = limit
        self._total_count = 0

    def _get_next_page(self) -> None:
        # Fetch the next raw page and reset the per-page cursor.
        page = next(self.page_iter)
        self._count = page[CAMEL_COUNT]
        self._items = page.get(ITEMS)  # not returned if 'Select' is set to 'COUNT'
        # With no items (COUNT select), setting _index == _count marks the page
        # as exhausted immediately.
        self._index = 0 if self._items else self._count
        self._total_count += self._count

    def __iter__(self) -> Iterator[_T]:
        return self

    def __next__(self) -> _T:
        if self._limit == 0:
            raise StopIteration

        if self._first_iteration:
            self._first_iteration = False
            self._get_next_page()

        # Skip past exhausted (or empty) pages; StopIteration propagates from
        # the underlying PageIterator when there are no more pages.
        while self._index == self._count:
            self._get_next_page()

        item = self._items[self._index]
        self._index += 1
        if self._limit is not None:
            self._limit -= 1
        if self._map_fn:
            item = self._map_fn(item)
        return item

    def next(self) -> _T:
        # Python 2-style alias for __next__.
        return self.__next__()

    @property
    def last_evaluated_key(self) -> Optional[Dict[str, Dict[str, Any]]]:
        if self._first_iteration or self._index == self._count:
            # Not started iterating yet: return `exclusive_start_key` if set, otherwise expect None; or,
            # Entire page has been consumed: last_evaluated_key is whatever DynamoDB returned
            # It may correspond to the current item, or it may correspond to an item evaluated but not returned.
            return self.page_iter.last_evaluated_key

        # In the middle of a page of results: reconstruct a last_evaluated_key from the current item
        # The operation should be resumed starting at the last item returned, not the last item evaluated.
        # This can occur if the 'limit' is reached in the middle of a page.
        item = self._items[self._index - 1]
        return {key: item[key] for key in self.page_iter.key_names}

    @property
    def total_count(self) -> int:
        return self._total_count
| mit |
cedi4155476/QGIS | python/plugins/processing/algs/qgis/Merge.py | 9 | 4123 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Gridify.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class Merge(GeoAlgorithm):
    # Processing algorithm that merges two vector layers of the same geometry
    # type into one output layer, unioning their attribute fields.

    LAYER1 = 'LAYER1'
    LAYER2 = 'LAYER2'
    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        # Declares the algorithm name/group and its two vector inputs + output.
        self.name, self.i18n_name = self.trAlgorithm('Merge vector layers')
        self.group, self.i18n_group = self.trAlgorithm('Vector general tools')
        self.addParameter(ParameterVector(self.LAYER1,
                                          self.tr('Input layer 1'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterVector(self.LAYER2,
                                          self.tr('Input layer 2'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addOutput(OutputVector(self.OUTPUT, self.tr('Merged')))

    def processAlgorithm(self, progress):
        layer1 = dataobjects.getObjectFromUri(
            self.getParameterValue(self.LAYER1))
        layer2 = dataobjects.getObjectFromUri(
            self.getParameterValue(self.LAYER2))

        # Both layers must share the same geometry type.
        if layer1.wkbType() != layer2.wkbType():
            raise GeoAlgorithmExecutionException(
                self.tr('Merged layers must have be same type of geometry'))

        # First pass: count features and build the union of fields, matching
        # on (name, type) so duplicates across layers appear only once.
        count = 0
        fields = []
        layers = [layer1, layer2]
        for layer in layers:
            count += layer.featureCount()
            for sfield in layer.pendingFields():
                found = None
                for dfield in fields:
                    if dfield.name() == sfield.name() and \
                            dfield.type() == sfield.type():
                        found = dfield
                        break
                if not found:
                    fields.append(sfield)

        # Output writer uses the merged field list and layer1's geometry/CRS.
        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
            fields, layer1.wkbType(), layer1.crs())

        # NOTE(review): if both layers are empty, count is 0 and the division
        # below raises ZeroDivisionError — confirm whether empty inputs are
        # possible here.
        total = 100.0 / float(count)
        count = 0
        # Second pass: for each layer, map each merged field to the index of
        # the matching source field (if any), then copy features across.
        for layer in layers:
            idx = {}
            for dfield in fields:
                i = 0
                for sfield in layer.pendingFields():
                    if sfield.name() == dfield.name() and \
                            sfield.type() == dfield.type():
                        idx[dfield] = i
                        break
                    i += 1
            features = vector.features(layer)
            for f in features:
                sAttributes = f.attributes()
                dAttributes = []
                for dfield in fields:
                    if dfield in idx:
                        dAttributes.append(sAttributes[idx[dfield]])
                    else:
                        # NOTE(review): this appends the field's *type id* as
                        # the attribute value when the layer lacks the field;
                        # a null value looks like the intent — confirm.
                        dAttributes.append(dfield.type())
                f.setAttributes(dAttributes)
                writer.addFeature(f)
                count += 1
                progress.setPercentage(int(count * total))

        del writer
| gpl-2.0 |
beegieb/kaggle_see_click_fix | config.py | 3 | 1898 | # TRAINFILE / TESTFILE are the names of the official train and test datasets
TRAINFILE = 'data/train.csv'
TESTFILE = 'data/test.csv'
# GEODATA is the name of the csv file containing mined Geodata indexed by issue ID
GEODATA = 'data/geodata.csv'
# USE_BRYANS_DATA is a boolean, if True then use Bryan's updated dataset
# when generating the model and must also include BRYAN_TRAIN and BRYAN_TEST
# locations for Bryan's train and test files
USE_BRYANS_DATA = True
BRYAN_TRAIN = "data/train_addr_inc_pop.csv"
BRYAN_TEST = "data/test_addr_inc_pop.csv"
# CACHEDIR is the loaction of the cache
CACHEDIR = 'cache/'
# CACHETYPE is the type of caching to use - either pickle or joblib
CACHETYPE = 'joblib'
# SUBMITDIR is the location where submissions will be stored
SUBMITDIR = 'submissions/'
# SETTINGS is the name of the model and dataset settings json
SETTINGS = 'settings.json'
## Helper methods ##
import json
def json_configs(type, name):
    """
    Base method that extracts the configuration info from the json file defined
    in SETTINGS

    Args:
        type - the name of the type of configuration object to look in
        name - the name of the object whose configs will be extracted

    Returns:
        a dict containing the settings for the object of type and name

    Raises:
        KeyError if type is not a top-level key in the SETTINGS json file
        ValueError if name is not defined under type in the SETTINGS json file
    """
    # Use a context manager so the file handle is closed even when json.load()
    # or the [type] lookup raises (the original leaked the handle on error).
    with open(SETTINGS) as f:
        configs = json.load(f)[type]
    if name not in configs:
        raise ValueError('Unable to find configuration for %s %s' % (type, name))
    return configs[name]
def model_configs(name):
    """
    Wrapper for json_configs where type is 'models'
    """
    return json_configs(type='models', name=name)
def dataset_configs(name):
    """
    Wrapper for json_configs where type is 'datasets'
    """
    return json_configs(type='datasets', name=name)
| bsd-3-clause |
tovmeod/anaf | anaf/messaging/tests.py | 1 | 11485 | from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from anaf.core.models import Group, Perspective, ModuleSetting
from models import Message, MessageStream
from anaf.identities.models import Contact, ContactType
class MessagingModelsTest(TestCase):
    # Model-level tests for the messaging app: creating a Message and a
    # MessageStream and checking they persist with the expected values.
    username = "test"
    password = "password"

    def test_model_message(self):
        """Test message"""
        # Build the minimal object graph a Message requires:
        # a Contact (author) and a MessageStream.
        contact_type = ContactType(name='test')
        contact_type.save()
        contact = Contact(name='test', contact_type=contact_type)
        contact.save()
        self.user = DjangoUser(username=self.username, password='')
        self.user.set_password(self.password)
        self.user.save()
        stream = MessageStream(name='test')
        stream.save()
        obj = Message(title='test', body='test', author=contact, stream=stream)
        obj.save()
        self.assertEquals('test', obj.title)
        # A saved object must have been assigned a primary key.
        self.assertNotEquals(obj.id, None)
        obj.delete()

    def test_model_message_stream(self):
        "Test message stream creation and persistence"
        obj = MessageStream(name='test')
        obj.save()
        self.assertEquals('test', obj.name)
        self.assertNotEquals(obj.id, None)
        obj.delete()
class MessagingViewsTest(TestCase):
username = "test"
password = "password"
def setUp(self):
self.group, created = Group.objects.get_or_create(name='test')
self.user, created = DjangoUser.objects.get_or_create(username=self.username, is_staff=True)
self.user.set_password(self.password)
self.user.save()
perspective, created = Perspective.objects.get_or_create(name='default')
perspective.set_default_user()
perspective.save()
ModuleSetting.set('default_perspective', perspective.id)
self.contact_type = ContactType(name='test')
self.contact_type.set_default_user()
self.contact_type.save()
self.contact = Contact(name='test', contact_type=self.contact_type)
self.contact.set_default_user()
self.contact.save()
self.stream = MessageStream(name='test')
self.stream.set_default_user()
self.stream.save()
self.message = Message(
title='test', body='test', author=self.contact, stream=self.stream)
self.message.set_default_user()
self.message.save()
######################################
# Testing views when user is logged in
######################################
def test_message_index_login(self):
"Test index page with login at /messaging/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('messaging'))
self.assertEquals(response.status_code, 200)
def test_message_index_sent(self):
"Test index page with login at /messaging/sent/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('messaging_sent'))
self.assertEquals(response.status_code, 200)
def test_message_index_inbox(self):
"Test index page with login at /messaging/inbox/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('messaging_inbox'))
self.assertEquals(response.status_code, 200)
def test_message_index_unread(self):
"Test index page with login at /messaging/unread/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('messaging_unread'))
self.assertEquals(response.status_code, 200)
# Messages
def test_message_compose_login(self):
"Test index page with login at /message/compose/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('messaging_message_compose'))
# self.assertEquals(response.status_code, 200)
def test_message_view_login(self):
"Test index page with login at /message/view/<message_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('messaging_message_view', args=[self.message.id]))
self.assertEquals(response.status_code, 200)
def test_message_delete_login(self):
"Test index page with login at /message/edit/<message_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('messaging_message_delete', args=[self.message.id]))
self.assertEquals(response.status_code, 200)
# Streams
def test_stream_add(self):
"Test index page with login at /stream/add/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('messaging_stream_edit', args=[self.stream.id]))
self.assertEquals(response.status_code, 200)
def test_stream_view(self):
"Test index page with login at /stream/view/<stream_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('messaging_stream_view', args=[self.stream.id]))
self.assertEquals(response.status_code, 200)
def test_stream_edit(self):
"Test index page with login at /stream/edit/<stream_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('messaging_stream_edit', args=[self.stream.id]))
self.assertEquals(response.status_code, 200)
def test_stream_delete(self):
"Test index page with login at /stream/delete/<stream_id>"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(
reverse('messaging_stream_delete', args=[self.stream.id]))
self.assertEquals(response.status_code, 200)
# Settings
def test_messaging_settings_view(self):
"Test index page with login at /messaging/settings/view/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('messaging_settings_view'))
self.assertEquals(response.status_code, 200)
def test_finance_settings_edit(self):
"Test index page with login at /messaging/settings/edit/"
response = self.client.post('/accounts/login',
{'username': self.username, 'password': self.password})
self.assertRedirects(response, '/')
response = self.client.get(reverse('messaging_settings_edit'))
self.assertEquals(response.status_code, 200)
######################################
# Testing views when user is not logged in
######################################
def test_message_index_out(self):
"Test index page at /messaging/"
response = self.client.get(reverse('messaging'))
# Redirects as unauthenticated
self.assertRedirects(response, reverse('user_login'))
def test_message_sent_out(self):
"Testing /messaging/sent/"
response = self.client.get(reverse('messaging_sent'))
# Redirects as unauthenticated
self.assertRedirects(response, reverse('user_login'))
def test_message_inbox_out(self):
"Testing /messaging/inbox/"
response = self.client.get(reverse('messaging_inbox'))
# Redirects as unauthenticated
self.assertRedirects(response, reverse('user_login'))
def test_message_unread_out(self):
"Testing /messaging/unread/"
response = self.client.get(reverse('messaging_unread'))
# Redirects as unauthenticated
self.assertRedirects(response, reverse('user_login'))
# Messages
def test_message_compose_out(self):
"Testing /message/compose/"
response = self.client.get(reverse('messaging_message_compose'))
self.assertRedirects(response, reverse('user_login'))
def test_message_view_out(self):
"Test index page with login at /message/view/<message_id>"
response = self.client.get(
reverse('messaging_message_view', args=[self.message.id]))
self.assertRedirects(response, reverse('user_login'))
def test_message_delete_out(self):
"Test index page with login at /message/edit/<message_id>"
response = self.client.get(
reverse('messaging_message_delete', args=[self.message.id]))
self.assertRedirects(response, reverse('user_login'))
# Streams
def test_stream_add_out(self):
"Testing /stream/add/"
response = self.client.get(
reverse('messaging_stream_edit', args=[self.stream.id]))
self.assertRedirects(response, reverse('user_login'))
def test_stream_view_out(self):
"Testing /stream/view/<stream_id>"
response = self.client.get(
reverse('messaging_stream_view', args=[self.stream.id]))
self.assertRedirects(response, reverse('user_login'))
def test_stream_edit_out(self):
"Testing /stream/edit/<stream_id>"
response = self.client.get(
reverse('messaging_stream_edit', args=[self.stream.id]))
self.assertRedirects(response, reverse('user_login'))
def test_stream_delete_out(self):
"Testing /stream/delete/<stream_id>"
response = self.client.get(
reverse('messaging_stream_delete', args=[self.stream.id]))
self.assertRedirects(response, reverse('user_login'))
# Settings
def test_messaging_settings_view_out(self):
"Testing /messaging/settings/view/"
response = self.client.get(reverse('messaging_settings_view'))
self.assertRedirects(response, reverse('user_login'))
def test_finance_settings_edit_out(self):
"Testing /messaging/settings/edit/"
response = self.client.get(reverse('messaging_settings_edit'))
self.assertRedirects(response, reverse('user_login'))
| bsd-3-clause |
Jgarcia-IAS/Fidelizacion_odoo | openerp/addons/hr_evaluation/hr_evaluation.py | 210 | 19168 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from dateutil import parser
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DF
class hr_evaluation_plan(osv.Model):
    """An appraisal plan: an ordered set of phases applied to employees on a
    recurring schedule (first appraisal after `month_first` months, then one
    every `month_next` months)."""
    _name = "hr_evaluation.plan"
    _description = "Appraisal Plan"
    _columns = {
        'name': fields.char("Appraisal Plan", required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'phase_ids': fields.one2many('hr_evaluation.plan.phase', 'plan_id', 'Appraisal Phases', copy=True),
        'month_first': fields.integer('First Appraisal in (months)', help="This number of months will be used to schedule the first evaluation date of the employee when selecting an evaluation plan. "),
        'month_next': fields.integer('Periodicity of Appraisal (months)', help="The number of month that depicts the delay between each evaluation of this plan (after the first one)."),
        'active': fields.boolean('Active')
    }
    _defaults = {
        'active': True,
        'month_first': 6,
        'month_next': 12,
        # NOTE(review): the company default is resolved against the
        # 'account.account' model key -- looks copy-pasted from the accounting
        # module; confirm it should not be 'hr_evaluation.plan'.
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account.account', context=c),
    }
class hr_evaluation_plan_phase(osv.Model):
    """One step of an appraisal plan: defines who is surveyed (top-down,
    bottom-up, self appraisal or final interview), with which survey form,
    and how answers and notification mails are dispatched."""
    _name = "hr_evaluation.plan.phase"
    _description = "Appraisal Plan Phase"
    _order = "sequence"
    _columns = {
        'name': fields.char("Phase", size=64, required=True),
        'sequence': fields.integer("Sequence"),
        'company_id': fields.related('plan_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
        'plan_id': fields.many2one('hr_evaluation.plan', 'Appraisal Plan', ondelete='cascade'),
        'action': fields.selection([
            ('top-down', 'Top-Down Appraisal Requests'),
            ('bottom-up', 'Bottom-Up Appraisal Requests'),
            ('self', 'Self Appraisal Requests'),
            ('final', 'Final Interview')], 'Action', required=True),
        'survey_id': fields.many2one('survey.survey', 'Appraisal Form', required=True),
        'send_answer_manager': fields.boolean('All Answers',
            help="Send all answers to the manager"),
        'send_answer_employee': fields.boolean('All Answers',
            help="Send all answers to the employee"),
        'send_anonymous_manager': fields.boolean('Anonymous Summary',
            help="Send an anonymous summary to the manager"),
        'send_anonymous_employee': fields.boolean('Anonymous Summary',
            help="Send an anonymous summary to the employee"),
        'wait': fields.boolean('Wait Previous Phases',
            help="Check this box if you want to wait that all preceding phases " +
            "are finished before launching this phase."),
        'mail_feature': fields.boolean('Send mail for this phase', help="Check this box if you want to send mail to employees coming under this phase"),
        'mail_body': fields.text('Email'),
        'email_subject': fields.text('Subject')
    }
    _defaults = {
        'sequence': 1,
        'email_subject': _('''Regarding '''),
        # The %-style placeholders are filled in by
        # hr_evaluation.button_plan_in_progress when the phase mail is sent.
        'mail_body': lambda *a: _('''
Date: %(date)s
Dear %(employee_name)s,
I am doing an evaluation regarding %(eval_name)s.
Kindly submit your response.
Thanks,
--
%(user_signature)s
'''),
    }
class hr_employee(osv.Model):
    """Extends hr.employee with appraisal-plan scheduling fields and the
    cron entry point that creates due evaluations."""
    _name = "hr.employee"
    _inherit="hr.employee"
    def _appraisal_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional field: number of appraisal interviews per employee."""
        Evaluation = self.pool['hr.evaluation.interview']
        return {
            employee_id: Evaluation.search_count(cr, uid, [('user_to_review_id', '=', employee_id)], context=context)
            for employee_id in ids
        }
    _columns = {
        'evaluation_plan_id': fields.many2one('hr_evaluation.plan', 'Appraisal Plan'),
        'evaluation_date': fields.date('Next Appraisal Date', help="The date of the next appraisal is computed by the appraisal plan's dates (first appraisal + periodicity)."),
        'appraisal_count': fields.function(_appraisal_count, type='integer', string='Appraisal Interviews'),
    }
    def run_employee_evaluation(self, cr, uid, automatic=False, use_new_cursor=False, context=None):  # cronjob
        """Scheduler entry point.

        First pass: employees with a plan but no appraisal date yet get their
        first date (today + month_first).  Second pass: employees whose date
        is due get the next date (today + month_next), and a new evaluation
        is created and immediately started.
        """
        now = parser.parse(datetime.now().strftime('%Y-%m-%d'))
        obj_evaluation = self.pool.get('hr_evaluation.evaluation')
        # '<>' is the legacy OpenERP domain spelling of '!='.
        emp_ids = self.search(cr, uid, [('evaluation_plan_id', '<>', False), ('evaluation_date', '=', False)], context=context)
        for emp in self.browse(cr, uid, emp_ids, context=context):
            first_date = (now + relativedelta(months=emp.evaluation_plan_id.month_first)).strftime('%Y-%m-%d')
            self.write(cr, uid, [emp.id], {'evaluation_date': first_date}, context=context)
        emp_ids = self.search(cr, uid, [('evaluation_plan_id', '<>', False), ('evaluation_date', '<=', time.strftime("%Y-%m-%d"))], context=context)
        for emp in self.browse(cr, uid, emp_ids, context=context):
            next_date = (now + relativedelta(months=emp.evaluation_plan_id.month_next)).strftime('%Y-%m-%d')
            self.write(cr, uid, [emp.id], {'evaluation_date': next_date}, context=context)
            plan_id = obj_evaluation.create(cr, uid, {'employee_id': emp.id, 'plan_id': emp.evaluation_plan_id.id}, context=context)
            obj_evaluation.button_plan_in_progress(cr, uid, [plan_id], context=context)
        return True
class hr_evaluation(osv.Model):
    """An employee appraisal: drives a plan's interviews through the
    draft -> wait -> progress -> done lifecycle."""
    _name = "hr_evaluation.evaluation"
    _inherit = "mail.thread"
    _description = "Employee Appraisal"
    _columns = {
        'date': fields.date("Appraisal Deadline", required=True, select=True),
        'employee_id': fields.many2one('hr.employee', "Employee", required=True),
        'note_summary': fields.text('Appraisal Summary'),
        'note_action': fields.text('Action Plan', help="If the evaluation does not meet the expectations, you can propose an action plan"),
        'rating': fields.selection([
            ('0', 'Significantly below expectations'),
            ('1', 'Do not meet expectations'),
            ('2', 'Meet expectations'),
            ('3', 'Exceeds expectations'),
            ('4', 'Significantly exceeds expectations'),
        ], "Appreciation", help="This is the appreciation on which the evaluation is summarized."),
        'survey_request_ids': fields.one2many('hr.evaluation.interview', 'evaluation_id', 'Appraisal Forms'),
        'plan_id': fields.many2one('hr_evaluation.plan', 'Plan', required=True),
        'state': fields.selection([
            ('draft', 'New'),
            ('cancel', 'Cancelled'),
            ('wait', 'Plan In Progress'),
            ('progress', 'Waiting Appreciation'),
            ('done', 'Done'),
        ], 'Status', required=True, readonly=True, copy=False),
        'date_close': fields.date('Ending Date', select=True),
    }
    _defaults = {
        # Default deadline: one month from today (date-only precision).
        'date': lambda *a: (parser.parse(datetime.now().strftime('%Y-%m-%d')) + relativedelta(months=+1)).strftime('%Y-%m-%d'),
        'state': lambda *a: 'draft',
    }
    def name_get(self, cr, uid, ids, context=None):
        """Display name is '<plan name> / <employee name>'."""
        if not ids:
            return []
        reads = self.browse(cr, uid, ids, context=context)
        res = []
        for record in reads:
            name = record.plan_id.name
            employee = record.employee_id.name_related
            res.append((record['id'], name + ' / ' + employee))
        return res
    def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
        """Propose the selected employee's own appraisal plan."""
        vals = {}
        vals['plan_id'] = False
        if employee_id:
            employee_obj = self.pool.get('hr.employee')
            for employee in employee_obj.browse(cr, uid, [employee_id], context=context):
                if employee and employee.evaluation_plan_id and employee.evaluation_plan_id.id:
                    vals.update({'plan_id': employee.evaluation_plan_id.id})
        return {'value': vals}
    def button_plan_in_progress(self, cr, uid, ids, context=None):
        """Create one interview per phase/reviewer and move to 'wait'."""
        hr_eval_inter_obj = self.pool.get('hr.evaluation.interview')
        if context is None:
            context = {}
        for evaluation in self.browse(cr, uid, ids, context=context):
            wait = False
            for phase in evaluation.plan_id.phase_ids:
                # Who answers this phase's survey:
                #   bottom-up       -> the employee's subordinates
                #   top-down/final  -> the employee's manager
                #   self            -> the employee themselves
                children = []
                if phase.action == "bottom-up":
                    children = evaluation.employee_id.child_ids
                elif phase.action in ("top-down", "final"):
                    if evaluation.employee_id.parent_id:
                        children = [evaluation.employee_id.parent_id]
                elif phase.action == "self":
                    children = [evaluation.employee_id]
                for child in children:
                    int_id = hr_eval_inter_obj.create(cr, uid, {
                        'evaluation_id': evaluation.id,
                        'phase_id': phase.id,
                        'deadline': (parser.parse(datetime.now().strftime('%Y-%m-%d')) + relativedelta(months=+1)).strftime('%Y-%m-%d'),
                        'user_id': child.user_id.id,
                    }, context=context)
                if phase.wait:
                    wait = True
                # NOTE(review): 'int_id' and 'child' are read below, OUTSIDE
                # the 'for child' loop: only the *last* created interview is
                # started/mailed, and an empty 'children' raises NameError (or
                # silently reuses the previous phase's values).  Confirm
                # whether these blocks were meant to live inside the loop.
                if not wait:
                    hr_eval_inter_obj.survey_req_waiting_answer(cr, uid, [int_id], context=context)
                if (not wait) and phase.mail_feature:
                    body = phase.mail_body % {'employee_name': child.name, 'user_signature': child.user_id.signature,
                                              'eval_name': phase.survey_id.title, 'date': time.strftime('%Y-%m-%d'), 'time': time}
                    sub = phase.email_subject
                    if child.work_email:
                        vals = {'state': 'outgoing',
                                'subject': sub,
                                'body_html': '<pre>%s</pre>' % body,
                                'email_to': child.work_email,
                                'email_from': evaluation.employee_id.work_email}
                        self.pool.get('mail.mail').create(cr, uid, vals, context=context)
        self.write(cr, uid, ids, {'state': 'wait'}, context=context)
        return True
    def button_final_validation(self, cr, uid, ids, context=None):
        """Move to 'progress'; require every interview done or cancelled."""
        request_obj = self.pool.get('hr.evaluation.interview')
        self.write(cr, uid, ids, {'state': 'progress'}, context=context)
        for evaluation in self.browse(cr, uid, ids, context=context):
            # Keep the employee's manager subscribed to the discussion thread.
            if evaluation.employee_id and evaluation.employee_id.parent_id and evaluation.employee_id.parent_id.user_id:
                self.message_subscribe_users(cr, uid, [evaluation.id], user_ids=[evaluation.employee_id.parent_id.user_id.id], context=context)
            if len(evaluation.survey_request_ids) != len(request_obj.search(cr, uid, [('evaluation_id', '=', evaluation.id), ('state', 'in', ['done', 'cancel'])], context=context)):
                raise osv.except_osv(_('Warning!'), _("You cannot change state, because some appraisal forms have not been completed."))
        return True
    def button_done(self, cr, uid, ids, context=None):
        """Close the appraisal and stamp today's date."""
        self.write(cr, uid, ids, {'state': 'done', 'date_close': time.strftime('%Y-%m-%d')}, context=context)
        return True
    def button_cancel(self, cr, uid, ids, context=None):
        """Cancel the appraisal and all of its survey requests."""
        interview_obj = self.pool.get('hr.evaluation.interview')
        # Only the first record's interviews are cancelled (historic
        # single-record button behavior).
        evaluation = self.browse(cr, uid, ids[0], context)
        interview_obj.survey_req_cancel(cr, uid, [r.id for r in evaluation.survey_request_ids])
        self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
        return True
    def button_draft(self, cr, uid, ids, context=None):
        """Reset the appraisal to draft."""
        self.write(cr, uid, ids, {'state': 'draft'}, context=context)
        return True
    def write(self, cr, uid, ids, vals, context=None):
        """Keep followers and interview deadlines in sync on writes."""
        # New employee -> auto-follow by the employee's manager partner.
        if vals.get('employee_id'):
            employee_id = self.pool.get('hr.employee').browse(cr, uid, vals.get('employee_id'), context=context)
            if employee_id.parent_id and employee_id.parent_id.user_id:
                vals['message_follower_ids'] = [(4, employee_id.parent_id.user_id.partner_id.id)]
        # Deadline change propagates to every linked interview.
        if 'date' in vals:
            new_vals = {'deadline': vals.get('date')}
            obj_hr_eval_iterview = self.pool.get('hr.evaluation.interview')
            for evaluation in self.browse(cr, uid, ids, context=context):
                for survey_req in evaluation.survey_request_ids:
                    obj_hr_eval_iterview.write(cr, uid, [survey_req.id], new_vals, context=context)
        return super(hr_evaluation, self).write(cr, uid, ids, vals, context=context)
class hr_evaluation_interview(osv.Model):
    """One appraisal interview: links an evaluation phase to a reviewer and
    to the survey.user_input record that collects the answers."""
    _name = 'hr.evaluation.interview'
    _inherit = 'mail.thread'
    _rec_name = 'user_to_review_id'
    _description = 'Appraisal Interview'
    _columns = {
        'request_id': fields.many2one('survey.user_input', 'Survey Request', ondelete='cascade', readonly=True),
        'evaluation_id': fields.many2one('hr_evaluation.evaluation', 'Appraisal Plan', required=True),
        'phase_id': fields.many2one('hr_evaluation.plan.phase', 'Appraisal Phase', required=True),
        'user_to_review_id': fields.related('evaluation_id', 'employee_id', type="many2one", relation="hr.employee", string="Employee to evaluate"),
        'user_id': fields.many2one('res.users', 'Interviewer'),
        'state': fields.selection([('draft', "Draft"),
                                   ('waiting_answer', "In progress"),
                                   ('done', "Done"),
                                   ('cancel', "Cancelled")],
                                  string="State", required=True, copy=False),
        'survey_id': fields.related('phase_id', 'survey_id', string="Appraisal Form", type="many2one", relation="survey.survey"),
        'deadline': fields.related('request_id', 'deadline', type="datetime", string="Deadline"),
    }
    _defaults = {
        'state': 'draft'
    }
    def create(self, cr, uid, vals, context=None):
        """Create the interview and its backing survey.user_input.

        The survey request is created first (deadline defaults to 28 days
        from now when not provided) and linked via 'request_id'.
        """
        phase_obj = self.pool.get('hr_evaluation.plan.phase')
        survey_id = phase_obj.read(cr, uid, vals.get('phase_id'), fields=['survey_id'], context=context)['survey_id'][0]
        if vals.get('user_id'):
            user_obj = self.pool.get('res.users')
            partner_id = user_obj.read(cr, uid, vals.get('user_id'), fields=['partner_id'], context=context)['partner_id'][0]
        else:
            partner_id = None
        user_input_obj = self.pool.get('survey.user_input')
        if not vals.get('deadline'):
            vals['deadline'] = (datetime.now() + timedelta(days=28)).strftime(DF)
        ret = user_input_obj.create(cr, uid, {'survey_id': survey_id,
                                              'deadline': vals.get('deadline'),
                                              'type': 'link',
                                              'partner_id': partner_id}, context=context)
        vals['request_id'] = ret
        return super(hr_evaluation_interview, self).create(cr, uid, vals, context=context)
    def name_get(self, cr, uid, ids, context=None):
        """Display name is the phase's survey title."""
        if not ids:
            return []
        reads = self.browse(cr, uid, ids, context=context)
        res = []
        for record in reads:
            name = record.survey_id.title
            res.append((record['id'], name))
        return res
    def survey_req_waiting_answer(self, cr, uid, ids, context=None):
        """Re-send the survey request and mark the interview in progress."""
        request_obj = self.pool.get('survey.user_input')
        for interview in self.browse(cr, uid, ids, context=context):
            if interview.request_id:
                request_obj.action_survey_resent(cr, uid, [interview.request_id.id], context=context)
            self.write(cr, uid, interview.id, {'state': 'waiting_answer'}, context=context)
        return True
    def survey_req_done(self, cr, uid, ids, context=None):
        """Mark interviews done and launch the next waiting draft sibling.

        NOTE(review): 'wating_id' (sic) keeps only the *last* draft sibling
        seen, and the loop variable 'id' shadows the builtin -- behavior
        deliberately kept as-is here.
        """
        for id in self.browse(cr, uid, ids, context=context):
            flag = False
            wating_id = 0
            if not id.evaluation_id.id:
                raise osv.except_osv(_('Warning!'), _("You cannot start evaluation without Appraisal."))
            records = id.evaluation_id.survey_request_ids
            for child in records:
                if child.state == "draft":
                    wating_id = child.id
                    continue
                if child.state != "done":
                    flag = True
            # Start the pending draft only when no sibling is still running.
            if not flag and wating_id:
                self.survey_req_waiting_answer(cr, uid, [wating_id], context=context)
        self.write(cr, uid, ids, {'state': 'done'}, context=context)
        return True
    def survey_req_cancel(self, cr, uid, ids, context=None):
        """Cancel the interviews."""
        self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
        return True
    def action_print_survey(self, cr, uid, ids, context=None):
        """ If response is available then print this response otherwise print survey form (print template of the survey) """
        context = dict(context or {})
        interview = self.browse(cr, uid, ids, context=context)[0]
        survey_obj = self.pool.get('survey.survey')
        response_obj = self.pool.get('survey.user_input')
        response = response_obj.browse(cr, uid, interview.request_id.id, context=context)
        context.update({'survey_token': response.token})
        return survey_obj.action_print_survey(cr, uid, [interview.survey_id.id], context=context)
    def action_start_survey(self, cr, uid, ids, context=None):
        """Open the survey for answering, using the stored response token."""
        context = dict(context or {})
        interview = self.browse(cr, uid, ids, context=context)[0]
        survey_obj = self.pool.get('survey.survey')
        response_obj = self.pool.get('survey.user_input')
        # grab the token of the response and start surveying
        response = response_obj.browse(cr, uid, interview.request_id.id, context=context)
        context.update({'survey_token': response.token})
        return survey_obj.action_start_survey(cr, uid, [interview.survey_id.id], context=context)
| agpl-3.0 |
lzjun567/mongo-connector | mongo_connector/doc_managers/solr_doc_manager.py | 1 | 12168 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for the Solr search engine, but the intent
is that this file can be used as an example to add on different backends.
To extend this to other systems, simply implement the exact same class and
replace the method definitions with API calls for the desired backend.
"""
import itertools
import json
import re

from pysolr import Solr, SolrError

from mongo_connector import errors
from mongo_connector.constants import (DEFAULT_COMMIT_INTERVAL,
                                       DEFAULT_MAX_BULK)
from mongo_connector.util import retry_until_ok
from mongo_connector.doc_managers import DocManagerBase, exception_wrapper
from mongo_connector.doc_managers.formatters import DocumentFlattener
# pysolr only has 1 exception: SolrError.
# Decorator mapping pysolr's SolrError onto mongo-connector's OperationFailed.
wrap_exceptions = exception_wrapper({
    SolrError: errors.OperationFailed})

# Luke request handler endpoint used to introspect the live Solr schema.
ADMIN_URL = 'admin/luke?show=schema&wt=json'

decoder = json.JSONDecoder()
class DocManager(DocManagerBase):
    """Solr implementation of mongo-connector's DocManager interface.

    The DocManager class creates a connection to the backend engine and
    adds/removes documents, and in the case of rollback, searches for them.
    Documents are keyed by the configured unique key so that repeated updates
    to the same document converge on its latest version.
    """

    def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
                 unique_key='_id', chunk_size=DEFAULT_MAX_BULK, **kwargs):
        """Verify Solr URL and establish a connection."""
        self.solr = Solr(url)
        self.unique_key = unique_key
        # pysolr expresses commit intervals in milliseconds.
        if auto_commit_interval is not None:
            self.auto_commit_interval = auto_commit_interval * 1000
        else:
            self.auto_commit_interval = None
        self.chunk_size = chunk_size
        self.field_list = []
        self._build_fields()
        self._formatter = DocumentFlattener()

    def _parse_fields(self, result, field_name):
        """Return the field names under result['schema'][field_name]."""
        # Dict keys are already unique, so no membership check is needed.
        return list(result.get('schema', {}).get(field_name, {}))

    @wrap_exceptions
    def _build_fields(self):
        """Build the lists of valid static and dynamic schema fields."""
        declared_fields = self.solr._send_request('get', ADMIN_URL)
        result = decoder.decode(declared_fields)
        self.field_list = self._parse_fields(result, 'fields')

        # Build regular expressions to match dynamic fields.
        # Dynamic field names may have exactly one wildcard, either at
        # the beginning or the end of the name.
        self._dynamic_field_regexes = []
        for wc_pattern in self._parse_fields(result, 'dynamicFields'):
            if wc_pattern[0] == "*":
                self._dynamic_field_regexes.append(
                    re.compile(r".*%s\Z" % wc_pattern[1:]))
            elif wc_pattern[-1] == "*":
                self._dynamic_field_regexes.append(
                    re.compile(r"\A%s.*" % wc_pattern[:-1]))

    def _clean_doc(self, doc):
        """Reformat *doc* for insertion into Solr.

        Translates MongoDB's '_id' into the configured unique key, then:
        - removes extraneous fields that aren't defined in schema.xml
        - unwinds arrays in order to find and later flatten sub-documents
        - flattens the document so that there are no sub-documents, and every
          value is associated with its dot-separated path of keys

        An example:
            {"a": 2, "b": {"c": {"d": 5}}, "e": [6, 7, 8]}
        becomes:
            {"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
        """
        # Translate the _id field to whatever unique key we're using.
        # _id may not exist in the doc, if we retrieved it from Solr
        # as part of update.
        if '_id' in doc:
            doc[self.unique_key] = doc.pop("_id")

        # Solr cannot index fields within sub-documents, so flatten documents
        # with the dot-separated path to each value as the respective key.
        flat_doc = self._formatter.format_document(doc)

        # Only include fields that are explicitly provided in the schema or
        # match one of the dynamic field patterns, if we were able to
        # retrieve the schema.
        if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
            def include_field(field):
                return field in self.field_list or any(
                    regex.match(field) for regex in self._dynamic_field_regexes
                )
            return dict((k, v) for k, v in flat_doc.items() if include_field(k))
        return flat_doc

    def stop(self):
        """Stop the instance (no background work to tear down)."""
        pass

    def apply_update(self, doc, update_spec):
        """Override DocManagerBase.apply_update to work on flat documents.

        *doc* is the existing (flattened) document from the Solr index;
        *update_spec* carries the changed fields from MongoDB.
        """
        # Replace the whole document when the spec is not a $set/$unset delta.
        if '$set' not in update_spec and '$unset' not in update_spec:
            # update_spec contains the new document in its entirety
            update_spec['_ts'] = doc['_ts']
            update_spec['ns'] = doc['ns']
            # 'doc' came from the index, which stores 'id' rather than '_id';
            # upstream mongo-connector reads '_id' here, which would KeyError.
            # update_spec['_id'] = doc['_id']
            update_spec['id'] = doc['id']
            return update_spec

        for to_set in update_spec.get("$set", []):
            value = update_spec['$set'][to_set]
            # Find dotted-path keys shadowed by the new value, remove them,
            # then put the value at its key.
            keys_to_pop = []
            for key in doc:
                if key.startswith(to_set):
                    if key == to_set or key[len(to_set)] == '.':
                        keys_to_pop.append(key)
            for key in keys_to_pop:
                doc.pop(key)
            doc[to_set] = value

        for to_unset in update_spec.get("$unset", []):
            # MongoDB < 2.5.2 reports $unset for fields that don't exist
            # within the document being updated.
            keys_to_pop = []
            for key in doc:
                if key.startswith(to_unset):
                    if key == to_unset or key[len(to_unset)] == '.':
                        keys_to_pop.append(key)
            for key in keys_to_pop:
                doc.pop(key)
        return doc

    @wrap_exceptions
    def update(self, doc, update_spec):
        """Apply *update_spec* to the indexed document matching doc['_id']."""
        # Commit outstanding changes so that the document to be updated is
        # the same version to which the changes apply.
        self.commit()
        query = "%s:%s" % (self.unique_key, str(doc['_id']))
        results = self.solr.search(query)
        if not len(results):
            # Document may not be retrievable yet
            self.commit()
            results = self.solr.search(query)
        # Read the full stored document back from the index.
        # Results is an iterable containing only 1 result.
        # NOTE(review): if the document is still absent after the second
        # commit, 'updated' is unbound and the return raises NameError.
        for doc in results:
            updated = self.apply_update(doc, update_spec)
            # A _version_ of 0 will always apply the update
            updated['_version_'] = 0
            self.upsert(updated)
        return updated

    @wrap_exceptions
    def upsert(self, doc):
        """Update or insert a single document into Solr."""
        if self.auto_commit_interval is not None:
            self.solr.add([self._clean_doc(doc)],
                          commit=(self.auto_commit_interval == 0),
                          commitWithin=str(self.auto_commit_interval))
        else:
            self.solr.add([self._clean_doc(doc)], commit=False)

    @wrap_exceptions
    def bulk_upsert(self, docs):
        """Update or insert multiple documents into Solr.

        *docs* may be any iterable; it is consumed lazily in chunks of
        self.chunk_size.
        """
        if self.auto_commit_interval is not None:
            add_kwargs = {
                "commit": (self.auto_commit_interval == 0),
                "commitWithin": str(self.auto_commit_interval)
            }
        else:
            add_kwargs = {"commit": False}

        cleaned = (self._clean_doc(d) for d in docs)
        if self.chunk_size > 0:
            # islice instead of 'list(next(cleaned) for i in range(...))':
            # the old pattern relied on StopIteration escaping a generator
            # expression, which PEP 479 turns into a RuntimeError.
            batch = list(itertools.islice(cleaned, self.chunk_size))
            while batch:
                self.solr.add(batch, **add_kwargs)
                batch = list(itertools.islice(cleaned, self.chunk_size))
        else:
            self.solr.add(cleaned, **add_kwargs)

    @wrap_exceptions
    def remove(self, doc):
        """Remove from Solr the document whose _id matches *doc*."""
        self.solr.delete(id=str(doc["_id"]),
                         commit=(self.auto_commit_interval == 0))

    @wrap_exceptions
    def _remove(self):
        """Remove everything from the index."""
        self.solr.delete(q='*:*', commit=(self.auto_commit_interval == 0))

    @wrap_exceptions
    def _stream_search(self, query):
        """Helper generator for iterating over Solr search results."""
        for doc in self.solr.search(query, rows=100000000):
            if self.unique_key != "_id":
                doc["_id"] = doc.pop(self.unique_key)
            yield doc

    @wrap_exceptions
    def search(self, start_ts, end_ts):
        """Query Solr for documents whose _ts lies in [start_ts, end_ts]."""
        query = '_ts: [%s TO %s]' % (start_ts, end_ts)
        return self._stream_search(query)

    @wrap_exceptions
    def _search(self, query):
        """For test purposes only: run *query* against Solr."""
        return self._stream_search(query)

    def commit(self):
        """Force a commit, retrying until Solr acknowledges it."""
        retry_until_ok(self.solr.commit)

    @wrap_exceptions
    def get_last_doc(self):
        """Return the most recently indexed document (by _ts), or None."""
        # Search everything, sort by descending timestamp, return 1 row.
        try:
            result = self.solr.search('*:*', sort='_ts desc', rows=1)
        except ValueError:
            return None
        for r in result:
            r['_id'] = r.pop(self.unique_key)
            return r
| apache-2.0 |
deniszh/carbon | lib/carbon/aggregator/rules.py | 4 | 5209 | import re
from math import floor, ceil
from os.path import exists, getmtime
from twisted.internet.task import LoopingCall
from cachetools import TTLCache, LRUCache
from carbon import log
from carbon.conf import settings
from carbon.aggregator.buffers import BufferManager
def get_cache():
    """Build the metric-name cache configured by the CACHE_METRIC_NAMES_*
    settings: a TTLCache when both a TTL and a size limit are set, an
    LRUCache when only a size limit is set, and an unbounded dict otherwise.
    """
    expiry = settings.CACHE_METRIC_NAMES_TTL
    max_entries = settings.CACHE_METRIC_NAMES_MAX
    if max_entries <= 0:
        return {}
    if expiry > 0:
        return TTLCache(max_entries, expiry)
    return LRUCache(max_entries)
class RuleManager(object):
    """Loads aggregation rules from a file and keeps them fresh.

    read_from() starts a LoopingCall that re-invokes read_rules() every 10
    seconds; the file is only re-parsed when its mtime advances.
    """
    def __init__(self):
        self.rules = []
        self.rules_file = None
        self.read_task = LoopingCall(self.read_rules)
        self.rules_last_read = 0.0

    def clear(self):
        """Drop all loaded rules."""
        self.rules = []

    def read_from(self, rules_file):
        """Load rules from *rules_file* and schedule periodic re-reads."""
        self.rules_file = rules_file
        self.read_rules()
        self.read_task.start(10, now=False)

    def read_rules(self):
        """(Re)parse the rules file if it changed since the last read."""
        if not exists(self.rules_file):
            self.clear()
            return

        # Only read if the rules file has been modified
        try:
            mtime = getmtime(self.rules_file)
        except OSError:
            log.err("Failed to get mtime of %s" % self.rules_file)
            return
        if mtime <= self.rules_last_read:
            return

        # Read new rules.  'with' guarantees the file handle is closed; the
        # previous implementation left it to garbage collection.
        log.aggregator("reading new aggregation rules from %s" % self.rules_file)
        new_rules = []
        with open(self.rules_file) as rules_fh:
            for line in rules_fh:
                line = line.strip()
                if line.startswith('#') or not line:
                    continue
                rule = self.parse_definition(line)
                new_rules.append(rule)

        log.aggregator("clearing aggregation buffers")
        BufferManager.clear()
        self.rules = new_rules
        self.rules_last_read = mtime

    def parse_definition(self, line):
        """Parse 'output (freq) = method input' into an AggregationRule."""
        try:
            left_side, right_side = line.split('=', 1)
            output_pattern, frequency = left_side.split()
            method, input_pattern = right_side.split()
            frequency = int(frequency.lstrip('(').rstrip(')'))
            return AggregationRule(input_pattern, output_pattern, method, frequency)
        except ValueError:
            log.err("Failed to parse rule in %s, line: %s" % (self.rules_file, line))
            raise
class AggregationRule(object):
    """A single aggregation rule: matches input metric names against a
    pattern and derives the aggregate metric name from a template."""
    def __init__(self, input_pattern, output_pattern, method, frequency):
        self.input_pattern = input_pattern
        self.output_pattern = output_pattern
        self.method = method
        self.frequency = int(frequency)
        if method not in AGGREGATION_METHODS:
            raise ValueError("Invalid aggregation method '%s'" % method)
        self.aggregation_func = AGGREGATION_METHODS[method]
        self.build_regex()
        self.build_template()
        # metric name -> aggregate name (or None for non-matches); may be a
        # TTLCache, LRUCache or plain dict depending on settings.
        self.cache = get_cache()

    def get_aggregate_metric(self, metric_path):
        """Return the aggregate metric name for *metric_path*, or None.

        Results (including None misses) are memoized in self.cache.
        """
        # Single EAFP lookup instead of the previous 'in' check followed by a
        # second indexing, which did the work twice and could still race with
        # TTL expiry between the two operations.
        try:
            return self.cache[metric_path]
        except KeyError:
            pass

        match = self.regex.match(metric_path)
        result = None
        if match:
            extracted_fields = match.groupdict()
            try:
                result = self.output_template % extracted_fields
            except TypeError:
                log.err("Failed to interpolate template %s with fields %s" % (
                    self.output_template, extracted_fields))

        self.cache[metric_path] = result
        return result

    def build_regex(self):
        """Compile self.input_pattern into self.regex.

        '<<field>>' captures dot-spanning text, '<field>' captures a single
        path component, and '*' matches within one component.
        NOTE(review): literal parts of the pattern are not re.escape()d, so
        regex metacharacters in rule patterns are interpreted as regex.
        """
        input_pattern_parts = self.input_pattern.split('.')
        regex_pattern_parts = []
        for input_part in input_pattern_parts:
            if '<<' in input_part and '>>' in input_part:
                i = input_part.find('<<')
                j = input_part.find('>>')
                pre = input_part[:i]
                post = input_part[j + 2:]
                field_name = input_part[i + 2:j]
                regex_part = '%s(?P<%s>.+?)%s' % (pre, field_name, post)
            else:
                i = input_part.find('<')
                j = input_part.find('>')
                if i > -1 and j > i:
                    pre = input_part[:i]
                    post = input_part[j + 1:]
                    field_name = input_part[i + 1:j]
                    regex_part = '%s(?P<%s>[^.]+?)%s' % (pre, field_name, post)
                elif input_part == '*':
                    regex_part = '[^.]+'
                else:
                    regex_part = input_part.replace('*', '[^.]*')
            regex_pattern_parts.append(regex_part)
        regex_pattern = '\\.'.join(regex_pattern_parts) + '$'
        self.regex = re.compile(regex_pattern)

    def build_template(self):
        """Turn '<field>' markers into '%(field)s' interpolation slots."""
        self.output_template = self.output_pattern.replace('<', '%(').replace('>', ')s')
def avg(values):
    """Arithmetic mean of *values*; None when the sequence is empty."""
    if not values:
        return None
    return float(sum(values)) / len(values)
def count(values):
    """Number of datapoints in *values*; None when the sequence is empty."""
    return len(values) if values else None
def percentile(factor):
    """Build an aggregation function for the given percentile (0.0-1.0).

    The returned function sorts its input and linearly interpolates
    between the two nearest ranks; it returns None for empty input.
    """
    def func(values):
        if not values:
            return None
        ordered = sorted(values)
        rank = factor * (len(ordered) - 1)
        lo = int(floor(rank))
        hi = int(ceil(rank))
        if lo == hi:
            return ordered[lo]
        # Weight each neighbor by its distance from the fractional rank.
        return ordered[lo] * (hi - rank) + ordered[hi] * (rank - lo)
    return func
# Maps rule-file method names to aggregation callables. Each callable takes
# the list of buffered values for an interval and returns the aggregate
# (avg/count/percentile return None for empty input; sum/min/max assume
# non-empty input).
AGGREGATION_METHODS = {
    'sum': sum,
    'avg': avg,
    'min': min,
    'max': max,
    'p50': percentile(0.50),
    'p75': percentile(0.75),
    'p80': percentile(0.80),
    'p90': percentile(0.90),
    'p95': percentile(0.95),
    'p99': percentile(0.99),
    'p999': percentile(0.999),
    'count': count,
}
# Importable singleton: the class name is deliberately rebound to its sole
# instance, so importers receive a ready-to-use manager object rather than
# the class itself.
RuleManager = RuleManager()
| apache-2.0 |
googleapis/googleapis-gen | google/cloud/gkehub/v1/gkehub-v1-py/google/cloud/gkehub/multiclusteringress_v1/types/multiclusteringress.py | 1 | 1259 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Registers this module's message types with proto-plus under the given
# proto package; `manifest` lists every message defined below.
__protobuf__ = proto.module(
    package='google.cloud.gkehub.multiclusteringress.v1',
    manifest={
        'FeatureSpec',
    },
)


class FeatureSpec(proto.Message):
    r"""**Multi-cluster Ingress**: The configuration for the
    MultiClusterIngress feature.

    Attributes:
        config_membership (str):
            Fully-qualified Membership name which hosts the
            MultiClusterIngress CRD. Example:
            ``projects/foo-proj/locations/global/memberships/bar``
    """

    # Proto field number 1; carries the Membership resource name above.
    config_membership = proto.Field(
        proto.STRING,
        number=1,
    )


# Public API: exactly the message names declared in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
tzaffi/git-in-practice-repo | book/lib/python2.7/site-packages/setuptools/command/build_py.py | 110 | 8495 | import os
import sys
import fnmatch
import textwrap
import distutils.command.build_py as orig
from distutils.util import convert_path
from glob import glob
# Prefer the real 2to3 mixin; when lib2to3 support is unavailable, fall back
# to a stub so build_py can call run_2to3() unconditionally.
try:
    from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
    class Mixin2to3:
        def run_2to3(self, files, doctests=True):
            "do nothing"
class build_py(orig.build_py, Mixin2to3):
    """Enhanced 'build_py' command that includes data files with packages

    The data files are specified via a 'package_data' argument to 'setup()'.
    See 'setuptools.dist.Distribution' for more details.

    Also, this version of the 'build_py' command allows you to specify both
    'py_modules' and 'packages' in the same setup operation.
    """

    def finalize_options(self):
        orig.build_py.finalize_options(self)
        self.package_data = self.distribution.package_data
        self.exclude_package_data = self.distribution.exclude_package_data or {}
        # Drop any previously computed lazy 'data_files' so __getattr__
        # recomputes it against the finalized options.
        if 'data_files' in self.__dict__: del self.__dict__['data_files']
        self.__updated_files = []
        self.__doctests_2to3 = []

    def run(self):
        """Build modules, packages, and copy data files to build directory"""
        if not self.py_modules and not self.packages:
            return

        if self.py_modules:
            self.build_modules()

        if self.packages:
            self.build_packages()
            self.build_package_data()

        # Convert copied sources, then doctests, with 2to3 (no-op when the
        # Mixin2to3 stub is in use).
        self.run_2to3(self.__updated_files, False)
        self.run_2to3(self.__updated_files, True)
        self.run_2to3(self.__doctests_2to3, True)

        # Only compile actual .py files, using our base class' idea of what our
        # output files are.
        self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))

    def __getattr__(self, attr):
        # 'data_files' is expensive to compute, so it is materialized lazily
        # on first attribute access and cached on the instance.
        if attr=='data_files':  # lazily compute data files
            self.data_files = files = self._get_data_files()
            return files
        return orig.build_py.__getattr__(self,attr)

    def build_module(self, module, module_file, package):
        # Track which files were actually (re)copied so 2to3 only runs on them.
        outfile, copied = orig.build_py.build_module(self, module, module_file, package)
        if copied:
            self.__updated_files.append(outfile)
        return outfile, copied

    def _get_data_files(self):
        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
        self.analyze_manifest()
        data = []
        for package in self.packages or ():
            # Locate package source directory
            src_dir = self.get_package_dir(package)

            # Compute package build directory
            build_dir = os.path.join(*([self.build_lib] + package.split('.')))

            # Length of path to strip from found files
            plen = len(src_dir)+1

            # Strip directory from globbed filenames
            filenames = [
                file[plen:] for file in self.find_data_files(package, src_dir)
            ]
            data.append((package, src_dir, build_dir, filenames))
        return data

    def find_data_files(self, package, src_dir):
        """Return filenames for package's data files in 'src_dir'"""
        # Global ('' key) patterns apply to every package, in addition to the
        # package-specific ones.
        globs = (self.package_data.get('', [])
                 + self.package_data.get(package, []))
        files = self.manifest_files.get(package, [])[:]
        for pattern in globs:
            # Each pattern has to be converted to a platform-specific path
            files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
        return self.exclude_data_files(package, src_dir, files)

    def build_package_data(self):
        """Copy data files into build directory"""
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                self.mkpath(os.path.dirname(target))
                srcfile = os.path.join(src_dir, filename)
                outf, copied = self.copy_file(srcfile, target)
                srcfile = os.path.abspath(srcfile)
                # Remember copied doctest files flagged for 2to3 conversion.
                if copied and srcfile in self.distribution.convert_2to3_doctests:
                    self.__doctests_2to3.append(outf)

    def analyze_manifest(self):
        # Build self.manifest_files: package -> list of data-file paths taken
        # from the egg_info file list (only when include_package_data is on).
        self.manifest_files = mf = {}
        if not self.distribution.include_package_data:
            return
        src_dirs = {}
        for package in self.packages or ():
            # Locate package source directory
            src_dirs[assert_relative(self.get_package_dir(package))] = package

        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        for path in ei_cmd.filelist.files:
            d,f = os.path.split(assert_relative(path))
            prev = None
            oldf = f
            # Walk upward until we hit a known package source directory.
            while d and d!=prev and d not in src_dirs:
                prev = d
                d, df = os.path.split(d)
                f = os.path.join(df, f)
            if d in src_dirs:
                if path.endswith('.py') and f==oldf:
                    continue  # it's a module, not data
                mf.setdefault(src_dirs[d],[]).append(path)

    def get_data_files(self): pass  # kludge 2.4 for lazy computation

    if sys.version<"2.4":    # Python 2.4 already has this code
        def get_outputs(self, include_bytecode=1):
            """Return complete list of files copied to the build directory

            This includes both '.py' files and data files, as well as '.pyc'
            and '.pyo' files if 'include_bytecode' is true.  (This method is
            needed for the 'install_lib' command to do its job properly, and to
            generate a correct installation manifest.)
            """
            return orig.build_py.get_outputs(self, include_bytecode) + [
                os.path.join(build_dir, filename)
                for package, src_dir, build_dir,filenames in self.data_files
                for filename in filenames
            ]

    def check_package(self, package, package_dir):
        """Check namespace packages' __init__ for declare_namespace"""
        try:
            return self.packages_checked[package]
        except KeyError:
            pass

        init_py = orig.build_py.check_package(self, package, package_dir)
        self.packages_checked[package] = init_py

        if not init_py or not self.distribution.namespace_packages:
            return init_py

        # Only validate packages declared (or whose parent is declared) as
        # namespace packages.
        for pkg in self.distribution.namespace_packages:
            if pkg==package or pkg.startswith(package+'.'):
                break
        else:
            return init_py

        f = open(init_py,'rbU')
        if 'declare_namespace'.encode() not in f.read():
            from distutils.errors import DistutilsError
            raise DistutilsError(
                "Namespace package problem: %s is a namespace package, but its\n"
                "__init__.py does not call declare_namespace()! Please fix it.\n"
                '(See the setuptools manual under "Namespace Packages" for '
                "details.)\n" % (package,)
            )
        f.close()
        return init_py

    def initialize_options(self):
        self.packages_checked={}
        orig.build_py.initialize_options(self)

    def get_package_dir(self, package):
        res = orig.build_py.get_package_dir(self, package)
        # Honor an alternate source root (e.g. when setup.py lives outside
        # the source tree).
        if self.distribution.src_root is not None:
            return os.path.join(self.distribution.src_root, res)
        return res

    def exclude_data_files(self, package, src_dir, files):
        """Filter filenames for package's data files in 'src_dir'"""
        globs = (self.exclude_package_data.get('', [])
                 + self.exclude_package_data.get(package, []))
        bad = []
        for pattern in globs:
            bad.extend(
                fnmatch.filter(
                    files, os.path.join(src_dir, convert_path(pattern))
                )
            )
        bad = dict.fromkeys(bad)
        seen = {}
        return [
            f for f in files if f not in bad
                and f not in seen and seen.setdefault(f,1)   # ditch dupes
        ]
def assert_relative(path):
    """Return *path* unchanged when it is relative.

    Raises DistutilsSetupError for absolute paths, since setup() arguments
    must always be relative to the setup.py directory.
    """
    if os.path.isabs(path):
        # Import lazily so the happy path stays dependency-free.
        from distutils.errors import DistutilsSetupError
        msg = textwrap.dedent("""
            Error: setup script specifies an absolute path:

                %s

            setup() arguments must *always* be /-separated paths relative to the
            setup.py directory, *never* absolute paths.
            """).lstrip() % path
        raise DistutilsSetupError(msg)
    return path
| mit |
samdoran/ansible | lib/ansible/modules/web_infrastructure/nginx_status_facts.py | 69 | 5398 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nginx_status_facts
short_description: Retrieve nginx status facts.
description:
- Gathers facts from nginx from an URL having C(stub_status) enabled.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
url:
description:
- URL of the nginx status.
required: true
timeout:
description:
- HTTP connection timeout in seconds.
required: false
default: 10
notes:
- See http://nginx.org/en/docs/http/ngx_http_stub_status_module.html for more information.
'''
EXAMPLES = '''
# Gather status facts from nginx on localhost
- name: get current http stats
nginx_status_facts:
url: http://localhost/nginx_status
# Gather status facts from nginx on localhost with a custom timeout of 20 seconds
- name: get current http stats
nginx_status_facts:
url: http://localhost/nginx_status
timeout: 20
'''
RETURN = '''
---
nginx_status_facts.active_connections:
description: Active connections.
returned: success
type: int
sample: 2340
nginx_status_facts.accepts:
description: The total number of accepted client connections.
returned: success
type: int
sample: 81769947
nginx_status_facts.handled:
description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
returned: success
type: int
sample: 81769947
nginx_status_facts.requests:
description: The total number of client requests.
returned: success
type: int
sample: 144332345
nginx_status_facts.reading:
description: The current number of connections where nginx is reading the request header.
returned: success
type: int
sample: 0
nginx_status_facts.writing:
description: The current number of connections where nginx is writing the response back to the client.
returned: success
type: int
sample: 241
nginx_status_facts.waiting:
description: The current number of idle client connections waiting for a request.
returned: success
type: int
sample: 2092
nginx_status_facts.data:
description: HTTP response as is.
returned: success
type: string
sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
class NginxStatusFacts(object):
    """Fetches the nginx ``stub_status`` page and parses it into facts."""

    def __init__(self):
        # Reads params from the module-level AnsibleModule created in main().
        self.url = module.params.get('url')
        self.timeout = module.params.get('timeout')

    def run(self):
        """Return the facts dict; all values are None when the page is empty.

        Fails the module (via fail_json) when the URL yields no response.
        """
        result = {
            'nginx_status_facts': {
                'active_connections': None,
                'accepts': None,
                'handled': None,
                'requests': None,
                'reading': None,
                'writing': None,
                'waiting': None,
                'data': None,
            }
        }
        (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
        if not response:
            module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))

        data = response.read()
        if not data:
            return result

        result['nginx_status_facts']['data'] = data
        # NOTE(review): response.read() returns bytes under Python 3 while the
        # pattern below is a str; this presumably targets Python 2 -- verify.
        expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
               r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
        match = re.match(expr, data, re.S)
        if match:
            result['nginx_status_facts']['active_connections'] = int(match.group(1))
            result['nginx_status_facts']['accepts'] = int(match.group(2))
            result['nginx_status_facts']['handled'] = int(match.group(3))
            result['nginx_status_facts']['requests'] = int(match.group(4))
            result['nginx_status_facts']['reading'] = int(match.group(5))
            result['nginx_status_facts']['writing'] = int(match.group(6))
            result['nginx_status_facts']['waiting'] = int(match.group(7))
        return result
def main():
    """Module entry point: build the AnsibleModule, gather facts, exit."""
    # The module object is global so NginxStatusFacts can read params and
    # call fail_json without it being passed around.
    global module
    module = AnsibleModule(
        argument_spec=dict(
            url=dict(required=True),
            timeout=dict(type='int', default=10),
        ),
        supports_check_mode=True,
    )

    nginx_status_facts = NginxStatusFacts().run()
    result = dict(changed=False, ansible_facts=nginx_status_facts)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
danstoner/python_experiments | playing_with_kivy/kivi-examples/widgets/lists/list_composite.py | 1 | 2348 | from kivy.adapters.dictadapter import DictAdapter
from kivy.uix.listview import ListItemButton, ListItemLabel, \
CompositeListItem, ListView
from kivy.uix.gridlayout import GridLayout
from fixtures import integers_dict
class MainView(GridLayout):
    '''Uses :class:`CompositeListItem` for list item views comprised by two
    :class:`ListItemButton`s and one :class:`ListItemLabel`. Illustrates how
    to construct the fairly involved args_converter used with
    :class:`CompositeListItem`.
    '''

    def __init__(self, **kwargs):
        kwargs['cols'] = 2
        super(MainView, self).__init__(**kwargs)

        # This is quite an involved args_converter, so we should go through the
        # details. A CompositeListItem instance is made with the args
        # returned by this converter. The first three, text, size_hint_y,
        # height are arguments for CompositeListItem. The cls_dicts list contains
        # argument sets for each of the member widgets for this composite:
        # ListItemButton and ListItemLabel.
        args_converter = \
            lambda row_index, rec: \
                {'text': rec['text'],
                 'size_hint_y': None,
                 'height': 25,
                 'cls_dicts': [{'cls': ListItemButton,
                                'kwargs': {'text': rec['text']}},
                               {'cls': ListItemLabel,
                                'kwargs': {'text': "Middle-{0}".format(rec['text']),
                                           'is_representing_cls': True}},
                               {'cls': ListItemButton,
                                'kwargs': {'text': rec['text']}}]}

        # NOTE(review): xrange is Python 2 only; would need range() on Python 3.
        item_strings = ["{0}".format(index) for index in xrange(100)]

        dict_adapter = DictAdapter(sorted_keys=item_strings,
                                   data=integers_dict,
                                   args_converter=args_converter,
                                   selection_mode='single',
                                   allow_empty_selection=False,
                                   cls=CompositeListItem)

        # Use the adapter in our ListView:
        list_view = ListView(adapter=dict_adapter)

        self.add_widget(list_view)
# Run the demo standalone under Kivy's touch app runner.
if __name__ == '__main__':
    from kivy.base import runTouchApp
    runTouchApp(MainView(width=800))
| gpl-2.0 |
kawamon/hue | desktop/core/ext-py/Django-1.11.29/tests/fixtures/tests.py | 5 | 42042 | from __future__ import unicode_literals
import os
import sys
import tempfile
import unittest
import warnings
from django.apps import apps
from django.contrib.sites.models import Site
from django.core import management
from django.core.files.temp import NamedTemporaryFile
from django.core.management import CommandError
from django.core.management.commands.dumpdata import ProxyModelWarning
from django.core.serializers.base import ProgressBar
from django.db import IntegrityError, connection
from django.test import (
TestCase, TransactionTestCase, mock, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.encoding import force_text
from .models import (
Article, Category, PrimaryKeyUUIDModel, ProxySpy, Spy, Tag, Visa,
)
class TestCaseFixtureLoadingTests(TestCase):
    # Loaded automatically by TestCase before each test; fixture2 overwrites
    # one of fixture1's articles, leaving three in total.
    fixtures = ['fixture1.json', 'fixture2.json']

    def testClassFixtures(self):
        "Test case has installed 3 fixture objects"
        self.assertEqual(Article.objects.count(), 3)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django conquers world!>',
            '<Article: Copyright is fine the way it is>',
            '<Article: Poker has no place on ESPN>',
        ])
class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests):
    """
    Make sure that subclasses can remove fixtures from parent class (#21089).
    """
    # Overrides the parent's fixture list with an empty one.
    fixtures = []

    def testClassFixtures(self):
        "There were no fixture objects installed"
        self.assertEqual(Article.objects.count(), 0)
class DumpDataAssertMixin(object):
    """Mixin asserting that ``dumpdata`` produces expected serialized output."""

    def _dumpdata_assert(self, args, output, format='json', filename=None,
                         natural_foreign_keys=False, natural_primary_keys=False,
                         use_base_manager=False, exclude_list=None, primary_keys=''):
        """Run ``dumpdata`` with the given options and compare its output.

        ``args`` are the positional app/model labels; ``output`` is the
        expected serialized text. When ``filename`` is given the dump is
        written to (and read back from) a temp file instead of stdout.
        Comparison is format-aware for json and xml, literal otherwise.
        """
        # Use None as the default and normalize here: a mutable default
        # argument ([]) would be shared across all calls.
        if exclude_list is None:
            exclude_list = []
        new_io = six.StringIO()
        if filename:
            filename = os.path.join(tempfile.gettempdir(), filename)
        management.call_command('dumpdata', *args, **{'format': format,
                                                      'stdout': new_io,
                                                      'stderr': new_io,
                                                      'output': filename,
                                                      'use_natural_foreign_keys': natural_foreign_keys,
                                                      'use_natural_primary_keys': natural_primary_keys,
                                                      'use_base_manager': use_base_manager,
                                                      'exclude': exclude_list,
                                                      'primary_keys': primary_keys})
        if filename:
            with open(filename, "r") as f:
                command_output = f.read()
            os.remove(filename)
        else:
            command_output = new_io.getvalue().strip()
        if format == "json":
            self.assertJSONEqual(command_output, output)
        elif format == "xml":
            self.assertXMLEqual(command_output, output)
        else:
            self.assertEqual(command_output, output)
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
def test_loading_and_dumping(self):
    """End-to-end loaddata/dumpdata round trip: JSON and XML fixtures,
    generic-FK tags, M2M permissions, and natural foreign/primary keys."""
    apps.clear_cache()
    Site.objects.all().delete()
    # Load fixture 1. Single JSON file, with two objects.
    management.call_command('loaddata', 'fixture1.json', verbosity=0)
    self.assertQuerysetEqual(Article.objects.all(), [
        '<Article: Time to reform copyright>',
        '<Article: Poker has no place on ESPN>',
    ])

    # Dump the current contents of the database as a JSON fixture
    self._dumpdata_assert(
        ['fixtures'],
        '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
        '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
        'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
        '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
    )

    # Try just dumping the contents of fixtures.Category
    self._dumpdata_assert(
        ['fixtures.Category'],
        '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
        '"title": "News Stories"}}]'
    )

    # ...and just fixtures.Article
    self._dumpdata_assert(
        ['fixtures.Article'],
        '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
        '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
        '"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
    )

    # ...and both
    self._dumpdata_assert(
        ['fixtures.Category', 'fixtures.Article'],
        '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
        '"title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has '
        'no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", '
        '"fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
    )

    # Specify a specific model twice
    self._dumpdata_assert(
        ['fixtures.Article', 'fixtures.Article'],
        (
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
            '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
            '"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
    )

    # Specify a dump that specifies Article both explicitly and implicitly
    self._dumpdata_assert(
        ['fixtures.Article', 'fixtures'],
        '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
        '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
        'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
        '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
    )

    # Specify a dump that specifies Article both explicitly and implicitly,
    # but lists the app first (#22025).
    self._dumpdata_assert(
        ['fixtures', 'fixtures.Article'],
        '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
        '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
        'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
        '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
    )

    # Same again, but specify in the reverse order
    self._dumpdata_assert(
        ['fixtures'],
        '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
        '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no '
        'place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields":'
        ' {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
    )

    # Specify one model from one application, and an entire other application.
    self._dumpdata_assert(
        ['fixtures.Category', 'sites'],
        '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
        '"News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": '
        '"example.com"}}]'
    )

    # Load fixture 2. JSON file imported by default. Overwrites some existing objects
    management.call_command('loaddata', 'fixture2.json', verbosity=0)
    self.assertQuerysetEqual(Article.objects.all(), [
        '<Article: Django conquers world!>',
        '<Article: Copyright is fine the way it is>',
        '<Article: Poker has no place on ESPN>',
    ])

    # Load fixture 3, XML format.
    management.call_command('loaddata', 'fixture3.xml', verbosity=0)
    self.assertQuerysetEqual(Article.objects.all(), [
        '<Article: XML identified as leading cause of cancer>',
        '<Article: Django conquers world!>',
        '<Article: Copyright is fine the way it is>',
        '<Article: Poker on TV is great!>',
    ])

    # Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
    management.call_command('loaddata', 'fixture6.json', verbosity=0)
    self.assertQuerysetEqual(Tag.objects.all(), [
        '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
        '<Tag: <Article: Copyright is fine the way it is> tagged "law">',
    ], ordered=False)

    # Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
    management.call_command('loaddata', 'fixture7.xml', verbosity=0)
    self.assertQuerysetEqual(Tag.objects.all(), [
        '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
        '<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
        '<Tag: <Article: Django conquers world!> tagged "django">',
        '<Tag: <Article: Django conquers world!> tagged "world domination">',
    ], ordered=False)

    # Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
    management.call_command('loaddata', 'fixture8.json', verbosity=0)
    self.assertQuerysetEqual(Visa.objects.all(), [
        '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
        '<Visa: Stephane Grappelli Can add user>',
        '<Visa: Prince >'
    ], ordered=False)

    # Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
    management.call_command('loaddata', 'fixture9.xml', verbosity=0)
    self.assertQuerysetEqual(Visa.objects.all(), [
        '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
        '<Visa: Stephane Grappelli Can add user, Can delete user>',
        '<Visa: Artist formerly known as "Prince" Can change user>'
    ], ordered=False)

    # object list is unaffected
    self.assertQuerysetEqual(Article.objects.all(), [
        '<Article: XML identified as leading cause of cancer>',
        '<Article: Django conquers world!>',
        '<Article: Copyright is fine the way it is>',
        '<Article: Poker on TV is great!>',
    ])

    # By default, you get raw keys on dumpdata
    self._dumpdata_assert(
        ['fixtures.book'],
        '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]'
    )

    # But you can get natural keys if you ask for them and they are available
    self._dumpdata_assert(
        ['fixtures.book'],
        '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
        'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
        natural_foreign_keys=True
    )

    # You can also omit the primary keys for models that we can get later with natural keys.
    self._dumpdata_assert(
        ['fixtures.person'],
        '[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane '
        'Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as '
        '\\"Prince\\""}, "model": "fixtures.person"}]',
        natural_primary_keys=True
    )

    # Dump the current contents of the database as a JSON fixture
    self._dumpdata_assert(
        ['fixtures'],
        '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
        '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is '
        'great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
        '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, '
        '"model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": '
        '"2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML '
        'identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": '
        '"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
        '3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
        '"legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", '
        '"article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": '
        '{"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, '
        '"model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": '
        '"fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", '
        '"fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", '
        '"fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], '
        '["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": '
        '"fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", '
        '"user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person":'
        ' ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, '
        '{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
        'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
        natural_foreign_keys=True
    )

    # Dump the current contents of the database as an XML fixture
    self._dumpdata_assert(
        ['fixtures'],
        '<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
        'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
        'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
        'model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field '
        'type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" '
        'model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it '
        'is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object '
        'pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!'
        '</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object '
        'pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading '
        'cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field>'
        '</object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field>'
        '<field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures'
        '</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3'
        '</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal'
        '</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>'
        'fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" '
        'name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" '
        'name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" '
        'rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field '
        'type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag">'
        '<field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" '
        'name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field>'
        '<field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" '
        'model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object>'
        '<object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli'
        '</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
        'Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field '
        'to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field>'
        '<field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user'
        '</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user'
        '</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user'
        '</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" '
        'model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane'
        ' Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel">'
        '<object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object>'
        '<natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field>'
        '</object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" '
        'rel="ManyToOneRel"><natural>Artist formerly known as "Prince"</natural></field><field '
        'to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural>'
        '<natural>auth</natural><natural>user</natural></object></field></object><object pk="1" '
        'model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field '
        'to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as '
        '"Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object>'
        '</django-objects>',
        format='xml', natural_foreign_keys=True
    )
    def test_dumpdata_with_excludes(self):
        """dumpdata --exclude filters out whole apps and individual models."""
        # Load fixture1 which has a site, two articles, and a category
        Site.objects.all().delete()
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        # Excluding fixtures app should only leave sites
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
            exclude_list=['fixtures'])
        # Excluding fixtures.Article/Book should leave fixtures.Category
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
            '{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book']
        )
        # Excluding fixtures and fixtures.Article/Book should be a no-op
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
            '{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book']
        )
        # Excluding sites and fixtures.Article/Book should only leave fixtures.Category
        self._dumpdata_assert(
            ['sites', 'fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}]',
            exclude_list=['fixtures.Article', 'fixtures.Book', 'sites']
        )
        # Excluding a bogus app should throw an error
        with self.assertRaisesMessage(management.CommandError, "No installed app with label 'foo_app'."):
            self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
        # Excluding a bogus model should throw an error
        with self.assertRaisesMessage(management.CommandError, "Unknown model: fixtures.FooModel"):
            self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])
    @unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support '?' in filenames.")
    def test_load_fixture_with_special_characters(self):
        """A fixture whose filename contains glob metacharacters still loads."""
        management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>'])
    def test_dumpdata_with_filtering_manager(self):
        """dumpdata respects the model's default manager unless
        use_base_manager is passed."""
        spy1 = Spy.objects.create(name='Paul')
        spy2 = Spy.objects.create(name='Alex', cover_blown=True)
        # The default manager hides blown covers, so only Paul is visible.
        self.assertQuerysetEqual(Spy.objects.all(),
                                 ['<Spy: Paul>'])
        # Use the default manager
        self._dumpdata_assert(
            ['fixtures.Spy'],
            '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk
        )
        # Dump using Django's base manager. Should return all objects,
        # even those normally filtered by the manager
        self._dumpdata_assert(
            ['fixtures.Spy'],
            '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": '
            '"fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk),
            use_base_manager=True
        )
    def test_dumpdata_with_pks(self):
        """dumpdata --pks limits output to the given primary keys and is only
        valid when exactly one concrete model is being dumped."""
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        management.call_command('loaddata', 'fixture2.json', verbosity=0)
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
            '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
            '"Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
            primary_keys='2,3'
        )
        self._dumpdata_assert(
            ['fixtures.Article'],
            '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
            '"pub_date": "2006-06-16T12:00:00"}}]',
            primary_keys='2'
        )
        # A whole app label is ambiguous for --pks: it must be one model.
        with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
            self._dumpdata_assert(
                ['fixtures'],
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
                '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
        # "Dump everything" (empty app list) is rejected as well.
        with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
            self._dumpdata_assert(
                '',
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
                '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
        # Two explicit models are likewise rejected.
        with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
            self._dumpdata_assert(
                ['fixtures.Article', 'fixtures.category'],
                '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
                '"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
                '{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
                primary_keys='2,3'
            )
    def test_dumpdata_with_uuid_pks(self):
        """--pks also works with non-integer (UUID) primary keys."""
        m1 = PrimaryKeyUUIDModel.objects.create()
        m2 = PrimaryKeyUUIDModel.objects.create()
        output = six.StringIO()
        management.call_command(
            'dumpdata', 'fixtures.PrimaryKeyUUIDModel', '--pks', ', '.join([str(m1.id), str(m2.id)]),
            stdout=output,
        )
        result = output.getvalue()
        # Both requested objects appear in the serialized output.
        self.assertIn('"pk": "%s"' % m1.id, result)
        self.assertIn('"pk": "%s"' % m2.id, result)
    def test_dumpdata_with_file_output(self):
        """dumpdata can write its serialized output to a named file."""
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]',
            filename='dumpdata.json'
        )
    def test_dumpdata_progressbar(self):
        """
        Dumpdata shows a progress bar on the command line when --output is set,
        stdout is a tty, and verbosity > 0.
        """
        management.call_command('loaddata', 'fixture1.json', verbosity=0)
        new_io = six.StringIO()
        # Monkeypatch isatty so the StringIO looks like an interactive tty.
        new_io.isatty = lambda: True
        with NamedTemporaryFile() as file:
            options = {
                'format': 'json',
                'stdout': new_io,
                'stderr': new_io,
                'output': file.name,
            }
            management.call_command('dumpdata', 'fixtures', **options)
            # The bar ends with a fully-filled '[....]' line.
            self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n'))
            # Test no progress bar when verbosity = 0
            options['verbosity'] = 0
            new_io = six.StringIO()
            new_io.isatty = lambda: True
            options.update({'stdout': new_io, 'stderr': new_io})
            management.call_command('dumpdata', 'fixtures', **options)
            self.assertEqual(new_io.getvalue(), '')
    def test_dumpdata_proxy_without_concrete(self):
        """
        A warning is displayed if a proxy model is dumped without its concrete
        parent.
        """
        ProxySpy.objects.create(name='Paul')
        # 'always' ensures the warning is recorded even if previously emitted.
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter('always')
            # The proxy serializes to nothing...
            self._dumpdata_assert(['fixtures.ProxySpy'], '[]')
        # ...and a ProxyModelWarning explains why.
        warning = warning_list.pop()
        self.assertEqual(warning.category, ProxyModelWarning)
        self.assertEqual(
            str(warning.message),
            "fixtures.ProxySpy is a proxy model and won't be serialized."
        )
    def test_dumpdata_proxy_with_concrete(self):
        """
        A warning isn't displayed if a proxy model is dumped with its concrete
        parent.
        """
        spy = ProxySpy.objects.create(name='Paul')
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter('always')
            self._dumpdata_assert(
                ['fixtures.ProxySpy', 'fixtures.Spy'],
                '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy.pk
            )
        # No proxy-model warning should have been raised.
        self.assertEqual(len(warning_list), 0)
    def test_compress_format_loading(self):
        """A compressed fixture loads when its serialization format is named."""
        # Load fixture 4 (compressed), using format specification
        management.call_command('loaddata', 'fixture4.json', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django pets kitten>',
        ])

    def test_compressed_specified_loading(self):
        """A fixture loads when both format and compression are spelled out."""
        # Load fixture 5 (compressed), using format *and* compression specification
        management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: WoW subscribers now outnumber readers>',
        ])

    def test_compressed_loading(self):
        """A fixture loads when only the compression suffix is given."""
        # Load fixture 5 (compressed), only compression specification
        management.call_command('loaddata', 'fixture5.zip', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: WoW subscribers now outnumber readers>',
        ])

    def test_ambiguous_compressed_fixture(self):
        """A bare name matching several fixture files is rejected."""
        # The name "fixture5" is ambiguous, so loading it will raise an error
        with self.assertRaises(management.CommandError) as cm:
            management.call_command('loaddata', 'fixture5', verbosity=0)
        self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])
    def test_db_loading(self):
        """Fixtures without a database suffix load into the default database."""
        # Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
        management.call_command('loaddata', 'db_fixture_1', verbosity=0)
        management.call_command('loaddata', 'db_fixture_2', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Who needs more than one database?>',
            '<Article: Who needs to use compressed data?>',
        ])
    def test_loaddata_error_message(self):
        """
        Loading a fixture which contains an invalid object outputs an error
        message which contains the pk of the object that triggered the error.
        """
        # MySQL needs a little prodding to reject invalid data.
        # This won't affect other tests because the database connection
        # is closed at the end of each test.
        if connection.vendor == 'mysql':
            connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
        with self.assertRaises(IntegrityError) as cm:
            management.call_command('loaddata', 'invalid.json', verbosity=0)
        # The re-raised message must identify the offending object by pk.
        self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])
    def test_loaddata_app_option(self):
        """The app_label option restricts the fixture search to a single app."""
        # The fixture lives in 'fixtures', so searching another app fails...
        with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_1' found."):
            management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp")
        self.assertQuerysetEqual(Article.objects.all(), [])
        # ...and searching the right app succeeds.
        management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures")
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Who needs more than one database?>',
        ])
    def test_loaddata_verbosity_three(self):
        """At verbosity 3, loaddata reports per-object progress on stdout."""
        output = six.StringIO()
        management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output)
        command_output = force_text(output.getvalue())
        # Progress is written with carriage returns so it overwrites in place.
        self.assertIn(
            "\rProcessed 1 object(s).\rProcessed 2 object(s)."
            "\rProcessed 3 object(s).\rProcessed 4 object(s).\n",
            command_output
        )
    def test_loading_using(self):
        """Fixtures load when the target database is named explicitly."""
        # Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
        management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default')
        management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default')
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Who needs more than one database?>',
            '<Article: Who needs to use compressed data?>',
        ])

    def test_unmatched_identifier_loading(self):
        """A fixture whose database suffix matches no configured database is
        treated as missing, both implicitly and with an explicit 'using'."""
        # Try to load db fixture 3. This won't load because the database identifier doesn't match
        with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
            management.call_command('loaddata', 'db_fixture_3', verbosity=0)
        with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
            management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default')
        self.assertQuerysetEqual(Article.objects.all(), [])
    def test_output_formats(self):
        """Dumps with natural foreign keys serialize correctly to both the
        JSON and XML formats."""
        # Load back in fixture 1, we need the articles from it
        management.call_command('loaddata', 'fixture1', verbosity=0)
        # Try to load fixture 6 using format discovery
        management.call_command('loaddata', 'fixture6', verbosity=0)
        self.assertQuerysetEqual(Tag.objects.all(), [
            '<Tag: <Article: Time to reform copyright> tagged "copyright">',
            '<Tag: <Article: Time to reform copyright> tagged "law">'
        ], ordered=False)
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": '
            '"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
            '3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
            '"law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django '
            'Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, '
            '{"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]',
            natural_foreign_keys=True
        )
        # Dump the current contents of the database as an XML fixture
        self._dumpdata_assert(
            ['fixtures'],
            '<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
            'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
            'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
            'model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field>'
            '<field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" '
            'model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field>'
            '<field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" '
            'model="fixtures.tag"><field type="CharField" name="name">copyright</field><field '
            'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
            '<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
            '</object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field '
            'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
            '<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
            '</object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt'
            '</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane '
            'Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
            'Prince</field></object></django-objects>',
            format='xml', natural_foreign_keys=True
        )
    def test_loading_with_exclude_app(self):
        """loaddata --exclude with an app label skips that whole app."""
        Site.objects.all().delete()
        management.call_command('loaddata', 'fixture1', exclude=['fixtures'], verbosity=0)
        self.assertFalse(Article.objects.exists())
        self.assertFalse(Category.objects.exists())
        self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])

    def test_loading_with_exclude_model(self):
        """loaddata --exclude with app.Model skips just that model."""
        Site.objects.all().delete()
        management.call_command('loaddata', 'fixture1', exclude=['fixtures.Article'], verbosity=0)
        self.assertFalse(Article.objects.exists())
        self.assertQuerysetEqual(Category.objects.all(), ['<Category: News Stories>'])
        self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])

    def test_exclude_option_errors(self):
        """Excluding a bogus app or model should raise an error."""
        msg = "No installed app with label 'foo_app'."
        with self.assertRaisesMessage(management.CommandError, msg):
            management.call_command('loaddata', 'fixture1', exclude=['foo_app'], verbosity=0)
        msg = "Unknown model: fixtures.FooModel"
        with self.assertRaisesMessage(management.CommandError, msg):
            management.call_command('loaddata', 'fixture1', exclude=['fixtures.FooModel'], verbosity=0)
class NonExistentFixtureTests(TestCase):
    """
    Loaddata behavior when the named fixture file does not exist.
    """
    def test_loaddata_not_existent_fixture_file(self):
        """A missing fixture name raises CommandError."""
        stdout_output = six.StringIO()
        with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
            management.call_command('loaddata', 'this_fixture_doesnt_exist', stdout=stdout_output)

    @mock.patch('django.db.connection.enable_constraint_checking')
    @mock.patch('django.db.connection.disable_constraint_checking')
    def test_nonexistent_fixture_no_constraint_checking(
            self, disable_constraint_checking, enable_constraint_checking):
        """
        If no fixtures match the loaddata command, constraints checks on the
        database shouldn't be disabled. This is performance critical on MSSQL.
        """
        with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
            management.call_command('loaddata', 'this_fixture_doesnt_exist', verbosity=0)
        # Neither side of the disable/enable pair should have been invoked.
        disable_constraint_checking.assert_not_called()
        enable_constraint_checking.assert_not_called()
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
    # Only these apps participate in this TransactionTestCase.
    available_apps = [
        'fixtures',
        'django.contrib.sites',
    ]

    @skipUnlessDBFeature('supports_forward_references')
    def test_format_discovery(self):
        """loaddata infers the serialization format from the file extension."""
        # Load fixture 1 again, using format discovery
        management.call_command('loaddata', 'fixture1', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Try to load fixture 2 using format discovery; this will fail
        # because there are two fixture2's in the fixtures directory
        with self.assertRaises(management.CommandError) as cm:
            management.call_command('loaddata', 'fixture2', verbosity=0)
        self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
        # object list is unaffected
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
        # Dump the current contents of the database as a JSON fixture
        self._dumpdata_assert(
            ['fixtures'],
            '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
            '"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
            'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
            '{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
        )
        # Load fixture 4 (compressed), using format discovery
        management.call_command('loaddata', 'fixture4', verbosity=0)
        self.assertQuerysetEqual(Article.objects.all(), [
            '<Article: Django pets kitten>',
            '<Article: Time to reform copyright>',
            '<Article: Poker has no place on ESPN>',
        ])
| apache-2.0 |
rpedigoni/restless | restless/exceptions.py | 4 | 1424 | from .constants import APPLICATION_ERROR, UNAUTHORIZED, NOT_FOUND, BAD_REQUEST
from .constants import METHOD_NOT_ALLOWED, METHOD_NOT_IMPLEMENTED
class RestlessError(Exception):
    """
    Root of the ``restless`` exception hierarchy.

    Catching this type catches every error the library raises. It defines
    no attributes or behavior of its own.
    """
class HttpError(RestlessError):
    """
    Base class for the HTTP-level errors ``restless`` raises.

    ``status`` holds the HTTP status code ``restless`` will use as the
    response's ``status_code``; ``msg`` is a default human-readable message
    that may be overridden through the constructor.
    """
    status = APPLICATION_ERROR
    msg = "Application Error"

    def __init__(self, msg=None):
        # Fall back to the class-level default when no message is supplied.
        super(HttpError, self).__init__(msg if msg else self.__class__.msg)
class BadRequest(HttpError):
    """Client sent a malformed/invalid request; status from ``BAD_REQUEST``."""
    status = BAD_REQUEST
    msg = "Bad request."


class Unauthorized(HttpError):
    """Request lacks valid credentials; status from ``UNAUTHORIZED``."""
    status = UNAUTHORIZED
    msg = "Unauthorized."


class NotFound(HttpError):
    """The requested resource does not exist; status from ``NOT_FOUND``."""
    status = NOT_FOUND
    msg = "Resource not found."


class MethodNotAllowed(HttpError):
    """The resource rejects this HTTP method; status from ``METHOD_NOT_ALLOWED``."""
    status = METHOD_NOT_ALLOWED
    msg = "The specified HTTP method is not allowed."


class MethodNotImplemented(HttpError):
    """No handler exists for this HTTP method; status from ``METHOD_NOT_IMPLEMENTED``."""
    status = METHOD_NOT_IMPLEMENTED
    msg = "The specified HTTP method is not implemented."
| bsd-3-clause |
xuleiboy1234/autoTitle | tensorflow/tensorflow/contrib/distributions/python/kernel_tests/shape_test.py | 102 | 27699 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ShapeUtil."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# Reusable rank-1 empty shape vector; int32 so it can be compared against
# shape tensors without dtype coercion.
_empty_shape = np.array([], dtype=np.int32)
def _eval(x):
if hasattr(x, "__iter__"):
return [x.eval() for x in x]
return x.eval()
def _constant(x):
  """Statically resolve a tensor (or each tensor in an iterable) to a value."""
  if not hasattr(x, "__iter__"):
    return tensor_util.constant_value(x)
  return [tensor_util.constant_value(element) for element in x]
class MakeBatchReadyTest(test.TestCase):
  def setUp(self):
    # Fixed seed so every test draws the same "random" inputs, keeping
    # expected values deterministic across runs.
    self._rng = np.random.RandomState(42)
  def _random_sample(self, sample_shape, dtype=np.float32):
    """Draws a uniform [0, 1) array of shape `sample_shape`, cast to `dtype`."""
    return self._rng.random_sample(sample_shape).astype(dtype)
  def _get_expected(self, x, batch_ndims, event_ndims, expand_batch_dim):
    """NumPy reference for `make_batch_of_event_sample_matrices`.

    Args:
      x: ndarray laid out as sample dims + batch dims + event dims.
      batch_ndims: how many middle dims are batch dims.
      event_ndims: how many trailing dims are event dims.
      expand_batch_dim: whether to insert a singleton batch dim when
        `batch_ndims == 0`.

    Returns:
      Tuple of (rearranged array, sample_shape).
    """
    # Cast as int32 array explicitly, since an empty x.shape defaults
    # to float64, and we can't index as float64 in numpy 1.12+.
    x_shape = np.array(x.shape, dtype=np.int32)
    # Leading dims that are neither batch nor event are sample dims.
    n = x.ndim - batch_ndims - event_ndims
    sample_shape = x_shape[:n]
    # Collapse all sample dims into one leading axis...
    y = np.reshape(x, np.concatenate([[-1], x_shape[n:]], 0))
    # ...then rotate that axis to the end (batch/event first, sample last).
    y = np.transpose(y, np.roll(np.arange(y.ndim), -1))
    if event_ndims == 0:
      # Insert a singleton event axis so the result is always matrix-like.
      y = y[..., np.newaxis, :]
    if batch_ndims == 0 and expand_batch_dim:
      # Optionally insert a singleton batch axis too.
      y = y[np.newaxis, ...]
    return y, sample_shape
  def _build_graph(self, x, batch_ndims, event_ndims, expand_batch_dim):
    """Builds the forward and inverse reshaping ops under test.

    Applies `make_batch_of_event_sample_matrices` then its `undo_` inverse,
    so `should_be_x_value` should reproduce `x` exactly.
    """
    shaper = _DistributionShape(batch_ndims=batch_ndims,
                                event_ndims=event_ndims)
    y, sample_shape = shaper.make_batch_of_event_sample_matrices(
        x, expand_batch_dim=expand_batch_dim)
    should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
        y, sample_shape, expand_batch_dim=expand_batch_dim)
    return y, sample_shape, should_be_x_value
  def _test_dynamic(self, x, batch_ndims, event_ndims, expand_batch_dim=True):
    """Checks the graph against the NumPy reference with run-time shapes.

    Placeholders hide all static shape information, so the ops under test
    must handle fully dynamic shapes.
    """
    with self.test_session() as sess:
      x_pl = array_ops.placeholder(x.dtype)
      batch_ndims_pl = array_ops.placeholder(dtypes.int32)
      event_ndims_pl = array_ops.placeholder(dtypes.int32)
      [y_, sample_shape_, should_be_x_value_] = sess.run(
          self._build_graph(
              x_pl, batch_ndims_pl, event_ndims_pl, expand_batch_dim),
          feed_dict={
              x_pl: x,
              batch_ndims_pl: batch_ndims,
              event_ndims_pl: event_ndims})
      expected_y, expected_sample_shape = self._get_expected(
          x, batch_ndims, event_ndims, expand_batch_dim)
      self.assertAllEqual(expected_sample_shape, sample_shape_)
      self.assertAllEqual(expected_y, y_)
      # The round trip must reproduce the original input exactly.
      self.assertAllEqual(x, should_be_x_value_)
def _test_static(self, x, batch_ndims, event_ndims, expand_batch_dim):
with self.test_session() as sess:
[y_, sample_shape_, should_be_x_value_] = sess.run(
self._build_graph(x, batch_ndims, event_ndims, expand_batch_dim))
expected_y, expected_sample_shape = self._get_expected(
x, batch_ndims, event_ndims, expand_batch_dim)
self.assertAllEqual(expected_sample_shape, sample_shape_)
self.assertAllEqual(expected_y, y_)
self.assertAllEqual(x, should_be_x_value_)
  # Group 1a: Static scalar input.
  # Test names encode the setup: Ndims<batch_ndims><event_ndims> and
  # Expand{No,Yes} = expand_batch_dim. A rank-0 input cannot carry batch or
  # event dims, so any nonzero ndims must raise ValueError at build time.
  def testStaticScalarNdims00ExpandNo(self):
    self._test_static(x=self._random_sample([]),
                      batch_ndims=0,
                      event_ndims=0,
                      expand_batch_dim=False)

  def testStaticScalarNdims00ExpandYes(self):
    self._test_static(x=self._random_sample([]),
                      batch_ndims=0,
                      event_ndims=0,
                      expand_batch_dim=True)

  def testStaticScalarNdims01ExpandNo(self):
    with self.assertRaises(ValueError):
      self._test_static(x=self._random_sample([]),
                        batch_ndims=0,
                        event_ndims=1,
                        expand_batch_dim=False)

  def testStaticScalarNdims01ExpandYes(self):
    with self.assertRaises(ValueError):
      self._test_static(x=self._random_sample([]),
                        batch_ndims=0,
                        event_ndims=1,
                        expand_batch_dim=True)

  def testStaticScalarNdims10ExpandNo(self):
    with self.assertRaises(ValueError):
      self._test_static(x=self._random_sample([]),
                        batch_ndims=1,
                        event_ndims=0,
                        expand_batch_dim=False)

  def testStaticScalarNdims10ExpandYes(self):
    with self.assertRaises(ValueError):
      self._test_static(x=self._random_sample([]),
                        batch_ndims=1,
                        event_ndims=0,
                        expand_batch_dim=True)

  def testStaticScalarNdims11ExpandNo(self):
    with self.assertRaises(ValueError):
      self._test_static(x=self._random_sample([]),
                        batch_ndims=1,
                        event_ndims=1,
                        expand_batch_dim=False)

  def testStaticScalarNdims11ExpandYes(self):
    with self.assertRaises(ValueError):
      self._test_static(x=self._random_sample([]),
                        batch_ndims=1,
                        event_ndims=1,
                        expand_batch_dim=True)
  # Group 1b: Dynamic scalar input.
  # Shapes are fed at run time here, so invalid configurations surface as op
  # errors during session.run rather than ValueErrors at build time.
  # NOTE(review): "Scalar3" in the first two names looks like a typo for
  # "Scalar" -- confirm before renaming.
  def testDynamicScalar3Ndims00ExpandNo(self):
    self._test_dynamic(x=self._random_sample([]),
                       batch_ndims=0,
                       event_ndims=0,
                       expand_batch_dim=False)

  def testDynamicScalar3Ndims00ExpandYes(self):
    self._test_dynamic(x=self._random_sample([]),
                       batch_ndims=0,
                       event_ndims=0,
                       expand_batch_dim=True)

  def testDynamicScalarNdims01ExpandNo(self):
    with self.assertRaisesOpError(""):
      self._test_dynamic(x=self._random_sample([]),
                         batch_ndims=0,
                         event_ndims=1,
                         expand_batch_dim=False)

  def testDynamicScalarNdims01ExpandYes(self):
    with self.assertRaisesOpError(""):
      self._test_dynamic(x=self._random_sample([]),
                         batch_ndims=0,
                         event_ndims=1,
                         expand_batch_dim=True)

  def testDynamicScalarNdims10ExpandNo(self):
    with self.assertRaisesOpError(""):
      self._test_dynamic(x=self._random_sample([]),
                         batch_ndims=1,
                         event_ndims=0,
                         expand_batch_dim=False)

  def testDynamicScalarNdims10ExpandYes(self):
    with self.assertRaisesOpError(""):
      self._test_dynamic(x=self._random_sample([]),
                         batch_ndims=1,
                         event_ndims=0,
                         expand_batch_dim=True)

  def testDynamicScalarNdims11ExpandNo(self):
    with self.assertRaisesOpError(""):
      self._test_dynamic(x=self._random_sample([]),
                         batch_ndims=1,
                         event_ndims=1,
                         expand_batch_dim=False)

  def testDynamicScalarNdims11ExpandYes(self):
    with self.assertRaisesOpError(""):
      self._test_dynamic(x=self._random_sample([]),
                         batch_ndims=1,
                         event_ndims=1,
                         expand_batch_dim=True)
  # Group 2a: Static vector input.
  # A rank-1 input can act as the batch dim or the event dim, but not both:
  # batch_ndims + event_ndims == 2 exceeds the rank and raises ValueError.
  def testStaticVectorNdims00ExpandNo(self):
    self._test_static(x=self._random_sample([3]),
                      batch_ndims=0,
                      event_ndims=0,
                      expand_batch_dim=False)

  def testStaticVectorNdims00ExpandYes(self):
    self._test_static(x=self._random_sample([3]),
                      batch_ndims=0,
                      event_ndims=0,
                      expand_batch_dim=True)

  def testStaticVectorNdims01ExpandNo(self):
    self._test_static(x=self._random_sample([3]),
                      batch_ndims=0,
                      event_ndims=1,
                      expand_batch_dim=False)

  def testStaticVectorNdims01ExpandYes(self):
    self._test_static(x=self._random_sample([3]),
                      batch_ndims=0,
                      event_ndims=1,
                      expand_batch_dim=True)

  def testStaticVectorNdims10ExpandNo(self):
    self._test_static(x=self._random_sample([3]),
                      batch_ndims=1,
                      event_ndims=0,
                      expand_batch_dim=False)

  def testStaticVectorNdims10ExpandYes(self):
    self._test_static(x=self._random_sample([3]),
                      batch_ndims=1,
                      event_ndims=0,
                      expand_batch_dim=True)

  def testStaticVectorNdims11ExpandNo(self):
    with self.assertRaises(ValueError):
      self._test_static(x=self._random_sample([3]),
                        batch_ndims=1,
                        event_ndims=1,
                        expand_batch_dim=False)

  def testStaticVectorNdims11ExpandYes(self):
    with self.assertRaises(ValueError):
      self._test_static(x=self._random_sample([3]),
                        batch_ndims=1,
                        event_ndims=1,
                        expand_batch_dim=True)
  # Group 2b: Dynamic vector input.
  # Same cases as Group 2a, but with run-time shapes; the invalid 1+1 ndims
  # combination surfaces as an op error during session.run.
  def testDynamicVectorNdims00ExpandNo(self):
    self._test_dynamic(x=self._random_sample([3]),
                       batch_ndims=0,
                       event_ndims=0,
                       expand_batch_dim=False)

  def testDynamicVectorNdims00ExpandYes(self):
    self._test_dynamic(x=self._random_sample([3]),
                       batch_ndims=0,
                       event_ndims=0,
                       expand_batch_dim=True)

  def testDynamicVectorNdims01ExpandNo(self):
    self._test_dynamic(x=self._random_sample([3]),
                       batch_ndims=0,
                       event_ndims=1,
                       expand_batch_dim=False)

  def testDynamicVectorNdims01ExpandYes(self):
    self._test_dynamic(x=self._random_sample([3]),
                       batch_ndims=0,
                       event_ndims=1,
                       expand_batch_dim=True)

  def testDynamicVectorNdims10ExpandNo(self):
    self._test_dynamic(x=self._random_sample([3]),
                       batch_ndims=1,
                       event_ndims=0,
                       expand_batch_dim=False)

  def testDynamicVectorNdims10ExpandYes(self):
    self._test_dynamic(x=self._random_sample([3]),
                       batch_ndims=1,
                       event_ndims=0,
                       expand_batch_dim=True)

  def testDynamicVectorNdims11ExpandNo(self):
    with self.assertRaisesOpError(""):
      self._test_dynamic(x=self._random_sample([3]),
                         batch_ndims=1,
                         event_ndims=1,
                         expand_batch_dim=False)

  def testDynamicVectorNdims11ExpandYes(self):
    with self.assertRaisesOpError(""):
      self._test_dynamic(x=self._random_sample([3]),
                         batch_ndims=1,
                         event_ndims=1,
                         expand_batch_dim=True)
  # Group 3a: Static matrix input.
  # Rank 2 accommodates every tested batch_ndims/event_ndims combination,
  # so none of these cases raise.
  def testStaticMatrixNdims00ExpandNo(self):
    self._test_static(x=self._random_sample([2, 3]),
                      batch_ndims=0,
                      event_ndims=0,
                      expand_batch_dim=False)

  def testStaticMatrixNdims00ExpandYes(self):
    self._test_static(x=self._random_sample([2, 3]),
                      batch_ndims=0,
                      event_ndims=0,
                      expand_batch_dim=True)

  def testStaticMatrixNdims01ExpandNo(self):
    self._test_static(x=self._random_sample([2, 3]),
                      batch_ndims=0,
                      event_ndims=1,
                      expand_batch_dim=False)

  def testStaticMatrixNdims01ExpandYes(self):
    self._test_static(x=self._random_sample([2, 3]),
                      batch_ndims=0,
                      event_ndims=1,
                      expand_batch_dim=True)

  def testStaticMatrixNdims10ExpandNo(self):
    self._test_static(x=self._random_sample([2, 3]),
                      batch_ndims=1,
                      event_ndims=0,
                      expand_batch_dim=False)

  def testStaticMatrixNdims10ExpandYes(self):
    self._test_static(x=self._random_sample([2, 3]),
                      batch_ndims=1,
                      event_ndims=0,
                      expand_batch_dim=True)

  def testStaticMatrixNdims11ExpandNo(self):
    self._test_static(x=self._random_sample([2, 3]),
                      batch_ndims=1,
                      event_ndims=1,
                      expand_batch_dim=False)

  def testStaticMatrixNdims11ExpandYes(self):
    self._test_static(x=self._random_sample([2, 3]),
                      batch_ndims=1,
                      event_ndims=1,
                      expand_batch_dim=True)
  # Group 3b: Dynamic matrix input.
  # Same cases as Group 3a with run-time shapes; all succeed.
  def testDynamicMatrixNdims00ExpandNo(self):
    self._test_dynamic(x=self._random_sample([2, 3]),
                       batch_ndims=0,
                       event_ndims=0,
                       expand_batch_dim=False)

  def testDynamicMatrixNdims00ExpandYes(self):
    self._test_dynamic(x=self._random_sample([2, 3]),
                       batch_ndims=0,
                       event_ndims=0,
                       expand_batch_dim=True)

  def testDynamicMatrixNdims01ExpandNo(self):
    self._test_dynamic(x=self._random_sample([2, 3]),
                       batch_ndims=0,
                       event_ndims=1,
                       expand_batch_dim=False)

  def testDynamicMatrixNdims01ExpandYes(self):
    self._test_dynamic(x=self._random_sample([2, 3]),
                       batch_ndims=0,
                       event_ndims=1,
                       expand_batch_dim=True)

  def testDynamicMatrixNdims10ExpandNo(self):
    self._test_dynamic(x=self._random_sample([2, 3]),
                       batch_ndims=1,
                       event_ndims=0,
                       expand_batch_dim=False)

  def testDynamicMatrixNdims10ExpandYes(self):
    self._test_dynamic(x=self._random_sample([2, 3]),
                       batch_ndims=1,
                       event_ndims=0,
                       expand_batch_dim=True)

  def testDynamicMatrixNdims11ExpandNo(self):
    self._test_dynamic(x=self._random_sample([2, 3]),
                       batch_ndims=1,
                       event_ndims=1,
                       expand_batch_dim=False)

  def testDynamicMatrixNdims11ExpandYes(self):
    self._test_dynamic(x=self._random_sample([2, 3]),
                       batch_ndims=1,
                       event_ndims=1,
                       expand_batch_dim=True)
  # Group 4a: Static tensor input.
  # Rank-4 input: with batch/event ndims <= 1 each, at least two leading
  # sample dims remain; all combinations succeed.
  def testStaticTensorNdims00ExpandNo(self):
    self._test_static(x=self._random_sample([4, 1, 2, 3]),
                      batch_ndims=0,
                      event_ndims=0,
                      expand_batch_dim=False)

  def testStaticTensorNdims00ExpandYes(self):
    self._test_static(x=self._random_sample([4, 1, 2, 3]),
                      batch_ndims=0,
                      event_ndims=0,
                      expand_batch_dim=True)

  def testStaticTensorNdims01ExpandNo(self):
    self._test_static(x=self._random_sample([4, 1, 2, 3]),
                      batch_ndims=0,
                      event_ndims=1,
                      expand_batch_dim=False)

  def testStaticTensorNdims01ExpandYes(self):
    self._test_static(x=self._random_sample([4, 1, 2, 3]),
                      batch_ndims=0,
                      event_ndims=1,
                      expand_batch_dim=True)

  def testStaticTensorNdims10ExpandNo(self):
    self._test_static(x=self._random_sample([4, 1, 2, 3]),
                      batch_ndims=1,
                      event_ndims=0,
                      expand_batch_dim=False)

  def testStaticTensorNdims10ExpandYes(self):
    self._test_static(x=self._random_sample([4, 1, 2, 3]),
                      batch_ndims=1,
                      event_ndims=0,
                      expand_batch_dim=True)

  def testStaticTensorNdims11ExpandNo(self):
    self._test_static(x=self._random_sample([4, 1, 2, 3]),
                      batch_ndims=1,
                      event_ndims=1,
                      expand_batch_dim=False)

  def testStaticTensorNdims11ExpandYes(self):
    self._test_static(x=self._random_sample([4, 1, 2, 3]),
                      batch_ndims=1,
                      event_ndims=1,
                      expand_batch_dim=True)
# Group 4b: Dynamic tensor input.
def testDynamicTensorNdims00ExpandNo(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=False)
def testDynamicTensorNdims00ExpandYes(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=0,
expand_batch_dim=True)
def testDynamicTensorNdims01ExpandNo(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=False)
def testDynamicTensorNdims01ExpandYes(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=0,
event_ndims=1,
expand_batch_dim=True)
def testDynamicTensorNdims10ExpandNo(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=False)
def testDynamicTensorNdims10ExpandYes(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=0,
expand_batch_dim=True)
def testDynamicTensorNdims11ExpandNo(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=False)
def testDynamicTensorNdims11ExpandYes(self):
self._test_dynamic(x=self._random_sample([4, 1, 2, 3]),
batch_ndims=1,
event_ndims=1,
expand_batch_dim=True)
class DistributionShapeTest(test.TestCase):
"""Tests for _DistributionShape: ndims accounting and the partition of an
input's dimensions into (sample, batch, event) parts, for both statically
known shapes and shapes fed at run time."""
def setUp(self):
# Fixed seed so the random samples are reproducible across runs.
self._rng = np.random.RandomState(42)
def _random_sample(self, sample_shape, dtype=dtypes.float64):
# Uniform [0, 1) sample of the requested shape, cast to dtype's numpy type.
return self._rng.random_sample(sample_shape).astype(dtype.as_numpy_dtype())
def _assertNdArrayEqual(self, expected, actual):
"""Helper which properly compares two np.ndarray-like objects.
This function checks for exact equality so is probably only suitable for
integers or powers of 2.
Args:
expected: np.ndarray. Ground-truth value.
actual: np.ndarray. Observed value.
"""
expected = np.asarray(expected)
actual = np.asarray(actual)
self.assertEqual(expected.shape, actual.shape,
"Shape mismatch: expected %s, got %s." %
(expected.shape, actual.shape))
# Walk both arrays element-wise in flat (C) order.
actual_item = actual.flat
for expected_item in expected.flat:
self.assertAllEqual(expected_item, next(actual_item))
def testDistributionShapeGetNdimsStatic(self):
with self.test_session():
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
x = 1
self.assertEqual(0, shaper.get_sample_ndims(x).eval())
self.assertEqual(0, shaper.batch_ndims.eval())
self.assertEqual(0, shaper.event_ndims.eval())
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
x = self._random_sample((1, 2, 3))
self.assertAllEqual(3, shaper.get_ndims(x).eval())
self.assertEqual(1, shaper.get_sample_ndims(x).eval())
self.assertEqual(1, shaper.batch_ndims.eval())
self.assertEqual(1, shaper.event_ndims.eval())
x += self._random_sample((1, 2, 3))
self.assertAllEqual(3, shaper.get_ndims(x).eval())
self.assertEqual(1, shaper.get_sample_ndims(x).eval())
self.assertEqual(1, shaper.batch_ndims.eval())
self.assertEqual(1, shaper.event_ndims.eval())
# Test ndims functions work, even despite unfed Tensors.
y = array_ops.placeholder(dtypes.float32, shape=(1024, None, 1024))
self.assertEqual(3, shaper.get_ndims(y).eval())
self.assertEqual(1, shaper.get_sample_ndims(y).eval())
self.assertEqual(1, shaper.batch_ndims.eval())
self.assertEqual(1, shaper.event_ndims.eval())
def testDistributionShapeGetNdimsDynamic(self):
with self.test_session() as sess:
batch_ndims = array_ops.placeholder(dtypes.int32)
event_ndims = array_ops.placeholder(dtypes.int32)
shaper = _DistributionShape(
batch_ndims=batch_ndims, event_ndims=event_ndims)
y = array_ops.placeholder(dtypes.float32)
y_value = np.ones((4, 2), dtype=y.dtype.as_numpy_dtype())
feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
self.assertEqual(2, sess.run(shaper.get_ndims(y), feed_dict=feed_dict))
def testDistributionShapeGetDimsStatic(self):
with self.test_session():
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
# NOTE(review): the line below duplicates the construction above with
# identical arguments; it looks redundant -- confirm against upstream.
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
x = 1
self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
_constant(shaper.get_dims(x)))
shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
x += self._random_sample((1, 1, 2, 2))
self._assertNdArrayEqual(([0], [1], [2, 3]),
_constant(shaper.get_dims(x)))
x += x
self._assertNdArrayEqual(([0], [1], [2, 3]),
_constant(shaper.get_dims(x)))
def testDistributionShapeGetDimsDynamic(self):
with self.test_session() as sess:
# Works for static {batch,event}_ndims despite unfed input.
shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
y = array_ops.placeholder(dtypes.float32, shape=(10, None, 5, 5))
self._assertNdArrayEqual([[0], [1], [2, 3]], _eval(shaper.get_dims(y)))
# Works for deferred {batch,event}_ndims.
batch_ndims = array_ops.placeholder(dtypes.int32)
event_ndims = array_ops.placeholder(dtypes.int32)
shaper = _DistributionShape(
batch_ndims=batch_ndims, event_ndims=event_ndims)
y = array_ops.placeholder(dtypes.float32)
y_value = self._random_sample((10, 3, 5, 5), dtype=y.dtype)
feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 2}
self._assertNdArrayEqual(
([0], [1], [2, 3]), sess.run(shaper.get_dims(y), feed_dict=feed_dict))
def testDistributionShapeGetShapeStatic(self):
with self.test_session():
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
_constant(shaper.get_shape(1.)))
self._assertNdArrayEqual(([1], _empty_shape, _empty_shape),
_constant(shaper.get_shape(np.ones(1))))
self._assertNdArrayEqual(([2, 2], _empty_shape, _empty_shape),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3, 2, 1], _empty_shape, _empty_shape),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
# Scalar input cannot supply the required event dimension.
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(1.)
self._assertNdArrayEqual((_empty_shape, _empty_shape, [1]),
_constant(shaper.get_shape(np.ones(1))))
self._assertNdArrayEqual(([2], _empty_shape, [2]),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3, 2], _empty_shape, [1]),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
shaper = _DistributionShape(batch_ndims=1, event_ndims=0)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(1.)
self._assertNdArrayEqual((_empty_shape, [1], _empty_shape),
_constant(shaper.get_shape(np.ones(1))))
self._assertNdArrayEqual(([2], [2], _empty_shape),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3, 2], [1], _empty_shape),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(1.)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(np.ones(1))
self._assertNdArrayEqual((_empty_shape, [2], [2]),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3], [2], [1]),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
def testDistributionShapeGetShapeDynamic(self):
with self.test_session() as sess:
# Works for static ndims despite unknown static shape.
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
y = array_ops.placeholder(dtypes.int32, shape=(None, None, 2))
y_value = np.ones((3, 4, 2), dtype=y.dtype.as_numpy_dtype())
self._assertNdArrayEqual(
([3], [4], [2]),
sess.run(shaper.get_shape(y), feed_dict={y: y_value}))
shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
y = array_ops.placeholder(dtypes.int32, shape=(None, None))
y_value = np.ones((3, 2), dtype=y.dtype.as_numpy_dtype())
self._assertNdArrayEqual(
([3], _empty_shape, [2]),
sess.run(shaper.get_shape(y), feed_dict={y: y_value}))
# Works for deferred {batch,event}_ndims.
batch_ndims = array_ops.placeholder(dtypes.int32)
event_ndims = array_ops.placeholder(dtypes.int32)
shaper = _DistributionShape(
batch_ndims=batch_ndims, event_ndims=event_ndims)
y = array_ops.placeholder(dtypes.float32)
y_value = self._random_sample((3, 4, 2), dtype=y.dtype)
feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
self._assertNdArrayEqual(
([3], [4], [2]), sess.run(shaper.get_shape(y), feed_dict=feed_dict))
y_value = self._random_sample((3, 2), dtype=y.dtype)
feed_dict = {y: y_value, batch_ndims: 0, event_ndims: 1}
self._assertNdArrayEqual(
([3], _empty_shape, [2]),
sess.run(shaper.get_shape(y), feed_dict=feed_dict))
# Standard test-runner entry point.
if __name__ == "__main__":
test.main()
| mit |
huyilin/TopicLda | src/onlineldainit.py | 1 | 11899 | # onlineldavb.py: Package of functions for fitting Latent Dirichlet
# Allocation (LDA) with online variational Bayes (VB).
#
# Copyright (C) 2010 Matthew D. Hoffman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, re, time, string
import numpy as n
#from scipy.special import gammaln, psi
from compute import gammaln,psi
# Fixed seed so the random variational initialisation is reproducible.
n.random.seed(100000001)
# Convergence threshold for the per-document gamma updates in the E step.
meanchangethresh = 0.001
def dirichlet_expectation(alpha):
    """Return E[log(theta)] for theta ~ Dirichlet(alpha).

    Accepts either a single parameter vector (1-D ``alpha``) or a matrix
    whose rows are independent Dirichlet parameter vectors (2-D ``alpha``).
    """
    if alpha.ndim == 1:
        # Single Dirichlet: normalise by the total concentration.
        return psi(alpha) - psi(n.sum(alpha))
    # One Dirichlet per row: normalise each row by its own row sum.
    return psi(alpha) - psi(n.sum(alpha, 1))[:, n.newaxis]
def parse_doc_list(docs, vocab):
    """
    Parse a document into a list of word ids and a list of counts,
    or parse a set of documents into two lists of lists of word ids
    and counts.

    Arguments:
    docs:  List of D documents (or a single document string). Each document
           must be represented as a single string. (Word order is
           unimportant.) Any words not in the vocabulary will be ignored.
    vocab: Dictionary mapping from words to integer ids.

    Returns a pair of lists of lists (wordids, wordcts):
    wordids[i][j] gives the jth unique token present in document i (in no
    particular order); wordcts[i][j] is the number of times that token
    appears in document i.
    """
    # Accept a bare string as a one-document corpus.
    if isinstance(docs, str):
        docs = [docs]

    D = len(docs)

    wordids = list()
    wordcts = list()
    for d in range(0, D):
        # Normalise the text: lower-case, treat '-' as a separator, drop
        # everything outside [a-z ], and collapse runs of spaces.
        docs[d] = docs[d].lower()
        docs[d] = re.sub(r'-', ' ', docs[d])
        docs[d] = re.sub(r'[^a-z ]', '', docs[d])
        docs[d] = re.sub(r' +', ' ', docs[d])
        # BUG FIX: ``string.split(docs[d])`` only exists on Python 2; the
        # equivalent str method works identically on Python 2 and 3.
        words = docs[d].split()
        ddict = dict()
        for word in words:
            if word in vocab:
                wordtoken = vocab[word]
                if wordtoken not in ddict:
                    ddict[wordtoken] = 0
                ddict[wordtoken] += 1
        # Materialise the dict views so the documented list-of-lists
        # contract also holds on Python 3.
        wordids.append(list(ddict.keys()))
        wordcts.append(list(ddict.values()))

    return (wordids, wordcts)
class OnlineLDA:
    """
    Implements online VB for LDA as described in (Hoffman et al. 2010).
    """

    def __init__(self, vocab, K, D, alpha, eta, tau0, kappa):
        """
        Arguments:
        K: Number of topics
        vocab: A set of words to recognize. When analyzing documents, any word
           not in this set will be ignored.
        D: Total number of documents in the population. For a fixed corpus,
           this is the size of the corpus. In the truly online setting, this
           can be an estimate of the maximum number of documents that
           could ever be seen.
        alpha: Hyperparameter for prior on weight vectors theta
        eta: Hyperparameter for prior on topics beta
        tau0: A (positive) learning parameter that downweights early iterations
        kappa: Learning rate: exponential decay rate---should be between
           (0.5, 1.0] to guarantee asymptotic convergence.

        Note that if you pass the same set of D documents in every time and
        set kappa=0 this class can also be used to do batch VB.
        """
        # Normalise vocabulary entries the same way parse_doc_list
        # normalises document text, so lookups match.
        self._vocab = dict()
        for word in vocab:
            word = word.lower()
            word = re.sub(r'[^a-z]', '', word)
            self._vocab[word] = len(self._vocab)

        self._K = K
        self._W = len(self._vocab)
        self._D = D
        self._alpha = alpha
        self._eta = eta
        self._tau0 = tau0 + 1
        self._kappa = kappa
        self._updatect = 0

        # Initialize the variational distribution q(beta|lambda)
        self._lambda = 1 * n.random.gamma(100., 1. / 100., (self._K, self._W))
        self._Elogbeta = dirichlet_expectation(self._lambda)
        self._expElogbeta = n.exp(self._Elogbeta)

    def do_e_step(self, docs):
        """
        Given a mini-batch of documents, estimates the parameters
        gamma controlling the variational distribution over the topic
        weights for each document in the mini-batch.

        Arguments:
        docs: List of D documents (or a single document string). Each
           document must be represented as a string. (Word order is
           unimportant.) Any words not in the vocabulary will be ignored.

        Returns a tuple containing the estimated values of gamma,
        as well as sufficient statistics needed to update lambda.
        """
        # BUG FIX: the original guard compared type(docs).__name__ with
        # 'string', which never matches (a str's type name is 'str'), so a
        # bare-string argument was never wrapped and len(docs) below would
        # count characters instead of documents.
        if isinstance(docs, str):
            docs = [docs]

        (wordids, wordcts) = parse_doc_list(docs, self._vocab)
        batchD = len(docs)

        # Initialize the variational distribution q(theta|gamma) for
        # the mini-batch
        gamma = 1 * n.random.gamma(100., 1. / 100., (batchD, self._K))
        Elogtheta = dirichlet_expectation(gamma)
        expElogtheta = n.exp(Elogtheta)

        sstats = n.zeros(self._lambda.shape)
        # Now, for each document d update that document's gamma and phi
        it = 0
        meanchange = 0
        for d in range(0, batchD):
            # These are mostly just shorthand (but might help cache locality)
            ids = wordids[d]
            cts = wordcts[d]
            gammad = gamma[d, :]
            Elogthetad = Elogtheta[d, :]
            expElogthetad = expElogtheta[d, :]
            expElogbetad = self._expElogbeta[:, ids]
            # The optimal phi_{dwk} is proportional to
            # expElogthetad_k * expElogbetad_w. phinorm is the normalizer.
            phinorm = n.dot(expElogthetad, expElogbetad) + 1e-100
            # Iterate between gamma and phi until convergence
            for it in range(0, 100):
                lastgamma = gammad
                # We represent phi implicitly to save memory and time.
                # Substituting the value of the optimal phi back into
                # the update for gamma gives this update. Cf. Lee&Seung 2001.
                gammad = self._alpha + expElogthetad * \
                    n.dot(cts / phinorm, expElogbetad.T)
                Elogthetad = dirichlet_expectation(gammad)
                expElogthetad = n.exp(Elogthetad)
                phinorm = n.dot(expElogthetad, expElogbetad) + 1e-100
                # If gamma hasn't changed much, we're done.
                meanchange = n.mean(abs(gammad - lastgamma))
                if meanchange < meanchangethresh:
                    break
            gamma[d, :] = gammad
            # Contribution of document d to the expected sufficient
            # statistics for the M step.
            sstats[:, ids] += n.outer(expElogthetad.T, cts / phinorm)

        # This step finishes computing the sufficient statistics for the
        # M step, so that
        # sstats[k, w] = \sum_d n_{dw} * phi_{dwk}
        # = \sum_d n_{dw} * exp{Elogtheta_{dk} + Elogbeta_{kw}} / phinorm_{dw}.
        sstats = sstats * self._expElogbeta

        return (gamma, sstats)

    def update_lambda(self, docs):
        """
        First does an E step on the mini-batch given in wordids and
        wordcts, then uses the result of that E step to update the
        variational parameter matrix lambda.

        Arguments:
        docs: List of D documents (or a single document string). Each
           document must be represented as a string. (Word order is
           unimportant.) Any words not in the vocabulary will be ignored.

        Returns gamma, the parameters to the variational distribution
        over the topic weights theta for the documents analyzed in this
        update.

        Also returns an estimate of the variational bound for the
        entire corpus for the OLD setting of lambda based on the
        documents passed in. This can be used as a (possibly very
        noisy) estimate of held-out likelihood.
        """
        # rhot will be between 0 and 1, and says how much to weight
        # the information we got from this mini-batch.
        rhot = pow(self._tau0 + self._updatect, -self._kappa)
        self._rhot = rhot
        # Do an E step to update gamma, phi | lambda for this
        # mini-batch. This also returns the information about phi that
        # we need to update lambda.
        (gamma, sstats) = self.do_e_step(docs)
        # Estimate held-out likelihood for current values of lambda.
        bound = self.approx_bound(docs, gamma)
        # Update lambda based on documents.
        self._lambda = self._lambda * (1 - rhot) + \
            rhot * (self._eta + self._D * sstats / len(docs))
        self._Elogbeta = dirichlet_expectation(self._lambda)
        self._expElogbeta = n.exp(self._Elogbeta)
        self._updatect += 1

        return (gamma, bound)

    def approx_bound(self, docs, gamma):
        """
        Estimates the variational bound over *all documents* using only
        the documents passed in as "docs." gamma is the set of parameters
        to the variational distribution q(theta) corresponding to the
        set of documents passed in.

        The output of this function is going to be noisy, but can be
        useful for assessing convergence.
        """
        # BUG FIX: same dead 'string' type-name check as in do_e_step.
        if isinstance(docs, str):
            docs = [docs]

        (wordids, wordcts) = parse_doc_list(docs, self._vocab)
        batchD = len(docs)

        score = 0
        Elogtheta = dirichlet_expectation(gamma)
        expElogtheta = n.exp(Elogtheta)

        # E[log p(docs | theta, beta)]
        for d in range(0, batchD):
            gammad = gamma[d, :]
            ids = wordids[d]
            cts = n.array(wordcts[d])
            phinorm = n.zeros(len(ids))
            for i in range(0, len(ids)):
                temp = Elogtheta[d, :] + self._Elogbeta[:, ids[i]]
                tmax = max(temp)
                # log-sum-exp, stabilised by subtracting the max.
                phinorm[i] = n.log(sum(n.exp(temp - tmax))) + tmax
            score += n.sum(cts * phinorm)

        # E[log p(theta | alpha) - log q(theta | gamma)]
        score += n.sum((self._alpha - gamma) * Elogtheta)
        score += n.sum(gammaln(gamma) - gammaln(self._alpha))
        score += sum(gammaln(self._alpha * self._K) - gammaln(n.sum(gamma, 1)))

        # Compensate for the subsampling of the population of documents
        score = score * self._D / len(docs)

        # E[log p(beta | eta) - log q (beta | lambda)]
        score = score + n.sum((self._eta - self._lambda) * self._Elogbeta)
        score = score + n.sum(gammaln(self._lambda) - gammaln(self._eta))
        score = score + n.sum(gammaln(self._eta * self._W) -
                              gammaln(n.sum(self._lambda, 1)))

        return (score)
| gpl-3.0 |
CloudNcodeInc/django-celery | djcelery/tests/test_views.py | 6 | 6605 | from __future__ import absolute_import, unicode_literals
import sys
from functools import partial
from billiard.einfo import ExceptionInfo
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.test.testcases import TestCase as DjangoTestCase
from django.template import TemplateDoesNotExist
from anyjson import deserialize
from celery import current_app
from celery import states
from celery.task import task
from celery.utils import gen_unique_id, get_full_cls_name
from djcelery.views import task_webhook
from djcelery.tests.req import MockRequest
def reversestar(name, **kwargs):
"""Reverse the named URL, forwarding **kwargs as the URL pattern's kwargs."""
return reverse(name, kwargs=kwargs)
class MyError(Exception):
    """Exception with an id-free, text-comparable repr.

    On Py2.4 repr(exc) includes the object id, so comparing
    texts is pointless when the id of the "same" KeyError does not match.
    """

    def __repr__(self):
        return '<{0}: {1!r}>'.format(type(self).__name__, self.args)
class MyRetryTaskError(MyError):
"""Error type used below to exercise the RETRY result state."""
pass
# URL helpers bound to the djcelery view names exercised by these tests.
task_is_successful = partial(reversestar, 'celery-is_task_successful')
task_status = partial(reversestar, 'celery-task_status')
task_apply = partial(reverse, 'celery-apply')
registered_tasks = partial(reverse, 'celery-tasks')
# Module-level scratchpad the eager task writes into, so tests can verify
# that the apply view really executed the task.
scratch = {}
@task()
def mytask(x, y):
ret = scratch['result'] = int(x) * int(y)
return ret
def create_exception(name, base=Exception):
    """Return a freshly created exception class *name* subclassing *base*."""
    bases = (base, )
    return type(name, bases, {})
def catch_exception(exception):
"""Raise *exception*, serialize it through the current result backend, and
return ``(prepared_exception, formatted_traceback)``."""
try:
raise exception
except exception.__class__ as exc:
exc = current_app.backend.prepare_exception(exc)
return exc, ExceptionInfo(sys.exc_info()).traceback
class ViewTestCase(DjangoTestCase):
"""Django TestCase with JSON helpers and fallbacks for assert methods
missing from older Django/unittest versions."""
def assertJSONEqual(self, json, py):
# Accept either a raw JSON string/bytes or an HttpResponse.
json = isinstance(json, HttpResponse) and json.content or json
try:
self.assertEqual(deserialize(json.decode('utf-8')), py)
except TypeError as exc:
raise TypeError('{0}: {1}'.format(exc, json))
def assertIn(self, expected, source, *args):
# Fall back to a plain membership check when the base class lacks assertIn.
try:
DjangoTestCase.assertIn(self, expected, source, *args)
except AttributeError:
self.assertTrue(expected in source)
def assertDictContainsSubset(self, a, b, *args):
# Fall back to a manual subset check on older unittest versions.
try:
DjangoTestCase.assertDictContainsSubset(self, a, b, *args)
except AttributeError:
for key, value in a.items():
self.assertTrue(key in b)
self.assertEqual(b[key], value)
# Tests for the task-apply view: eager execution and 404 on unknown tasks.
class test_task_apply(ViewTestCase):
def test_apply(self):
current_app.conf.CELERY_ALWAYS_EAGER = True
try:
self.client.get(
task_apply(kwargs={'task_name': mytask.name}) + '?x=4&y=4',
)
self.assertEqual(scratch['result'], 16)
finally:
current_app.conf.CELERY_ALWAYS_EAGER = False
def test_apply_raises_404_on_unregistered_task(self):
current_app.conf.CELERY_ALWAYS_EAGER = True
try:
name = 'xxx.does.not.exist'
action = partial(
self.client.get,
task_apply(kwargs={'task_name': name}) + '?x=4&y=4',
)
try:
res = action()
except TemplateDoesNotExist:
pass # pre Django 1.5
else:
self.assertEqual(res.status_code, 404)
finally:
current_app.conf.CELERY_ALWAYS_EAGER = False
# The tasks view must list the registered task names.
class test_registered_tasks(ViewTestCase):
def test_list_registered_tasks(self):
json = self.client.get(registered_tasks())
tasks = deserialize(json.content.decode('utf-8'))
self.assertIn('celery.backend_cleanup', tasks['regular'])
# task_webhook should wrap return values / exceptions in a JSON envelope.
class test_webhook_task(ViewTestCase):
def test_successful_request(self):
@task_webhook
def add_webhook(request):
x = int(request.GET['x'])
y = int(request.GET['y'])
return x + y
request = MockRequest().get('/tasks/add', dict(x=10, y=10))
response = add_webhook(request)
self.assertDictContainsSubset(
{'status': 'success', 'retval': 20},
deserialize(response.content.decode('utf-8')))
def test_failed_request(self):
@task_webhook
def error_webhook(request):
x = int(request.GET['x'])
y = int(request.GET['y'])
raise MyError(x + y)
request = MockRequest().get('/tasks/error', dict(x=10, y=10))
response = error_webhook(request)
self.assertDictContainsSubset(
{'status': 'failure',
'reason': '<MyError: (20,)>'},
deserialize(response.content.decode('utf-8')))
# The status view must mirror whatever state the backend stored.
class test_task_status(ViewTestCase):
def assertStatusForIs(self, status, res, traceback=None):
uuid = gen_unique_id()
current_app.backend.store_result(uuid, res, status,
traceback=traceback)
json = self.client.get(task_status(task_id=uuid))
expect = dict(id=uuid, status=status, result=res)
if status in current_app.backend.EXCEPTION_STATES:
instore = current_app.backend.get_result(uuid)
self.assertEqual(str(instore.args[0]), str(res.args[0]))
expect['result'] = repr(res)
expect['exc'] = get_full_cls_name(res.__class__)
expect['traceback'] = traceback
self.assertJSONEqual(json, dict(task=expect))
def test_success(self):
self.assertStatusForIs(states.SUCCESS, 'The quick brown fox')
def test_failure(self):
exc, tb = catch_exception(MyError('foo'))
self.assertStatusForIs(states.FAILURE, exc, tb)
def test_retry(self):
oexc, _ = catch_exception(MyError('Resource not available'))
exc, tb = catch_exception(MyRetryTaskError(str(oexc), oexc))
self.assertStatusForIs(states.RETRY, exc, tb)
# Only SUCCESS counts as "executed" for the is-successful view.
class test_task_is_successful(ViewTestCase):
def assertStatusForIs(self, status, outcome):
uuid = gen_unique_id()
result = gen_unique_id()
current_app.backend.store_result(uuid, result, status)
json = self.client.get(task_is_successful(task_id=uuid))
self.assertJSONEqual(json, {'task': {'id': uuid,
'executed': outcome}})
def test_success(self):
self.assertStatusForIs(states.SUCCESS, True)
def test_pending(self):
self.assertStatusForIs(states.PENDING, False)
def test_failure(self):
self.assertStatusForIs(states.FAILURE, False)
def test_retry(self):
self.assertStatusForIs(states.RETRY, False)
| bsd-3-clause |
kantlove/flask-simple-page | Lib/site-packages/flask_bootstrap/__init__.py | 2 | 5963 | #!/usr/bin/env python
# coding=utf8
__version__ = '3.3.5.6'
import re
from flask import Blueprint, current_app, url_for
# WTForms is an optional dependency: when it is missing, install a stub
# filter that only fails if a template actually calls it.
try:
from wtforms.fields import HiddenField
except ImportError:
def is_hidden_field_filter(field):
raise RuntimeError('WTForms is not installed.')
else:
def is_hidden_field_filter(field):
return isinstance(field, HiddenField)
class CDN(object):
"""Base class for CDN objects."""
# NOTE(review): the concrete CDN classes below subclass ``object`` directly
# rather than this class; it documents the expected interface only.
def get_resource_url(self, filename):
"""Return resource url for filename."""
raise NotImplementedError
class StaticCDN(object):
"""A CDN that serves content from the local application.
:param static_endpoint: Endpoint to use.
:param rev: If ``True``, honor ``BOOTSTRAP_QUERYSTRING_REVVING``.
"""
def __init__(self, static_endpoint='static', rev=False):
self.static_endpoint = static_endpoint
self.rev = rev
def get_resource_url(self, filename):
extra_args = {}
# Append the extension version as a cache-busting query argument when
# both ``rev`` and the app's BOOTSTRAP_QUERYSTRING_REVVING are enabled.
if self.rev and current_app.config['BOOTSTRAP_QUERYSTRING_REVVING']:
extra_args['bootstrap'] = __version__
return url_for(self.static_endpoint, filename=filename, **extra_args)
class WebCDN(object):
    """Serves files from the Web.

    :param baseurl: Base URL; resource filenames are appended to it verbatim.
    """

    def __init__(self, baseurl):
        self.baseurl = baseurl

    def get_resource_url(self, filename):
        return '%s%s' % (self.baseurl, filename)
class ConditionalCDN(object):
"""Serves files from one CDN or another, depending on whether a
configuration value is set.
:param confvar: Configuration variable to use.
:param primary: CDN to use if the configuration variable is ``True``.
:param fallback: CDN to use otherwise.
"""
def __init__(self, confvar, primary, fallback):
self.confvar = confvar
self.primary = primary
self.fallback = fallback
def get_resource_url(self, filename):
# Looked up at call time so the decision tracks the current app's config.
if current_app.config[self.confvar]:
return self.primary.get_resource_url(filename)
return self.fallback.get_resource_url(filename)
def bootstrap_find_resource(filename, cdn, use_minified=None, local=True):
    """Resource finding function, also available in templates.

    Tries to find a resource, will force SSL depending on
    ``BOOTSTRAP_CDN_FORCE_SSL`` settings.

    :param filename: File to find a URL for.
    :param cdn: Name of the CDN to use.
    :param use_minified: If set to ``True``/``False``, use/don't use
                         minified. If ``None``, honors
                         ``BOOTSTRAP_USE_MINIFIED``.
    :param local: If ``True``, uses the ``local``-CDN when
                  ``BOOTSTRAP_SERVE_LOCAL`` is enabled. If ``False``, uses
                  the ``static``-CDN instead.
    :return: A URL.
    """
    config = current_app.config

    # Idiom fix: test against None with ``is``, not ``==`` (PEP 8).
    if use_minified is None:
        use_minified = config['BOOTSTRAP_USE_MINIFIED']

    if use_minified:
        # Insert ".min" before the final extension: "a.js" -> "a.min.js".
        filename = '%s.min.%s' % tuple(filename.rsplit('.', 1))

    cdns = current_app.extensions['bootstrap']['cdns']
    resource_url = cdns[cdn].get_resource_url(filename)

    # NOTE(review): ``local`` is never referenced in this body; it is kept
    # for interface compatibility with existing callers/templates.
    if resource_url.startswith('//') and config['BOOTSTRAP_CDN_FORCE_SSL']:
        resource_url = 'https:%s' % resource_url

    return resource_url
class Bootstrap(object):
"""Flask extension object: registers the bootstrap blueprint, template
globals, and the CDN registry on an application."""
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
# Strip any pre-release/build suffix; only the x.y.z part goes into URLs.
BOOTSTRAP_VERSION = re.sub(r'^(\d+\.\d+\.\d+).*', r'\1', __version__)
JQUERY_VERSION = '1.11.3'
HTML5SHIV_VERSION = '3.7.2'
RESPONDJS_VERSION = '1.4.2'
# Defaults; apps may override any of these before or after init_app.
app.config.setdefault('BOOTSTRAP_USE_MINIFIED', True)
app.config.setdefault('BOOTSTRAP_CDN_FORCE_SSL', False)
app.config.setdefault('BOOTSTRAP_QUERYSTRING_REVVING', True)
app.config.setdefault('BOOTSTRAP_SERVE_LOCAL', False)
app.config.setdefault('BOOTSTRAP_LOCAL_SUBDOMAIN', None)
blueprint = Blueprint(
'bootstrap',
__name__,
template_folder='templates',
static_folder='static',
static_url_path=app.static_url_path + '/bootstrap',
subdomain=app.config['BOOTSTRAP_LOCAL_SUBDOMAIN'])
app.register_blueprint(blueprint)
# Expose the helpers to Jinja templates.
app.jinja_env.globals['bootstrap_is_hidden_field'] =\
is_hidden_field_filter
app.jinja_env.globals['bootstrap_find_resource'] =\
bootstrap_find_resource
if not hasattr(app, 'extensions'):
app.extensions = {}
local = StaticCDN('bootstrap.static', rev=True)
static = StaticCDN()
# Each entry serves locally when BOOTSTRAP_SERVE_LOCAL is set,
# otherwise from the given web CDN.
def lwrap(cdn, primary=static):
return ConditionalCDN('BOOTSTRAP_SERVE_LOCAL', primary, cdn)
bootstrap = lwrap(
WebCDN('//cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/%s/'
% BOOTSTRAP_VERSION),
local)
jquery = lwrap(
WebCDN('//cdnjs.cloudflare.com/ajax/libs/jquery/%s/'
% JQUERY_VERSION),
local)
html5shiv = lwrap(
WebCDN('//cdnjs.cloudflare.com/ajax/libs/html5shiv/%s/'
% HTML5SHIV_VERSION))
respondjs = lwrap(
WebCDN('//cdnjs.cloudflare.com/ajax/libs/respond.js/%s/'
% RESPONDJS_VERSION))
app.extensions['bootstrap'] = {
'cdns': {
'local': local,
'static': static,
'bootstrap': bootstrap,
'jquery': jquery,
'html5shiv': html5shiv,
'respond.js': respondjs,
},
}
# setup support for flask-nav
renderers = app.extensions.setdefault('nav_renderers', {})
renderer_name = (__name__ + '.nav', 'BootstrapRenderer')
renderers['bootstrap'] = renderer_name
# make bootstrap the default renderer
renderers[None] = renderer_name
| mit |
wakatime/komodo-wakatime | components/wakatime/packages/pygments/lexers/stata.py | 27 | 3627 | # -*- coding: utf-8 -*-
"""
pygments.lexers.stata
~~~~~~~~~~~~~~~~~~~~~
Lexer for Stata
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Comment, Keyword, Name, Number, \
String, Text, Operator
from pygments.lexers._stata_builtins import builtins_base, builtins_functions
__all__ = ['StataLexer']
class StataLexer(RegexLexer):
"""
For `Stata <http://www.stata.com/>`_ do files.
.. versionadded:: 2.2
"""
# Syntax based on
# - http://fmwww.bc.edu/RePEc/bocode/s/synlightlist.ado
# - http://github.com/isagalaev/highlight.js/blob/master/src/languages/stata.js
# - http://github.com/jpitblado/vim-stata/blob/master/syntax/stata.vim
name = 'Stata'
aliases = ['stata', 'do']
filenames = ['*.do', '*.ado']
mimetypes = ['text/x-stata', 'text/stata', 'application/x-stata']
# NOTE(review): 'root' only includes comments, vars-strings, numbers and
# keywords below; the 'operators' and 'format' states are defined but are
# never included or pushed from any other state, so as written they are
# unreachable -- confirm against upstream Pygments before relying on them.
tokens = {
'root': [
include('comments'),
include('vars-strings'),
include('numbers'),
include('keywords'),
(r'.', Text),
],
# Global and local macros; regular and special strings
'vars-strings': [
(r'\$[\w{]', Name.Variable.Global, 'var_validglobal'),
(r'`\w{0,31}\'', Name.Variable),
(r'"', String, 'string_dquote'),
(r'`"', String, 'string_mquote'),
],
# For either string type, highlight macros as macros
'string_dquote': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
(r'\$', Name.Variable.Global, 'var_validglobal'),
(r'`', Name.Variable, 'var_validlocal'),
(r'[^$`"\\]+', String),
(r'[$"\\]', String),
],
'string_mquote': [
(r'"\'', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape),
(r'\$', Name.Variable.Global, 'var_validglobal'),
(r'`', Name.Variable, 'var_validlocal'),
(r'[^$`"\\]+', String),
(r'[$"\\]', String),
],
'var_validglobal': [
(r'\{\w{0,32}\}', Name.Variable.Global, '#pop'),
(r'\w{1,32}', Name.Variable.Global, '#pop'),
],
'var_validlocal': [
(r'\w{0,31}\'', Name.Variable, '#pop'),
],
# * only OK at line start, // OK anywhere
'comments': [
(r'^\s*\*.*$', Comment),
(r'//.*', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
],
# Built in functions and statements
'keywords': [
(words(builtins_functions, prefix = r'\b', suffix = r'\('),
Name.Function),
(words(builtins_base, prefix = r'(^\s*|\s)', suffix = r'\b'),
Keyword),
],
# http://www.stata.com/help.cgi?operators
'operators': [
(r'-|==|<=|>=|<|>|&|!=', Operator),
(r'\*|\+|\^|/|!|~|==|~=', Operator)
],
# Stata numbers
'numbers': [
# decimal number
(r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[i]?\b',
Number),
],
# Stata formats
'format': [
(r'%-?\d{1,2}(\.\d{1,2})?[gfe]c?', Name.Variable),
(r'%(21x|16H|16L|8H|8L)', Name.Variable),
(r'%-?(tc|tC|td|tw|tm|tq|th|ty|tg).{0,32}', Name.Variable),
(r'%[-~]?\d{1,4}s', Name.Variable),
]
}
| bsd-3-clause |
3dfxsoftware/cbss-addons | lunch/wizard/__init__.py | 440 | 1053 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import lunch_validation
import lunch_cancel
import lunch_order
| gpl-2.0 |
a10networks/a10sdk-python | a10sdk/core/router/router_bgp_network_ip_cidr.py | 2 | 2128 | from a10sdk.common.A10BaseClass import A10BaseClass
class IpCidr(A10BaseClass):
    """AXAPI ``ip-cidr`` object: announce an IPv4 network via BGP.

    Supports CRUD operations through `common/A10BaseClass` and is the
    "PARENT" class for this module.

    Endpoint::

        https://<Hostname|Ip address>//axapi/v3/router/bgp/{as_number}/network/ip-cidr/{network_ipv4_cidr}

    Notable attributes (see the A10 schema for full constraints):
      - description: up to 80 characters describing this network
      - route_map: route-map used to modify the attributes
      - comm_value: community value (1-4294967295|AA:NN|internet|local-AS|no-advertise|no-export)
      - backdoor: flag marking a BGP backdoor route
      - network_ipv4_cidr: required network mask (ipv4-cidr format)
      - uuid: object uuid (modification not allowed)
      - DeviceProxy: proxy for REST operations and session handling
    """

    def __init__(self, **kwargs):
        # Fixed metadata the A10 base machinery expects on every object.
        self.ERROR_MSG = ""
        self.required = [ "network_ipv4_cidr"]
        self.b_key = "ip-cidr"
        self.a10_url="/axapi/v3/router/bgp/{as_number}/network/ip-cidr/{network_ipv4_cidr}"
        self.DeviceProxy = ""

        # Schema-defined fields all default to the empty string.
        for field in ("description", "route_map", "comm_value",
                      "backdoor", "network_ipv4_cidr", "uuid"):
            setattr(self, field, "")

        # Caller-supplied keyword values override the defaults above.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
| apache-2.0 |
CoolCloud/flask-admin | flask_admin/contrib/sqla/ajax.py | 13 | 2633 | from sqlalchemy import or_
from flask_admin._compat import as_unicode, string_types
from flask_admin.model.ajax import AjaxModelLoader, DEFAULT_PAGE_SIZE
class QueryAjaxModelLoader(AjaxModelLoader):
    """AJAX model loader that resolves records through a SQLAlchemy query."""

    def __init__(self, name, session, model, **options):
        """
        Constructor.

        :param fields:
            Fields to run query against
        """
        super(QueryAjaxModelLoader, self).__init__(name, options)

        self.session = session
        self.model = model
        self.fields = options.get('fields')
        self.order_by = options.get('order_by')

        if not self.fields:
            raise ValueError('AJAX loading requires `fields` to be specified for %s.%s' % (model, self.name))

        self._cached_fields = self._process_fields()

        # Only single-column primary keys are supported for lookups.
        pk_columns = model._sa_class_manager.mapper.primary_key
        if len(pk_columns) > 1:
            raise NotImplementedError('Flask-Admin does not support multi-pk AJAX model loading.')

        self.pk = pk_columns[0].name

    def _process_fields(self):
        # Resolve string field names into model attributes; anything that is
        # not a string is passed through untouched.
        resolved = []

        for field in self.fields:
            if not isinstance(field, string_types):
                # TODO: Figure out if it is valid SQLAlchemy property?
                resolved.append(field)
                continue

            attr = getattr(self.model, field, None)
            if not attr:
                raise ValueError('%s.%s does not exist.' % (self.model, field))
            resolved.append(attr)

        return resolved

    def format(self, model):
        """Return a (pk, label) tuple for *model*, or None when absent."""
        if not model:
            return None

        return (getattr(model, self.pk), as_unicode(model))

    def get_one(self, pk):
        """Fetch a single record by primary key."""
        return self.session.query(self.model).get(pk)

    def get_list(self, term, offset=0, limit=DEFAULT_PAGE_SIZE):
        """Return up to *limit* records whose searchable fields contain *term*."""
        stmt = self.session.query(self.model)

        # Case-insensitive substring match against every configured field.
        conditions = [field.ilike(u'%%%s%%' % term) for field in self._cached_fields]
        stmt = stmt.filter(or_(*conditions))

        if self.order_by:
            stmt = stmt.order_by(self.order_by)

        return stmt.offset(offset).limit(limit).all()
def create_ajax_loader(model, session, name, field_name, options):
    """Build a QueryAjaxModelLoader for the relation *field_name* of *model*.

    :param model: model class owning the relation
    :param session: SQLAlchemy session used for lookups
    :param name: loader name
    :param field_name: name of the relationship attribute on *model*
    :param options: extra options forwarded to the loader (e.g. `fields`)
    :raises ValueError: if the field does not exist or is not a relation
    """
    attr = getattr(model, field_name, None)

    if attr is None:
        raise ValueError('Model %s does not have field %s.' % (model, field_name))

    if not hasattr(attr, 'property') or not hasattr(attr.property, 'direction'):
        raise ValueError('%s.%s is not a relation.' % (model, field_name))

    # Consistency fix: dereference `attr.property` (the attribute validated
    # just above) instead of the `attr.prop` alias; behavior is identical.
    remote_model = attr.property.mapper.class_
    return QueryAjaxModelLoader(name, session, remote_model, **options)
| bsd-3-clause |
sebalix/OpenUpgrade | doc/_themes/odoodoc/html_domain.py | 129 | 4109 | # -*- coding: utf-8 -*-
from docutils import nodes, utils
from docutils.parsers.rst import Directive, directives, docutils
from docutils.parsers.rst.directives.body import LineBlock
import sphinx.roles
from sphinx.domains import Domain
def setup(app):
    """Register the HTML domain and its custom docutils nodes with Sphinx."""
    app.add_domain(HtmlDomain)

    def _opener(tagname):
        # Build an HTML visitor that emits the opening tag for `tagname`.
        return lambda self, node: self.body.append(self.starttag(node, tagname))

    app.add_node(div, html=(
        _opener('div'),
        lambda self, node: self.body.append('</div>\n')))
    app.add_node(address, html=(
        _opener('address'),
        lambda self, node: self.body.append('</address>\n')
    ))
    app.add_node(cite, html=(visit_cite, depart_cite))

    # Simple inline nodes map 1:1 onto an HTML tag of the same name.
    for name, node in [('mark', mark), ('ins', insert), ('del', delete),
                       ('s', strikethrough), ('u', underline), ('small', small),
                       ('kbd', kbd), ('var', var), ('samp', samp)]:
        addnode(app, node, name)
class div(nodes.General, nodes.Element): pass
class Div(Directive):
    """``div`` directive: wrap the parsed content in a ``div`` node.

    The optional argument is a space-separated list of CSS class names.
    """
    optional_arguments = 1
    final_argument_whitespace = 1
    has_content = True

    def run(self):
        self.assert_has_content()
        text = '\n'.join(self.content)

        classes = []
        try:
            if self.arguments:
                classes = directives.class_option(self.arguments[0])
        except ValueError:
            raise self.error(
                'Invalid class attribute value for "%s" directive: "%s".'
                % (self.name, self.arguments[0]))

        node = div(text)
        node['classes'].extend(classes)
        self.add_name(node)
        # Parse the directive body into the new node.
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class address(nodes.General, nodes.Element): pass
class Address(LineBlock):
    """``address`` directive: a line block rendered as HTML ``<address>``."""

    def run(self):
        # Re-wrap the line block produced by the base class in an address node.
        [line_block] = super(Address, self).run()
        return [address(line_block.rawsource, *line_block.children)]
# Inline docutils nodes, each rendered as the matching HTML tag by the
# visitors registered in setup()/addnode().
class mark(nodes.Inline, nodes.TextElement): pass
class insert(nodes.Inline, nodes.TextElement): pass
class delete(nodes.Inline, nodes.TextElement): pass
class strikethrough(nodes.Inline, nodes.TextElement): pass
class underline(nodes.Inline, nodes.TextElement): pass
class small(nodes.Inline, nodes.TextElement): pass
# FixedTextElement subclasses carry preformatted text (keyboard input,
# variables, sample output).
class kbd(nodes.Inline, nodes.FixedTextElement): pass
class var(nodes.Inline, nodes.FixedTextElement): pass
class samp(nodes.Inline, nodes.FixedTextElement): pass
def makerole(node):
    """Return a docutils role function wrapping the stripped text in *node*."""
    def role(name, rawtext, text, lineno, inliner, options=None, content=None):
        return [node(rawtext.strip(), text.strip())], []
    return role
def addnode(app, node, nodename):
    """Register *node* with HTML visitors emitting <nodename>...</nodename>."""
    def visit(self, n):
        self.body.append(self.starttag(n, nodename))

    def depart(self, n):
        self.body.append('</%s>' % nodename)

    app.add_node(node, html=(visit, depart))
def initialism(*args, **kwargs):
    """``abbr``-style role that also tags the node with class "initialism"."""
    result_nodes, _messages = sphinx.roles.abbr_role(*args, **kwargs)
    [abbr_node] = result_nodes
    abbr_node.attributes.setdefault('classes', []).append('initialism')
    # NOTE: system messages from abbr_role are intentionally dropped here,
    # matching the original behavior.
    return [abbr_node], []
def cite_role(typ, rawtext, text, lineno, inliner, options=None, content=None):
    """``cite`` role; a trailing "(source)" suffix becomes the citation source."""
    text = utils.unescape(text)
    match = sphinx.roles._abbr_re.search(text)

    if match is None:
        # No "(...)" suffix: plain citation.
        return [cite(text, text, **(options or {}))], []

    body = text[:match.start()].strip()
    return [cite(body, body, source=match.group(1))], []
class cite(nodes.Inline, nodes.TextElement): pass
def visit_cite(self, node):
    """Open the HTML <cite> tag; a 'source' attribute becomes its title."""
    attrs = {'title': node['source']} if node.hasattr('source') else {}
    self.body.append(self.starttag(node, 'cite', '', **attrs))
def depart_cite(self, node):
    """Close the <cite> tag opened by visit_cite.

    Bug fix: the closing tag used to be '</abbr>', which never matched the
    '<cite>' emitted by visit_cite and produced invalid HTML.
    """
    self.body.append('</cite>')
class HtmlDomain(Domain):
    """Sphinx domain exposing raw-HTML directives and roles (prefix ``h:``)."""
    name = 'h'
    label = 'HTML'
    # Block-level constructs.
    directives = {
        'div': Div,
        'address': Address,
    }
    # Inline constructs; most simply wrap text in the matching HTML tag.
    roles = {
        'mark': makerole(mark),
        'ins': makerole(insert),
        'del': makerole(delete),
        's': makerole(strikethrough),
        'u': makerole(underline),
        'small': makerole(small),
        'initialism': initialism,
        'cite': cite_role,
        'kbd': makerole(kbd),
        'var': makerole(var),
        'samp': makerole(samp),
    }
| agpl-3.0 |
JPG-Consulting/linux | tools/perf/scripts/python/futex-contention.py | 1997 | 1508 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Module-level state shared between the futex enter/exit handlers,
# keyed by thread id.
process_names = {}
thread_thislock = {}   # tid -> futex address the thread is currently waiting on
thread_blocktime = {}  # tid -> timestamp (ns) when the thread started waiting
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
# NOTE(review): process_names is assigned twice; the first assignment above
# is redundant.
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
	nr, uaddr, op, val, utime, uaddr2, val3):
	# Record when a thread starts blocking on a futex.
	# Only FUTEX_WAIT entries are tracked.
	cmd = op & FUTEX_CMD_MASK
	if cmd != FUTEX_WAIT:
		return # we don't care about originators of WAKE events

	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
	nr, ret):
	# A thread returned from FUTEX_WAIT: account how long it was blocked.
	# Fix: use the `in` operator instead of dict.has_key(), which is
	# deprecated and Python-2-only; behavior is identical.
	if tid in thread_blocktime:
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	# Called by perf before event processing starts.
	# Python 2 print statement: perf's embedded interpreter was Python 2.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called by perf when the trace ends: print per-(tid, lock) contention
	# statistics accumulated in lock_waits.
	# NOTE(review): `min` and `max` shadow the builtins; harmless here since
	# only `count` and `avg` are printed.
	for (tid, lock) in lock_waits:
		min, max, avg, count = lock_waits[tid, lock]
		print "%s[%d] lock %x contended %d times, %d avg ns" % \
		      (process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
UweFleis3/Uwe | py/openage/convert/gamedata/unit.py | 46 | 43288 | from .. import dataformat
from struct import Struct, unpack_from
from .. import util
from ..util import dbg, zstr
class UnitCommand(dataformat.Exportable):
    """Binary layout of one command (ability) a unit may receive by script or human."""

    name_struct = "unit_command"
    name_struct_file = "unit"
    struct_description = "a command a single unit may recieve by script or human."

    # (read-mode, member-name, raw type) tuples, in on-disk order.
    data_format = (
        (dataformat.READ, "command_used", "int16_t"), #always 1
        (dataformat.READ_EXPORT, "id", "int16_t"), #command id
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_EXPORT, "type", dataformat.EnumLookupMember(
            raw_type = "int16_t",
            type_name = "command_ability",
            lookup_dict = {
                0: "UNUSED",
                1: "MOVE_TO",
                2: "FOLLOW",
                3: "GARRISON",
                4: "EXPLORE",
                5: "GATHER", # gather, rebuild
                6: "UNKNOWN_ANIMAL",
                7: "ATTACK",
                8: "SHOOT",
                10: "FLY",
                11: "UNKNOWN_PREDATOR",
                12: "UNLOAD", # transport, garrison
                13: "GUARD",
                20: "ESCAPE", # sure?
                21: "UNKNOWN_FARM",
                101: "BUILD",
                102: "MAKE_OBJECT",
                103: "MAKE_TECH",
                104: "CONVERT",
                105: "HEAL",
                106: "REPAIR",
                107: "CONVERT_AUTO",
                109: "UNKNOWN_109",
                110: "HUNT",
                111: "TRADE",
                120: "WONDER_VICTORY_GENERATE",
                121: "DESELECT_ON_TASK",
                122: "LOOT",
                123: "HOUSING",
                125: "UNPACK_ATTACK",
                131: "UNKNOWN_131",
                132: "PICKUP_UNIT",
                135: "KIDNAP_UNIT",
                136: "DEPOSIT_UNIT",
                768: "UNKNOWN_768",
                1024: "UNKNOWN_1024",
            },
        )),
        (dataformat.READ_EXPORT, "class_id", "int16_t"),
        (dataformat.READ_EXPORT, "unit_id", "int16_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_EXPORT, "ressource_in", "int16_t"),
        (dataformat.READ_EXPORT, "ressource_productivity", "int16_t"), #resource that multiplies the amount you can gather
        (dataformat.READ_EXPORT, "ressource_out", "int16_t"),
        (dataformat.READ_EXPORT, "ressource", "int16_t"),
        (dataformat.READ_EXPORT, "work_rate_multiplier", "float"),
        (dataformat.READ_EXPORT, "execution_radius", "float"),
        (dataformat.READ_EXPORT, "extra_range", "float"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "float"),
        (dataformat.READ, "selection_enabled", "int8_t"), #1=allows to select a target, type defined in `selection_type`
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_EXPORT, "targets_allowed", dataformat.EnumLookupMember(
            raw_type = "int8_t", #what can be selected as a target for the unit command?
            type_name = "selection_type",
            lookup_dict = {
                0: "ANY_0", #select anything
                1: "OWNED_UNITS", #your own things
                2: "NEUTRAL_ENEMY", #enemy and neutral things (->attack)
                3: "NOTHING",
                4: "GAIA_OWNED_ALLY", #any of gaia, owned or allied things
                5: "GAYA_NEUTRAL_ENEMY", #any of gaia, neutral or enemy things
                6: "NOT_OWNED", #all things that aren't yours
                7: "ANY_7",
            },
        )),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "tool_graphic_id", "int16_t"), #walking with tool but no ressource
        (dataformat.READ, "proceed_graphic_id", "int16_t"), #proceeding ressource gathering or attack
        (dataformat.READ, "action_graphic_id", "int16_t"), #actual execution or transformation graphic
        (dataformat.READ, "carrying_graphic_id", "int16_t"), #display ressources in hands
        (dataformat.READ, "execution_sound_id", "int16_t"), #sound to play when execution starts
        (dataformat.READ, "ressource_deposit_sound_id", "int16_t"), #sound to play on ressource drop
    )
class UnitHeader(dataformat.Exportable):
    """Binary layout of a unit's command list."""

    name_struct = "unit_header"
    name_struct_file = "unit"
    struct_description = "stores a bunch of unit commands."

    data_format = (
        # ContinueReadMember: presumably gates reading of the following
        # members on `exists` — TODO confirm in the dataformat module.
        (dataformat.READ, "exists", dataformat.ContinueReadMember("uint8_t")),
        (dataformat.READ, "unit_command_count", "uint16_t"),
        (dataformat.READ_EXPORT, "unit_commands", dataformat.SubdataMember(
            ref_type=UnitCommand,
            length="unit_command_count",
        )),
    )
class RessourceStorage(dataformat.Exportable):
    """Binary layout of one resource-storage slot of a unit."""

    name_struct = "ressource_storage"
    name_struct_file = "unit"
    struct_description = "determines the resource storage capacity for one unit mode."

    data_format = (
        (dataformat.READ, "type", "int16_t"),
        (dataformat.READ, "amount", "float"),
        # What happens to the stored resource when the unit dies.
        (dataformat.READ, "used_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "ressource_handling",
            lookup_dict = {
                0: "DECAYABLE",
                1: "KEEP_AFTER_DEATH",
                2: "RESET_ON_DEATH_INSTANT",
                4: "RESET_ON_DEATH_WHEN_COMPLETED",
            },
        )),
    )
class DamageGraphic(dataformat.Exportable):
    """Binary layout of one damage-dependent unit image."""

    name_struct = "damage_graphic"
    name_struct_file = "unit"
    struct_description = "stores one possible unit image that is displayed at a given damage percentage."

    data_format = (
        (dataformat.READ_EXPORT, "graphic_id", "int16_t"),
        (dataformat.READ_EXPORT, "damage_percent", "int8_t"),
        # How the damage graphic is combined with the base graphic.
        (dataformat.READ_EXPORT, "apply_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "damage_draw_type",
            lookup_dict = {
                0: "ADD_FLAMES_0",
                1: "ADD_FLAMES_1",
                2: "REPLACE",
            },
        )),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
    )
class HitType(dataformat.Exportable):
    """Binary layout of one (damage class, amount) pair, used for both attacks and armor."""

    name_struct = "hit_type"
    name_struct_file = "unit"
    struct_description = "stores attack amount for a damage type."

    data_format = (
        (dataformat.READ, "type_id", dataformat.EnumLookupMember(
            raw_type = "int16_t",
            type_name = "hit_class",
            lookup_dict = {
                -1: "NONE",
                1: "INFANTRY",
                2: "SHIP_TURTLE",
                3: "UNITS_PIERCE",
                4: "UNITS_MELEE",
                5: "WAR_ELEPHANT",
                8: "CAVALRY",
                11: "BUILDINGS_NO_PORT",
                13: "STONE_DEFENSES",
                15: "ARCHERS",
                16: "SHIPS_CAMELS_SABOTEURS",
                17: "RAMS",
                18: "TREES",
                19: "UNIQUE_UNITS",
                20: "SIEGE_WEAPONS",
                21: "BUILDINGS",
                22: "WALLS_GATES",
                24: "BOAR",
                25: "MONKS",
                26: "CASTLE",
                27: "SPEARMEN",
                28: "CAVALRY_ARCHER",
                29: "EAGLE_WARRIOR",
            },
        )),
        (dataformat.READ, "amount", "int16_t"),
    )
class RessourceCost(dataformat.Exportable):
    """Binary layout of one resource cost entry for creating the unit.

    NOTE(review): the lookup table below enumerates far more than simple
    costs — it is the game's general resource/statistic id space, reused
    here for cost entries.
    """

    name_struct = "ressource_cost"
    name_struct_file = "unit"
    struct_description = "stores cost for one ressource for creating the unit."

    data_format = (
        (dataformat.READ, "type_id", dataformat.EnumLookupMember(
            raw_type = "int16_t",
            type_name = "ressource_types",
            lookup_dict = {
                -1: "NONE",
                0: "FOOD_STORAGE",
                1: "WOOD_STORAGE",
                2: "STONE_STORAGE",
                3: "GOLD_STORAGE",
                4: "POPULATION_HEADROOM",
                5: "CONVERSION_RANGE",
                6: "CURRENT_AGE",
                7: "OWNED_RELIC_COUNT",
                8: "TRADE_BONUS",
                9: "TRADE_GOODS",
                10: "TRADE_PRODUCTION",
                11: "POPULATION", #both current population and population headroom
                12: "CORPSE_DECAY_TIME",
                13: "DISCOVERY",
                14: "RUIN_MONUMENTS_CAPTURED", #unused
                15: "PREDATOR_ANIMAL_FOOD",
                16: "CROPS",
                17: "FISH_STORAGE",
                18: "UNKNOWN_18",
                19: "TOTAL_UNITS_OWNED", #or just military ones? used for counting losses
                20: "UNITS_KILLED",
                21: "RESEARCHED_TECHNOLOGIES_COUNT",
                23: "TECHNOLOGY_ID_0", #default: 102
                24: "TECHNOLOGY_ID_1", #default: 103
                25: "TECHNOLOGY_ID_2", #default: 101
                27: "ATONEMENT", #bool
                28: "REDEMPTION", #bool
                30: "VAL_500", #default: 500
                32: "BONUS_POPULATION",
                35: "FAITH_RECHARGE_RATE", #default: 1.6
                36: "FARM_FOOD_AMOUNT", #default: 175
                37: "CIVILIAN_POPULATION",
                38: "UNKNOWN_38",
                39: "ALL_TECHS_ACHIEVED", #default: 178
                40: "MILITARY_POPULATION", #-> largest army
                41: "UNITS_CONVERTED",
                42: "WONDERS_STANDING",
                43: "BUILDINGS_RAZED",
                44: "KILL_RATIO",
                45: "SURVIVAL_TO_FINISH", #bool
                46: "TRIBUTE_FEE", #default: 0.3
                47: "GOLD_MINING_PRODUCTIVITY", #default: 1
                48: "TOWN_CENTER_AVAILABLE",
                49: "GOLD_COUNTER",
                50: "REVEAL_ALLY", #bool, ==cartography discovered
                51: "HOUSES_UNUSED",
                52: "MONASTERY_COUNT",
                53: "TRIBUTE_SENT",
                54: "RUINES_CAPTURED_ALL", #bool
                55: "RELICS_CAPTURED_ALL", #bool
                56: "LOAD_STORAGE", #or unit unload room?
                57: "CAPTURED_UNITS",
                58: "DARK_AGE", #default: 104
                59: "TRADE_GOOD_QUALITY", #default: 1
                60: "TRADE_MARKET_LEVEL",
                61: "FORMATIONS",
                62: "BUILDING_HOUSING_RATE", #default: 20
                63: "GATHER_TAX_RATE", #default: 32000
                64: "GATHER_ACCUMULATOR",
                65: "SALVAGE_DECAY_RATE", #default: 5
                66: "ALLOW_FORMATION", #bool, something with age?
                67: "CONVERSIONS", #bool?
                68: "HIT_POINTS_KILLED", #unused
                69: "KILLED_PLAYER_1", #bool
                70: "KILLED_PLAYER_2", #bool
                71: "KILLED_PLAYER_3", #bool
                72: "KILLED_PLAYER_4", #bool
                73: "KILLED_PLAYER_5", #bool
                74: "KILLED_PLAYER_6", #bool
                75: "KILLED_PLAYER_7", #bool
                76: "KILLED_PLAYER_8", #bool
                77: "CONVERSION_RESISTANCE",
                78: "TRADE_FEE", #default: 0.3
                79: "STONE_MINING_PRODUCTIVITY", #default: 1
                80: "QUEUED_UNITS",
                81: "TRAINING_COUNT",
                82: "START_PACKED_TOWNCENTER", #or raider, default: 2
                83: "BOARDING_RECHARGE_RATE",
                84: "STARTING_VILLAGERS", #default: 3
                85: "RESEARCH_COST_MULTIPLIER",
                86: "RESEARCH_TIME_MULTIPLIER",
                87: "CONVERT_SHIPS_ABILITY", #bool
                88: "FISH_TRAP_FOOD_AMOUNT", #default: 700
                89: "BONUS_HEALING_RATE",
                90: "HEALING_RANGE",
                91: "BONUS_STARTING_FOOD",
                92: "BONUS_STARTING_WOOD",
                93: "BONUS_STARTING_STONE",
                94: "BONUS_STARTING_GOLD",
                95: "TOWN_CENTER_PACKING", #or raider, default: 3
                96: "SELF_HEALING_SECONDS_BERSERKER",
                97: "ANIMAL_DISCOVERY_DOMINANT_LOS", #bool, sheep/turkey
                98: "SCORE_ECONOMY", #object cost summary
                99: "SCORE_TECHNOLOGY",
                100: "RELIC_GOLD_COLLECTED",
                101: "TRADE_PROFIT",
                102: "TRIBUTE_P1",
                103: "TRIBUTE_P2",
                104: "TRIBUTE_P3",
                105: "TRIBUTE_P4",
                106: "TRIBUTE_P5",
                107: "TRIBUTE_P6",
                108: "TRIBUTE_P7",
                109: "TRIBUTE_P8",
                110: "KILL_SCORE_P1",
                111: "KILL_SCORE_P2",
                112: "KILL_SCORE_P3",
                113: "KILL_SCORE_P4",
                114: "KILL_SCORE_P5",
                115: "KILL_SCORE_P6",
                116: "KILL_SCORE_P7",
                117: "KILL_SCORE_P8",
                118: "RAZING_COUNT_P1",
                119: "RAZING_COUNT_P2",
                120: "RAZING_COUNT_P3",
                121: "RAZING_COUNT_P4",
                122: "RAZING_COUNT_P5",
                123: "RAZING_COUNT_P6",
                124: "RAZING_COUNT_P7",
                125: "RAZING_COUNT_P8",
                126: "RAZING_SCORE_P1",
                127: "RAZING_SCORE_P2",
                128: "RAZING_SCORE_P3",
                129: "RAZING_SCORE_P4",
                130: "RAZING_SCORE_P5",
                131: "RAZING_SCORE_P6",
                132: "RAZING_SCORE_P7",
                133: "RAZING_SCORE_P8",
                134: "STANDING_CASTLES",
                135: "RAZINGS_HIT_POINTS",
                136: "KILLS_BY_P1",
                137: "KILLS_BY_P2",
                138: "KILLS_BY_P3",
                139: "KILLS_BY_P4",
                140: "KILLS_BY_P5",
                141: "KILLS_BY_P6",
                142: "KILLS_BY_P7",
                143: "KILLS_BY_P8",
                144: "RAZINGS_BY_P1",
                145: "RAZINGS_BY_P2",
                146: "RAZINGS_BY_P3",
                147: "RAZINGS_BY_P4",
                148: "RAZINGS_BY_P5",
                149: "RAZINGS_BY_P6",
                150: "RAZINGS_BY_P7",
                151: "RAZINGS_BY_P8",
                152: "LOST_UNITS_SCORE",
                153: "LOST_BUILDINGS_SCORE",
                154: "LOST_UNITS",
                155: "LOST_BUILDINGS",
                156: "TRIBUTE_FROM_P1",
                157: "TRIBUTE_FROM_P2",
                158: "TRIBUTE_FROM_P3",
                159: "TRIBUTE_FROM_P4",
                160: "TRIBUTE_FROM_P5",
                161: "TRIBUTE_FROM_P6",
                162: "TRIBUTE_FROM_P7",
                163: "TRIBUTE_FROM_P8",
                164: "SCORE_UNITS_CURRENT",
                165: "SCORE_BUILDINGS_CURRENT", #default: 275
                166: "COLLECTED_FOOD",
                167: "COLLECTED_WOOD",
                168: "COLLECTED_STONE",
                169: "COLLECTED_GOLD",
                170: "SCORE_MILITARY",
                171: "TRIBUTE_RECEIVED",
                172: "SCORE_RAZINGS",
                173: "TOTAL_CASTLES",
                174: "TOTAL_WONDERS",
                175: "SCORE_ECONOMY_TRIBUTES",
                176: "CONVERT_ADJUSTMENT_MIN", #used for resistance against monk conversions
                177: "CONVERT_ADJUSTMENT_MAX",
                178: "CONVERT_RESIST_ADJUSTMENT_MIN",
                179: "CONVERT_RESIST_ADJUSTMENT_MAX",
                180: "CONVERT_BUILDIN_MIN", #default: 15
                181: "CONVERT_BUILDIN_MAX", #default: 25
                182: "CONVERT_BUILDIN_CHANCE", #default: 25
                183: "REVEAL_ENEMY",
                184: "SCORE_SOCIETY",
                185: "SCORE_FOOD",
                186: "SCORE_WOOD",
                187: "SCORE_STONE",
                188: "SCORE_GOLD",
                189: "CHOPPING_PRODUCTIVITY", #default: 1
                190: "FOOD_GATHERING_PRODUCTIVITY", #default: 1
                191: "RELIC_GOLD_PRODUCTION_RATE", #default: 30
                192: "HERESY_ACTIVE", #bool
                193: "THEOCRACY_ACTIVE", #bool
                194: "CRENELLATIONS_ACTIVE", #bool
                195: "CONSTRUCTION_RATE", #except for wonders
                196: "WONDER_BONUS",
                197: "SPIES_DISCOUNT", #or atheism_active?
            }
        )),
        (dataformat.READ, "amount", "int16_t"),
        (dataformat.READ, "enabled", "int16_t"),
    )
class BuildingAnnex(dataformat.Exportable):
    """Binary layout of one possible building annex (attached sub-building)."""

    name_struct = "building_annex"
    name_struct_file = "unit"
    struct_description = "a possible building annex."

    data_format = (
        (dataformat.READ_EXPORT, "unit_id", "int16_t"),
        # Placement offsets — exact semantics unclear from here, hence the
        # "misplaced" member names.
        (dataformat.READ_EXPORT, "misplaced0", "float"),
        (dataformat.READ_EXPORT, "misplaced1", "float"),
    )

    def __init__(self, **args):
        super().__init__(**args)
class UnitObject(dataformat.Exportable):
    """
    base properties for every unit entry.
    """

    name_struct = "unit_object"
    name_struct_file = "unit"
    struct_description = "base properties for all units."

    # (read-mode, member-name, raw type) tuples, in on-disk order.
    data_format = (
        (dataformat.READ, "name_length", "uint16_t"),
        (dataformat.READ_EXPORT, "id0", "int16_t"),
        (dataformat.READ_EXPORT, "language_dll_name", "uint16_t"),
        (dataformat.READ_EXPORT, "language_dll_creation", "uint16_t"),
        (dataformat.READ_EXPORT, "unit_class", dataformat.EnumLookupMember(
            raw_type = "int16_t",
            type_name = "unit_classes",
            lookup_dict = {
                0: "ARCHER",
                1: "ARTIFACT",
                2: "TRADE_BOAT",
                3: "BUILDING",
                4: "CIVILIAN",
                5: "SEA_FISH",
                6: "SOLDIER",
                7: "BERRY_BUSH",
                8: "STONE_MINE",
                9: "PREY_ANIMAL",
                10: "PREDATOR_ANIMAL",
                11: "OTHER",
                12: "CAVALRY",
                13: "SIEGE_WEAPON",
                14: "TERRAIN",
                15: "TREES",
                18: "PRIEST",
                19: "TRADE_CART",
                20: "TRANSPORT_BOAT",
                21: "FISHING_BOAT",
                22: "WAR_BOAT",
                23: "CONQUISTADOR",
                27: "WALLS",
                28: "PHALANX",
                29: "ANIMAL_DOMESTICATED",
                30: "FLAGS",
                32: "GOLD_MINE",
                33: "SHORE_FISH",
                34: "CLIFF",
                35: "PETARD",
                36: "CAVALRY_ARCHER",
                37: "DOLPHIN",
                38: "BIRDS",
                39: "GATES",
                40: "PILES",
                41: "PILES_OF_RESOURCE",
                42: "RELIC",
                43: "MONK_WITH_RELIC",
                44: "HAND_CANNONEER",
                45: "TWO_HANDED_SWORD",
                46: "PIKEMAN",
                47: "SCOUT_CAVALRY",
                48: "ORE_MINE",
                49: "FARM",
                50: "SPEARMAN",
                51: "PACKED_SIEGE_UNITS",
                52: "TOWER",
                53: "BOARDING_BOAT",
                54: "UNPACKED_SIEGE_UNITS",
                55: "SCORPION",
                56: "RAIDER",
                57: "CAVALRY_RAIDER",
                58: "SHEEP",
                59: "KING",
                61: "HORSE",
            },
        )),
        (dataformat.READ_EXPORT, "graphic_standing0", "int16_t"),
        (dataformat.READ_EXPORT, "graphic_standing1", "int16_t"),
        (dataformat.READ_EXPORT, "graphic_dying0", "int16_t"),
        (dataformat.READ_EXPORT, "graphic_dying1", "int16_t"),
        (dataformat.READ, "death_mode", "int8_t"), #1 = become `dead_unit_id` (reviving does not make it usable again)
        (dataformat.READ_EXPORT, "hit_points", "int16_t"), #unit health. -1=insta-die
        (dataformat.READ, "line_of_sight", "float"),
        (dataformat.READ, "garrison_capacity", "int8_t"), #number of units that can garrison in there
        (dataformat.READ_EXPORT, "radius_size0", "float"), #size of the unit
        (dataformat.READ_EXPORT, "radius_size1", "float"),
        (dataformat.READ, "hp_bar_height0", "float"), #vertical hp bar distance from ground
        (dataformat.READ_EXPORT, "sound_creation0", "int16_t"),
        (dataformat.READ_EXPORT, "sound_creation1", "int16_t"),
        (dataformat.READ, "dead_unit_id", "int16_t"), #unit id to become on death
        (dataformat.READ, "placement_mode", "int8_t"), #0=placable on top of others in scenario editor, 5=can't
        (dataformat.READ, "air_mode", "int8_t"), #1=no footprints
        (dataformat.READ, "icon_id", "int16_t"), #frame id of the icon slp (57029) to place on the creation button
        (dataformat.READ, "hidden_in_editor", "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ, "enabled", "int16_t"), #0=unlocked by research, 1=insta-available
        (dataformat.READ, "placement_bypass_terrain0", "int16_t"), #terrain id that's needed somewhere on the foundation (e.g. dock water)
        (dataformat.READ, "placement_bypass_terrain1", "int16_t"), #second slot for ^
        (dataformat.READ, "placement_terrain0", "int16_t"), #terrain needed for placement (e.g. dock: water)
        (dataformat.READ, "placement_terrain1", "int16_t"), #alternative terrain needed for placement (e.g. dock: shallows)
        (dataformat.READ, "editor_radius0", "float"),
        (dataformat.READ, "editor_radius1", "float"),
        (dataformat.READ_EXPORT, "building_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "building_modes",
            lookup_dict = {
                0: "NON_BUILDING", #gates, farms, walls, towers
                2: "TRADE_BUILDING", #towncenter, port, trade workshop
                3: "ANY",
            },
        )),
        (dataformat.READ_EXPORT, "visible_in_fog", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "fog_visibility",
            lookup_dict = {
                0: "INVISIBLE", #people etc
                1: "VISIBLE", #buildings
                3: "ONLY_IN_FOG",
            },
        )),
        (dataformat.READ_EXPORT, "terrain_restriction", dataformat.EnumLookupMember(
            raw_type = "int16_t", #determines on what type of ground the unit can be placed/walk
            type_name = "ground_type", #is actually the id of the terrain_restriction entry!
            lookup_dict = {
                0x00: "ANY",
                0x01: "SHORELINE",
                0x02: "WATER",
                0x03: "WATER_SHIP_0x03",
                0x04: "FOUNDATION",
                0x05: "NOWHERE", #can't place anywhere
                0x06: "WATER_DOCK", #shallow water for dock placement
                0x07: "SOLID",
                0x08: "NO_ICE_0x08",
                0x0A: "NO_ICE_0x0A",
                0x0B: "FOREST",
                0x0C: "UNKNOWN_0x0C",
                0x0D: "WATER_0x0D", #great fish
                0x0E: "UNKNOWN_0x0E",
                0x0F: "WATER_SHIP_0x0F", #transport ship
                0x10: "GRASS_SHORELINE", #for gates and walls
                0x11: "WATER_ANY_0x11",
                0x12: "UNKNOWN_0x12",
                0x13: "FISH_NO_ICE",
                0x14: "WATER_ANY_0x14",
                0x15: "WATER_SHALLOW",
            },
        )),
        (dataformat.READ_EXPORT, "fly_mode", "int8_t"),
        (dataformat.READ_EXPORT, "ressource_capacity", "int16_t"),
        (dataformat.READ_EXPORT, "ressource_decay", "float"), #when animals rot, their ressources decay
        (dataformat.READ_EXPORT, "blast_type", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "blast_types",
            lookup_dict = {
                0: "UNIT_0", #projectile, dead, fish, relic, tree, gate, towncenter
                1: "OTHER", #'other' things with multiple rotations
                2: "BUILDING", #buildings, gates, walls, towncenter, fishtrap
                3: "UNIT_3", #boar, farm, fishingship, villager, tradecart, sheep, turkey, archers, junk, ships, monk, siege
            }
        )),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_EXPORT, "interaction_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t", #what can be done with this unit?
            type_name = "interaction_modes",
            lookup_dict = {
                0: "NOTHING_0",
                1: "NOTHING_1",
                2: "SELECTABLE",
                3: "SELECT_ATTACK",
                4: "SELECT_ATTACK_MOVE",
                5: "SELECT_MOVE",
            },
        )),
        (dataformat.READ_EXPORT, "minimap_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t", #how does the unit show up on the minimap
            type_name = "minimap_modes",
            lookup_dict = {
                0: "NO_DOT_0",
                1: "SQUARE_DOT", #turns white when selected
                2: "DIAMOND_DOT", #dito
                3: "DIAMOND_DOT_KEEPCOLOR",
                4: "LARGEDOT_0", #observable by all players, no attack-blinking
                5: "LARGEDOT_1",
                6: "NO_DOT_6",
                7: "NO_DOT_7",
                8: "NO_DOT_8",
                9: "NO_DOT_9",
                10: "NO_DOT_10",
            },
        )),
        (dataformat.READ_EXPORT, "command_attribute", dataformat.EnumLookupMember(
            raw_type = "int16_t", #selects the available ui command buttons for the unit
            type_name = "command_attributes",
            lookup_dict = {
                0: "LIVING", #commands: delete, garrison, stop, attributes: hit points
                1: "ANIMAL", #animal
                2: "NONMILITARY_BULIDING", #nonmilitary building (build page 1)
                3: "VILLAGER", #villager
                4: "MILITARY_UNIT", #military unit
                5: "TRADING_UNIT", #trading unit
                6: "MONK_EMPTY", #monk
                7: "TRANSPORT_SHIP", #transport ship
                8: "RELIC", #relic / monk with relic
                9: "FISHING_SHIP", #fishing ship
                10: "MILITARY_BUILDING", #military building (build page 2)
                11: "SHIELDED_BUILDING", #shield building (build page 3)
            },
        )),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_EXPORT, "language_dll_help", "uint16_t"),
        (dataformat.READ, "hot_keys", "int16_t[4]"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "unselectable", "uint8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),

        #bit 0 == 1 && val != 7: mask shown behind buildings,
        #bit 0 == 0 && val != {6, 10}: no mask displayed,
        #val == {-1, 7}: in open area mask is partially displayed
        #val == {6, 10}: building, causes mask to appear on units behind it
        (dataformat.READ, "selection_mask", "int8_t"),
        (dataformat.READ, "selection_shape_type", "int8_t"),
        (dataformat.READ, "selection_shape", "int8_t"), #0=square, 1<=round

        #bitfield of unit attributes:
        #bit 0: allow garrison,
        #bit 1: don't join formation,
        #bit 2: stealth unit,
        #bit 3: detector unit,
        #bit 4: mechanical unit,
        #bit 5: biological unit,
        #bit 6: self-shielding unit,
        #bit 7: invisible unit
        (dataformat.READ, "attribute", "uint8_t"),
        (dataformat.READ, "civilisation", "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_EXPORT, "selection_effect", dataformat.EnumLookupMember(
            raw_type = "int8_t", #things that happen when the unit was selected
            type_name = "selection_effects",
            lookup_dict = {
                0: "NONE",
                1: "HPBAR_ON_OUTLINE_DARK", #permanent, editor only
                2: "HPBAR_ON_OUTLINE_NORMAL",
                3: "HPBAR_OFF_SELECTION_SHADOW",
                4: "HPBAR_OFF_OUTLINE_NORMAL",
                5: "HPBAR_ON_5",
                6: "HPBAR_OFF_6",
                7: "HPBAR_OFF_7",
                8: "HPBAR_ON_8",
                9: "HPBAR_ON_9",
            },
        )),
        (dataformat.READ, "editor_selection_color", "uint8_t"), #0: default, -16: fish trap, farm, 52: deadfarm, OLD-*, 116: flare, whale, dolphin -123: fish
        (dataformat.READ, "selection_radius0", "float"),
        (dataformat.READ, "selection_radius1", "float"),
        (dataformat.READ, "hp_bar_height1", "float"), #vertical hp bar distance from ground
        (dataformat.READ_EXPORT, "ressource_storage", dataformat.SubdataMember(
            ref_type=RessourceStorage,
            length=3,
        )),
        (dataformat.READ, "damage_graphic_count", "int8_t"),
        (dataformat.READ_EXPORT, "damage_graphic", dataformat.SubdataMember(
            ref_type=DamageGraphic,
            length="damage_graphic_count",
        )),
        (dataformat.READ_EXPORT, "sound_selection", "int16_t"),
        (dataformat.READ_EXPORT, "sound_dying", "int16_t"),
        (dataformat.READ_EXPORT, "attack_mode", "int16_t"), #0: no attack, 1: attack by following, 2: run when attacked, 3:?, 4: attack
        (dataformat.READ_EXPORT, "name", "char[name_length]"),
        (dataformat.READ_EXPORT, "id1", "int16_t"),
        (dataformat.READ_EXPORT, "id2", "int16_t"),
    )

    def __init__(self, **args):
        super().__init__(**args)
class UnitFlag(UnitObject):
    """
    type_id >= 20
    """

    name_struct = "unit_flag"
    name_struct_file = "unit"
    struct_description = "adds speed property to units."

    data_format = (
        # Inherit all UnitObject members, then append `speed`.
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitObject)),
        (dataformat.READ_EXPORT, "speed", "float"),
    )

    def __init__(self, **args):
        super().__init__(**args)
class UnitDoppelganger(UnitFlag):
    """
    type_id >= 25

    Doppelganger unit: structurally identical to a flag unit.
    """

    name_struct = "unit_doppelganger"
    name_struct_file = "unit"
    struct_description = "weird doppelganger unit thats actually the same as a flag unit."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitFlag)),
    )

    def __init__(self, **args):
        # Bug fix: accept and forward **args like every sibling class. The
        # previous zero-argument signature broke the constructor chain —
        # UnitDeadOrFish.__init__(**args) calls super().__init__(**args),
        # which raised TypeError here whenever args were passed.
        super().__init__(**args)
class UnitDeadOrFish(UnitDoppelganger):
    """
    type_id >= 30
    """

    name_struct = "unit_dead_or_fish"
    name_struct_file = "unit"
    struct_description = "adds walking graphics, rotations and tracking properties to units."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitDoppelganger)),
        (dataformat.READ_EXPORT, "walking_graphics0", "int16_t"),
        (dataformat.READ_EXPORT, "walking_graphics1", "int16_t"),
        (dataformat.READ, "rotation_speed", "float"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "tracking_unit_id", "int16_t"), #unit id what for the ground traces are for
        (dataformat.READ, "tracking_unit_used", "uint8_t"), #-1: no tracking present, 2: projectiles with tracking unit
        (dataformat.READ, "tracking_unit_density", "float"), #0: no tracking, 0.5: trade cart, 0.12: some projectiles, 0.4: other projectiles
        (dataformat.READ_UNKNOWN, None, "float"),
        (dataformat.READ_UNKNOWN, None, "int8_t[17]"),
    )

    def __init__(self, **args):
        super().__init__(**args)
class UnitBird(UnitDeadOrFish):
    """
    type_id >= 40

    Adds search radius, work/gathering attributes and movement sounds.
    """

    name_struct = "unit_bird"
    name_struct_file = "unit"
    struct_description = "adds search radius and work properties, as well as movement sounds."

    # binary layout: members are read in exactly this order from the data file
    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitDeadOrFish)),
        (dataformat.READ, "sheep_conversion", "int16_t"),      # 0=can be converted by unit command 107 (you found sheep!!1)
        (dataformat.READ, "search_radius", "float"),
        (dataformat.READ, "work_rate", "float"),
        (dataformat.READ, "drop_site0", "int16_t"),            # unit id where gathered ressources shall be delivered to
        (dataformat.READ, "drop_site1", "int16_t"),            # alternative unit id
        (dataformat.READ_EXPORT, "villager_mode", "int8_t"),   # unit can switch villager type (holza? gathara!) 1=male, 2=female
        (dataformat.READ_EXPORT, "move_sound", "int16_t"),
        (dataformat.READ_EXPORT, "stop_sound", "int16_t"),
        (dataformat.READ, "animal_mode", "int8_t"),
    )

    def __init__(self, **args):
        super().__init__(**args)
class UnitMovable(UnitBird):
    """
    type_id >= 60

    Adds attack, armor, projectile and range attributes.
    """

    name_struct = "unit_movable"
    name_struct_file = "unit"
    struct_description = "adds attack and armor properties to units."

    # binary layout: members are read in exactly this order from the data file
    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitBird)),
        (dataformat.READ, "default_armor", "int16_t"),
        (dataformat.READ, "attack_count", "uint16_t"),
        (dataformat.READ, "attacks", dataformat.SubdataMember(ref_type=HitType, length="attack_count")),
        (dataformat.READ, "armor_count", "uint16_t"),
        (dataformat.READ, "armors", dataformat.SubdataMember(ref_type=HitType, length="armor_count")),
        (dataformat.READ_EXPORT, "interaction_type", dataformat.EnumLookupMember(
            raw_type    = "int16_t",
            type_name   = "interaction_types",
            lookup_dict = {
                -1: "UNIT",
                4: "BUILDING",
                6: "DOCK",
                10: "WALL",
            },
        )),
        (dataformat.READ, "max_range", "float"),
        (dataformat.READ, "blast_radius", "float"),
        (dataformat.READ, "reload_time0", "float"),
        (dataformat.READ, "projectile_unit_id", "int16_t"),
        (dataformat.READ, "accuracy_percent", "int16_t"),      # probablity of attack hit
        (dataformat.READ, "tower_mode", "int8_t"),
        (dataformat.READ, "delay", "int16_t"),                 # delay in frames before projectile is shot
        (dataformat.READ, "projectile_graphics_displacement_lr", "float"),
        (dataformat.READ, "projectile_graphics_displacement_distance", "float"),
        (dataformat.READ, "projectile_graphics_displacement_height", "float"),
        (dataformat.READ_EXPORT, "blast_level", dataformat.EnumLookupMember(
            raw_type    = "int8_t",
            type_name   = "range_damage_type",
            lookup_dict = {
                0: "RESSOURCES",
                1: "TREES",
                2: "NEARBY_UNITS",
                3: "TARGET_ONLY",
            },
        )),
        (dataformat.READ, "min_range", "float"),
        (dataformat.READ, "garrison_recovery_rate", "float"),
        (dataformat.READ_EXPORT, "attack_graphic", "int16_t"),
        (dataformat.READ, "melee_armor_displayed", "int16_t"),
        (dataformat.READ, "attack_displayed", "int16_t"),
        (dataformat.READ, "range_displayed", "float"),
        (dataformat.READ, "reload_time1", "float"),
    )

    def __init__(self, **args):
        # accept **args so the Unit* constructor chain stays uniform
        # (parent classes such as UnitBird already forward keyword args)
        super().__init__(**args)
class UnitProjectile(UnitMovable):
    """
    type_id == 60

    Adds projectile-specific flight/impact attributes.
    """

    name_struct = "unit_projectile"
    name_struct_file = "unit"
    struct_description = "adds projectile specific unit properties."

    # binary layout: members are read in exactly this order from the data file
    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitMovable)),
        (dataformat.READ, "stretch_mode", "int8_t"),           # 1 = projectile falls vertically to the bottom of the map
        (dataformat.READ, "compensation_mode", "int8_t"),
        (dataformat.READ, "drop_animation_mode", "int8_t"),    # 1 = disappear on hit
        (dataformat.READ, "penetration_mode", "int8_t"),       # 1 = pass through hit object
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "projectile_arc", "float"),
    )

    def __init__(self, **args):
        # accept **args so the Unit* constructor chain stays uniform
        super().__init__(**args)
class UnitLiving(UnitMovable):
    """
    type_id >= 70

    Adds creation cost/time/location and garrison attributes.
    """

    name_struct = "unit_living"
    name_struct_file = "unit"
    struct_description = "adds creation location and garrison unit properties."

    # binary layout: members are read in exactly this order from the data file
    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitMovable)),
        (dataformat.READ, "ressource_cost", dataformat.SubdataMember(ref_type=RessourceCost, length=3)),
        (dataformat.READ, "creation_time", "int16_t"),         # in seconds
        (dataformat.READ, "creation_location_id", "int16_t"),  # e.g. 118 = villager

        # where to place the button with the given icon
        # creation page:
        # +------------------------+
        # | 01 | 02 | 03 | 04 | 05 |
        # |----|----|----|----|----|
        # | 06 | 07 | 08 | 09 | 10 |
        # |----|----|----|----|----|
        # | 11 | 12 | 13 | 14 | 15 |
        # +------------------------+
        #
        # additional page (dock):
        # +------------------------+
        # | 21 | 22 | 23 | 24 | 25 |
        # |----|----|----|----|----|
        # | 26 | 27 | 28 | 29 | 30 |
        # |----|----|----|----|----|
        # | 31 | 32 | 33 | 34 | 35 |
        # +------------------------+
        (dataformat.READ, "creation_button_id", "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int32_t"),
        (dataformat.READ_UNKNOWN, None, "int32_t"),
        (dataformat.READ, "missile_graphic_delay", "int8_t"),  # delay before the projectile is fired.
        (dataformat.READ, "hero_mode", "int8_t"),              # if building: "others" tab in editor, if living unit: "heroes" tab, regenerate health + monk immunity
        (dataformat.READ, "garrison_graphic", "int32_t"),      # graphic to display when units are garrisoned
        (dataformat.READ, "attack_missile_duplication_min", "float"),  # projectile duplication when nothing garrisoned
        (dataformat.READ, "attack_missile_duplication_max", "int8_t"), # duplication when fully garrisoned
        (dataformat.READ, "attack_missile_duplication_spawning_width", "float"),
        (dataformat.READ, "attack_missile_duplication_spawning_length", "float"),
        (dataformat.READ, "attack_missile_duplication_spawning_randomness", "float"),  # placement randomness, 0=from single spot, 1=random, 1<less random
        (dataformat.READ, "attack_missile_duplication_unit_id", "int32_t"),
        (dataformat.READ, "attack_missile_duplication_graphic_id", "int32_t"),
        (dataformat.READ, "dynamic_image_update", "int8_t"),   # determines adjacent unit graphics, if 1: building can adapt graphics by adjacent buildings
        (dataformat.READ, "pierce_armor_displayed", "int16_t"),  # unit stats display of pierce armor
    )

    def __init__(self, **args):
        # accept **args so the Unit* constructor chain stays uniform
        super().__init__(**args)
class UnitBuilding(UnitLiving):
    """
    type_id >= 80

    Adds construction graphics, annexes and garrison-building attributes.
    """

    name_struct = "unit_building"
    name_struct_file = "unit"
    struct_description = "construction graphics and garrison building properties for units."

    # binary layout: members are read in exactly this order from the data file
    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitLiving)),
        (dataformat.READ_EXPORT, "construction_graphic_id", "int16_t"),
        (dataformat.READ, "snow_graphic_id", "int16_t"),
        (dataformat.READ, "adjacent_mode", "int16_t"),         # 1=adjacent units may change the graphics
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "stack_unit_id", "int16_t"),         # second building to place directly on top
        (dataformat.READ_EXPORT, "terrain_id", "int16_t"),     # change underlying terrain to this id when building completed
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ, "research_id", "int16_t"),           # research_id to be enabled when building creation
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_EXPORT, "building_annex", dataformat.SubdataMember(ref_type=BuildingAnnex, length=4)),
        (dataformat.READ, "head_unit_id", "int16_t"),          # building at which an annex building is attached to
        (dataformat.READ, "transform_unit_id", "int16_t"),     # destination unit id when unit shall transform (e.g. unpack)
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ, "construction_sound_id", "int16_t"),
        (dataformat.READ_EXPORT, "garrison_type", dataformat.EnumLookupMember(
            raw_type    = "int8_t",
            type_name   = "garrison_types",
            lookup_dict = {  # TODO: create bitfield
                0x00: "NONE",
                0x01: "VILLAGER",
                0x02: "INFANTRY",
                0x04: "CAVALRY",
                0x08: "MONK",
                0x0b: "NOCAVALRY",
                0x0f: "ALL",
            },
        )),
        (dataformat.READ, "garrison_heal_rate", "float"),
        (dataformat.READ_UNKNOWN, None, "int32_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t[6]"),
    )

    def __init__(self, **args):
        # accept **args so the Unit* constructor chain stays uniform
        super().__init__(**args)
class UnitTree(UnitObject):
    """
    type_id = 90

    Plain tree unit; no attributes beyond the UnitObject base.
    """

    name_struct = "unit_tree"
    name_struct_file = "unit"
    struct_description = "just a tree unit."

    # binary layout: identical to the base unit
    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitObject)),
    )

    def __init__(self, **args):
        super().__init__(**args)
# maps a unit type_id (as stored in the data file) to a short type name
unit_type_lookup = {
    10: "object",
    20: "flag",
    25: "doppelganger",
    30: "dead_or_fish",
    40: "bird",
    60: "projectile",
    70: "living",
    80: "building",
    90: "tree",
}

# maps the short type name to the parser class that reads that unit type
unit_type_class_lookup = {
    "object":       UnitObject,
    "flag":         UnitFlag,
    "doppelganger": UnitDoppelganger,
    "dead_or_fish": UnitDeadOrFish,
    "bird":         UnitBird,
    "projectile":   UnitProjectile,
    "living":       UnitLiving,
    "building":     UnitBuilding,
    "tree":         UnitTree,
}
| gpl-3.0 |
akretion/odoo | addons/website_sale/models/sale_order.py | 4 | 20279 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import random
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo import api, models, fields, _
from odoo.http import request
from odoo.osv import expression
from odoo.exceptions import UserError, ValidationError
_logger = logging.getLogger(__name__)
class SaleOrder(models.Model):
    """Website (eCommerce cart) extensions of sale.order."""
    _inherit = "sale.order"

    # lines rendered in the website cart; computed, not meant for totals
    website_order_line = fields.One2many(
        'sale.order.line',
        compute='_compute_website_order_line',
        string='Order Lines displayed on Website',
        help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
    )
    cart_quantity = fields.Integer(compute='_compute_cart_info', string='Cart Quantity')
    only_services = fields.Boolean(compute='_compute_cart_info', string='Only Services')
    is_abandoned_cart = fields.Boolean('Abandoned Cart', compute='_compute_abandoned_cart', search='_search_abandoned_cart')
    cart_recovery_email_sent = fields.Boolean('Cart recovery email already sent')
    website_id = fields.Many2one('website', string='Website', readonly=True,
                                 help='Website through which this order was placed.')

    @api.one
    def _compute_website_order_line(self):
        # by default every order line is displayed on the website
        self.website_order_line = self.order_line

    @api.multi
    @api.depends('website_order_line.product_uom_qty', 'website_order_line.product_id')
    def _compute_cart_info(self):
        """Compute total cart quantity and whether the cart holds only services."""
        for order in self:
            order.cart_quantity = int(sum(order.mapped('website_order_line.product_uom_qty')))
            order.only_services = all(l.product_id.type in ('service', 'digital') for l in order.website_order_line)

    @api.multi
    @api.depends('team_id.team_type', 'date_order', 'order_line', 'state', 'partner_id')
    def _compute_abandoned_cart(self):
        """Flag draft website orders of identified customers older than the abandon delay."""
        for order in self:
            # website_id may be empty (e.g. backend orders); fall back to 1 hour
            abandoned_delay = order.website_id and order.website_id.cart_abandoned_delay or 1.0
            abandoned_datetime = datetime.utcnow() - relativedelta(hours=abandoned_delay)
            domain = order.date_order and order.date_order <= abandoned_datetime and order.team_id.team_type == 'website' and order.state == 'draft' and order.partner_id.id != self.env.ref('base.public_partner').id and order.order_line
            order.is_abandoned_cart = bool(domain)

    def _search_abandoned_cart(self, operator, value):
        """Search implementation for is_abandoned_cart; mirrors _compute_abandoned_cart."""
        abandoned_delay = self.website_id and self.website_id.cart_abandoned_delay or 1.0
        abandoned_datetime = fields.Datetime.to_string(datetime.utcnow() - relativedelta(hours=abandoned_delay))
        abandoned_domain = expression.normalize_domain([
            ('date_order', '<=', abandoned_datetime),
            ('team_id.team_type', '=', 'website'),
            ('state', '=', 'draft'),
            ('partner_id', '!=', self.env.ref('base.public_partner').id),
            ('order_line', '!=', False)
        ])
        # is_abandoned domain possibilities
        if (operator not in expression.NEGATIVE_TERM_OPERATORS and value) or (operator in expression.NEGATIVE_TERM_OPERATORS and not value):
            return abandoned_domain
        return expression.distribute_not(['!'] + abandoned_domain)  # negative domain

    @api.multi
    def _cart_find_product_line(self, product_id=None, line_id=None, **kwargs):
        """Find the cart line matching the given parameters.

        If a product_id is given, the line will match the product only if the
        line also has the same special attributes: `no_variant` attributes and
        `is_custom` values.
        """
        self.ensure_one()
        product = self.env['product.product'].browse(product_id)

        # split lines with the same product if it has untracked attributes
        if product and (product.product_tmpl_id.has_dynamic_attributes() or product.product_tmpl_id._has_no_variant_attributes()) and not line_id:
            return self.env['sale.order.line']

        domain = [('order_id', '=', self.id), ('product_id', '=', product_id)]
        if line_id:
            domain += [('id', '=', line_id)]
        else:
            domain += [('product_custom_attribute_value_ids', '=', False)]

        lines = self.env['sale.order.line'].sudo().search(domain)
        if line_id:
            return lines

        linked_line_id = kwargs.get('linked_line_id', False)
        optional_product_ids = set(kwargs.get('optional_product_ids', []))

        lines = lines.filtered(lambda line: line.linked_line_id.id == linked_line_id)
        if optional_product_ids:
            # only match the lines with the same chosen optional products on the existing lines
            lines = lines.filtered(lambda line: optional_product_ids == set(line.mapped('option_line_ids.product_id.id')))
        else:
            lines = lines.filtered(lambda line: not line.option_line_ids)
        return lines

    @api.multi
    def _website_product_id_change(self, order_id, product_id, qty=0):
        """Build the sale.order.line values for *product_id* at *qty*, applying pricelist pricing."""
        order = self.sudo().browse(order_id)
        product_context = dict(self.env.context)
        product_context.setdefault('lang', order.partner_id.lang)
        product_context.update({
            'partner': order.partner_id,
            'quantity': qty,
            'date': order.date_order,
            'pricelist': order.pricelist_id.id,
            'force_company': order.company_id.id,
        })
        product = self.env['product.product'].with_context(product_context).browse(product_id)
        discount = 0

        if order.pricelist_id.discount_policy == 'without_discount':
            # This part is pretty much a copy-paste of the method '_onchange_discount' of
            # 'sale.order.line'.
            price, rule_id = order.pricelist_id.with_context(product_context).get_product_price_rule(product, qty or 1.0, order.partner_id)
            pu, currency = request.env['sale.order.line'].with_context(product_context)._get_real_price_currency(product, rule_id, qty, product.uom_id, order.pricelist_id.id)
            if pu != 0:
                if order.pricelist_id.currency_id != currency:
                    # we need new_list_price in the same currency as price, which is in the SO's pricelist's currency
                    date = order.date_order or fields.Date.today()
                    pu = currency._convert(pu, order.pricelist_id.currency_id, order.company_id, date)
                discount = (pu - price) / pu * 100
                if discount < 0:
                    # In case the discount is negative, we don't want to show it to the customer,
                    # but we still want to use the price defined on the pricelist
                    discount = 0
                    pu = price
        else:
            pu = product.price
            if order.pricelist_id and order.partner_id:
                order_line = order._cart_find_product_line(product.id)
                if order_line:
                    pu = self.env['account.tax']._fix_tax_included_price_company(pu, product.taxes_id, order_line[0].tax_id, self.company_id)

        return {
            'product_id': product_id,
            'product_uom_qty': qty,
            'order_id': order_id,
            'product_uom': product.uom_id.id,
            'price_unit': pu,
            'discount': discount,
        }

    @api.multi
    def _get_line_description(self, order_id, product_id, no_variant_attribute_values=None, custom_values=None):
        """Deprecated, use `get_sale_order_line_multiline_description_sale`"""
        order = self.sudo().browse(order_id)
        product_context = dict(self.env.context)
        product_context.setdefault('lang', order.partner_id.lang)
        product = self.env['product.product'].with_context(product_context).browse(product_id)

        name = product.display_name
        if product.description_sale:
            name += '\n%s' % (product.description_sale)

        if no_variant_attribute_values:
            name += ''.join(['\n%s: %s' % (attribute_value['attribute_name'], attribute_value['attribute_value_name'])
                for attribute_value in no_variant_attribute_values])

        if custom_values:
            name += ''.join(['\n%s: %s' % (custom_value['attribute_value_name'], custom_value['custom_value']) for custom_value in custom_values])

        return name

    @api.multi
    def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
        """ Add or set product quantity, add_qty can be negative """
        self.ensure_one()
        product_context = dict(self.env.context)
        product_context.setdefault('lang', self.sudo().partner_id.lang)
        SaleOrderLineSudo = self.env['sale.order.line'].sudo().with_context(product_context)

        # sanitize quantities coming from the website (may be arbitrary strings)
        try:
            if add_qty:
                add_qty = int(add_qty)
        except ValueError:
            add_qty = 1
        try:
            if set_qty:
                set_qty = int(set_qty)
        except ValueError:
            set_qty = 0
        quantity = 0
        order_line = False
        if self.state != 'draft':
            request.session['sale_order_id'] = None
            raise UserError(_('It is forbidden to modify a sales order which is not in draft status.'))
        if line_id is not False:
            order_line = self._cart_find_product_line(product_id, line_id, **kwargs)[:1]

        # Create line if no line with product_id can be located
        if not order_line:
            # change lang to get correct name of attributes/values
            product = self.env['product.product'].with_context(product_context).browse(int(product_id))

            if not product:
                raise UserError(_("The given product does not exist therefore it cannot be added to cart."))

            no_variant_attribute_values = kwargs.get('no_variant_attribute_values') or []
            received_no_variant_values = product.env['product.template.attribute.value'].browse([int(ptav['value']) for ptav in no_variant_attribute_values])
            received_combination = product.product_template_attribute_value_ids | received_no_variant_values
            product_template = product.product_tmpl_id

            # handle all cases where incorrect or incomplete data are received
            combination = product_template._get_closest_possible_combination(received_combination)

            # get or create (if dynamic) the correct variant
            product = product_template._create_product_variant(combination)

            if not product:
                raise UserError(_("The given combination does not exist therefore it cannot be added to cart."))

            product_id = product.id

            values = self._website_product_id_change(self.id, product_id, qty=1)

            # add no_variant attributes that were not received
            for ptav in combination.filtered(lambda ptav: ptav.attribute_id.create_variant == 'no_variant' and ptav not in received_no_variant_values):
                no_variant_attribute_values.append({
                    'value': ptav.id,
                    'attribute_name': ptav.attribute_id.name,
                    'attribute_value_name': ptav.name,
                })

            # save no_variant attributes values
            if no_variant_attribute_values:
                values['product_no_variant_attribute_value_ids'] = [
                    (6, 0, [int(attribute['value']) for attribute in no_variant_attribute_values])
                ]

            # add is_custom attribute values that were not received
            custom_values = kwargs.get('product_custom_attribute_values') or []
            received_custom_values = product.env['product.attribute.value'].browse([int(ptav['attribute_value_id']) for ptav in custom_values])

            for ptav in combination.filtered(lambda ptav: ptav.is_custom and ptav.product_attribute_value_id not in received_custom_values):
                custom_values.append({
                    'attribute_value_id': ptav.product_attribute_value_id.id,
                    'attribute_value_name': ptav.name,
                    'custom_value': '',
                })

            # save is_custom attributes values
            if custom_values:
                values['product_custom_attribute_value_ids'] = [(0, 0, {
                    'attribute_value_id': custom_value['attribute_value_id'],
                    'custom_value': custom_value['custom_value']
                }) for custom_value in custom_values]

            # create the line
            order_line = SaleOrderLineSudo.create(values)

            # Generate the description with everything. This is done after
            # creating because the following related fields have to be set:
            # - product_no_variant_attribute_value_ids
            # - product_custom_attribute_value_ids
            order_line.name = order_line.get_sale_order_line_multiline_description_sale(product)

            try:
                order_line._compute_tax_id()
            except ValidationError as e:
                # The validation may occur in backend (eg: taxcloud) but should fail silently in frontend
                _logger.debug("ValidationError occurs during tax compute. %s" % (e))
            if add_qty:
                add_qty -= 1

        # compute new quantity
        if set_qty:
            quantity = set_qty
        elif add_qty is not None:
            quantity = order_line.product_uom_qty + (add_qty or 0)

        # Remove zero of negative lines
        if quantity <= 0:
            order_line.unlink()
        else:
            # update line
            no_variant_attributes_price_extra = [ptav.price_extra for ptav in order_line.product_no_variant_attribute_value_ids]
            values = self.with_context(no_variant_attributes_price_extra=no_variant_attributes_price_extra)._website_product_id_change(self.id, product_id, qty=quantity)
            if self.pricelist_id.discount_policy == 'with_discount' and not self.env.context.get('fixed_price'):
                order = self.sudo().browse(self.id)
                product_context.update({
                    'partner': order.partner_id,
                    'quantity': quantity,
                    'date': order.date_order,
                    'pricelist': order.pricelist_id.id,
                    'force_company': order.company_id.id,
                })
                product = self.env['product.product'].with_context(product_context).browse(product_id)
                values['price_unit'] = self.env['account.tax']._fix_tax_included_price_company(
                    order_line._get_display_price(product),
                    order_line.product_id.taxes_id,
                    order_line.tax_id,
                    self.company_id
                )

            order_line.write(values)

            # link a product to the sales order
            if kwargs.get('linked_line_id'):
                linked_line = SaleOrderLineSudo.browse(kwargs['linked_line_id'])
                order_line.write({
                    'linked_line_id': linked_line.id,
                    'name': order_line.name + "\n" + _("Option for:") + ' ' + linked_line.product_id.display_name,
                })
                linked_line.write({"name": linked_line.name + "\n" + _("Option:") + ' ' + order_line.product_id.display_name})

        # propagate the quantity change to the option lines attached to this line
        option_lines = self.order_line.filtered(lambda l: l.linked_line_id.id == order_line.id)
        for option_line_id in option_lines:
            self._cart_update(option_line_id.product_id.id, option_line_id.id, add_qty, set_qty, **kwargs)

        return {'line_id': order_line.id, 'quantity': quantity, 'option_ids': list(set(option_lines.ids))}

    def _cart_accessories(self):
        """ Suggest accessories based on 'Accessory Products' of products in cart """
        # NOTE(review): returns inside the loop, so only the first order of a
        # multi-record set is considered; empty recordsets return None — confirm callers always pass one order
        for order in self:
            products = order.website_order_line.mapped('product_id')
            accessory_products = self.env['product.product']
            for line in order.website_order_line.filtered(lambda l: l.product_id):
                combination = line.product_id.product_template_attribute_value_ids + line.product_no_variant_attribute_value_ids
                accessory_products |= line.product_id.accessory_product_ids.filtered(lambda product:
                    product.website_published and
                    product not in products and
                    product._is_variant_possible(parent_combination=combination)
                )

            return random.sample(accessory_products, len(accessory_products))

    @api.multi
    def action_recovery_email_send(self):
        """Open the mail composer pre-loaded with the cart recovery template."""
        for order in self:
            order._portal_ensure_token()
        composer_form_view_id = self.env.ref('mail.email_compose_message_wizard_form').id

        template_id = self._get_cart_recovery_template().id

        return {
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'mail.compose.message',
            'view_id': composer_form_view_id,
            'target': 'new',
            'context': {
                'default_composition_mode': 'mass_mail' if len(self.ids) > 1 else 'comment',
                'default_res_id': self.ids[0],
                'default_model': 'sale.order',
                'default_use_template': bool(template_id),
                'default_template_id': template_id,
                'website_sale_send_recovery_email': True,
                'active_ids': self.ids,
            },
        }

    @api.multi
    def _get_cart_recovery_template(self):
        """
        Return the cart recovery template record for a set of orders.
        If they all belong to the same website, we return the website-specific template;
        otherwise we return the default template.
        If the default is not found, the empty ['mail.template'] is returned.
        """
        websites = self.mapped('website_id')
        template = websites.cart_recovery_mail_template_id if len(websites) == 1 else False
        template = template or self.env.ref('website_sale.mail_template_sale_cart_recovery', raise_if_not_found=False)
        return template or self.env['mail.template']

    @api.multi
    def _cart_recovery_email_send(self):
        """Send the cart recovery email on the current recordset,
        making sure that the portal token exists to avoid broken links, and marking the email as sent.
        Similar method to action_recovery_email_send, made to be called in automated actions.
        Contrary to the former, it will use the website-specific template for each order."""
        sent_orders = self.env['sale.order']
        for order in self:
            template = order._get_cart_recovery_template()
            if template:
                order._portal_ensure_token()
                template.send_mail(order.id)
                sent_orders |= order
        sent_orders.write({'cart_recovery_email_sent': True})

    @api.multi
    def get_base_url(self):
        """When using multi-website, we want the user to be redirected to the
        most appropriate website if possible."""
        res = super(SaleOrder, self).get_base_url()
        return self.website_id and self.website_id._get_http_domain() or res
class SaleOrderLine(models.Model):
    """Website extensions of sale.order.line: short display name and option links."""
    _inherit = "sale.order.line"

    # compact product name used where the website lacks space for the full description
    name_short = fields.Char(compute="_compute_name_short")

    linked_line_id = fields.Many2one('sale.order.line', string='Linked Order Line', domain="[('order_id', '!=', order_id)]", ondelete='cascade')
    option_line_ids = fields.One2many('sale.order.line', 'linked_line_id', string='Options Linked')

    @api.multi
    @api.depends('product_id.display_name')
    def _compute_name_short(self):
        """ Compute a short name for this sale order line, to be used on the website where we don't have much space.
            To keep it short, instead of using the first line of the description, we take the product name without the internal reference.
        """
        for record in self:
            record.name_short = record.product_id.with_context(display_default_code=False).display_name

    def get_description_following_lines(self):
        # every description line except the first (the first is shown via name_short)
        return self.name.splitlines()[1:]
| agpl-3.0 |
titanium-forks/GeraudBourdin.Ti.AndroidSvgView | iphone/build.py | 2 | 8522 | #!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# directory containing this script; all paths below are resolved relative to it
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)

# manifest keys that must be present and non-empty
# (note: 'copyright' appears twice; the duplicate is harmless for membership checks)
required_module_keys = ['architectures', 'name','version','moduleid','description','copyright','license','copyright','platform','minsdk']

# placeholder values shipped with the module template; a manifest still using
# one of these triggers a warning in validate_manifest()
module_defaults = {
    'description':'My module',
    'author': 'Your Name',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}

# placeholder LICENSE text; validate_license() warns if it is still present
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from *config*, expanding ~ and environment variables."""
    raw_path = config['TITANIUM_SDK']
    return os.path.expandvars(os.path.expanduser(raw_path))
def replace_vars(config, token):
    """Expand $(KEY) placeholders in *token* using values from *config*.

    Expansion is iterative, so a replacement value may itself contain
    $(...) references. An unknown key (or an unterminated placeholder)
    stops the expansion and the remaining token is returned untouched.
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')', idx + 2)
        if idx2 == -1:
            break
        key = token[idx + 2:idx2]
        # 'key in config' replaces the deprecated dict.has_key(), which was
        # removed in Python 3; behaviour is identical on Python 2.
        if key not in config:
            break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig (next to this script) into a dict.

    Lines look like KEY = VALUE; '//' comment lines are skipped and
    $(...) references are expanded against earlier entries.
    """
    config = {}
    with open(os.path.join(cwd, 'titanium.xcconfig')) as config_file:
        raw_lines = config_file.read().splitlines(False)
    for raw in raw_lines:
        raw = raw.strip()
        if raw[0:2] == '//':
            continue
        eq_pos = raw.find('=')
        if eq_pos > 0:
            key = raw[0:eq_pos].strip()
            value = raw[eq_pos + 1:].strip()
            config[key] = replace_vars(config, value)
    return config
def generate_doc(config):
    """Render each markdown file in the documentation/ folder to HTML.

    Returns a list of one-entry dicts {filename: html}, or None when no
    documentation directory exists. Uses markdown2 when available, falling
    back to the plain markdown package.
    """
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        # module layout variant: documentation lives one level up
        docdir = os.path.join(cwd,'..','documentation')
    if not os.path.exists(docdir):
        print "Couldn't find documentation file at: %s" % docdir
        return None

    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        # ignoreFiles is defined later at module level; available at call time
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    """Compile the module's CommonJS entry point and splice it into the
    Objective-C asset router, then dump the discovered exports to metadata.json.

    No-op when the module JS file does not exist.
    """
    js_file = os.path.join(cwd,'assets','com.geraudbourdin.svgview.js')
    if not os.path.exists(js_file):
        js_file = os.path.join(cwd,'..','assets','com.geraudbourdin.svgview.js')
    if not os.path.exists(js_file): return

    # Compiler/splice_code/tools come from the Titanium SDK, put on sys.path by __main__
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json

    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()

    # Objective-C snippets injected into the generated ModuleAssets file
    root_asset_content = """
%s

	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset

    module_asset_content = """
%s

	NSNumber *index = [map objectForKey:path];
	if (index == nil) {
		return nil;
	}
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets

    from tools import splice_code

    assets_router = os.path.join(cwd,'Classes','ComGeraudbourdinSvgviewModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)

    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    """Print *msg* and abort the build with exit status 1."""
    print msg
    sys.exit(1)
def warn(msg):
    """Print a non-fatal [WARN]-prefixed message."""
    print "[WARN] %s" % msg
def validate_license():
    """Warn when the LICENSE file still contains the template placeholder text."""
    license_file = os.path.join(cwd,'LICENSE')
    if not os.path.exists(license_file):
        # module layout variant: LICENSE lives one level up
        license_file = os.path.join(cwd,'..','LICENSE')
    if os.path.exists(license_file):
        c = open(license_file).read()
        if c.find(module_license_default)!=-1:
            warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse the module 'manifest' file and validate its contents.

    Returns (manifest_dict, manifest_path). Dies on a missing file, a
    missing required key, or an empty value; warns when a key still holds
    its template default.
    """
    path = os.path.join(cwd, 'manifest')
    # Check existence before opening: the original opened the file first,
    # so a missing manifest raised IOError and the die() message was unreachable.
    if not os.path.exists(path):
        die("missing %s" % path)
    f = open(path)
    manifest = {}
    for line in f.readlines():
        line = line.strip()
        if line[0:1] == '#':
            continue
        if line.find(':') < 0:
            continue
        # maxsplit=1 so values containing ':' (e.g. URLs) no longer raise ValueError
        key, value = line.split(':', 1)
        manifest[key.strip()] = value.strip()
    for key in required_module_keys:
        # 'in' replaces the deprecated dict.has_key() (removed in Python 3)
        if key not in manifest:
            die("missing required manifest key '%s'" % key)
        if manifest[key].strip() == '':
            die("manifest key '%s' missing required value" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue == defvalue:
                warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest, path
# file and directory names excluded from packaging and doc generation
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
    """Recursively add *dir* to open ZipFile *zf*, rewriting paths under *basepath*.

    Skips ignoreDirs/ignoreFiles, .pyc files, and .js files unless
    includeJSFiles is True. (The *ignore* parameter is currently unused.)
    """
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name)	# don't visit ignored directories
        for file in files:
            if file in ignoreFiles: continue
            e = os.path.splitext(file)
            if len(e) == 2 and e[1] == '.pyc': continue
            if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
            from_ = os.path.join(root, file)
            # rebase the archive path: dir/... -> basepath/...
            to_ = from_.replace(dir, basepath, 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Return the Release-configuration static libraries found under build/."""
    return [libfile for libfile in glob.glob('build/**/*.a')
            if libfile.find('Release-') != -1]
def build_module(manifest,config):
    """Build the module for device and simulator, then merge into one fat library.

    Dies when either xcodebuild invocation fails. The merged static library is
    written to build/lib<moduleid>.a.
    """
    from tools import ensure_dev_path
    ensure_dev_path()

    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")

    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")

    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile

    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def verify_build_arch(manifest, config):
    """Check that the compiled library's architectures match the manifest.

    Warns on missing arm64; dies when the manifest and binary disagree.
    """
    binaryname = 'lib%s.a' % manifest['moduleid']
    binarypath = os.path.join('build', binaryname)
    manifestarch = set(manifest['architectures'].split(' '))

    # lipo -info prints e.g. "Architectures in the fat file: ... are: armv7 arm64"
    output = subprocess.check_output('xcrun lipo -info %s' % binarypath, shell=True)
    builtarch = set(output.split(':')[-1].strip().split(' '))

    if ('arm64' not in builtarch):
        warn('built module is missing 64-bit support.')

    if (manifestarch != builtarch):
        warn('there is discrepancy between the architectures specified in module manifest and compiled binary.')
        warn('architectures in manifest: %s' % ', '.join(manifestarch))
        warn('compiled binary architectures: %s' % ', '.join(builtarch))
        die('please update manifest to match module binary architectures.')
def package_module(manifest,mf,config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    The archive contains the manifest, the merged static library,
    generated HTML documentation, the assets/example/platform trees,
    the LICENSE, module.xcconfig and (if present) metadata.json.

    :param manifest: parsed manifest dict ('moduleid', 'version', ...)
    :param mf: path of the manifest file to embed
    :param config: parsed module.xcconfig contents (passed to generate_doc)
    """
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    # Start from a clean slate so stale entries never survive a rebuild.
    if os.path.exists(modulezip):
        os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs is not None:
        for doc in docs:
            for doc_name, html in doc.iteritems():
                # Markdown sources are shipped as rendered HTML.
                filename = doc_name.replace('.md', '.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    # Optional payload directories may live next to the script or one level up.
    p = os.path.join(cwd, 'assets')
    if not os.path.exists(p):
        p = os.path.join(cwd, '..', 'assets')
    if os.path.exists(p):
        zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
    for dn in ('example','platform'):
        p = os.path.join(cwd, dn)
        if not os.path.exists(p):
            p = os.path.join(cwd, '..', dn)
        if os.path.exists(p):
            # JS files are wanted only inside example/platform payloads.
            zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
    license_file = os.path.join(cwd,'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd,'..','LICENSE')
    if os.path.exists(license_file):
        zf.write(license_file,'%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Build pipeline: validate inputs, compile both SDK flavours, merge with
    # lipo, verify architectures, then package the distributable zip.
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    sdk = find_sdk(config)
    # Make the Titanium SDK's python helper modules importable.
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest,config)
    build_module(manifest,config)
    verify_build_arch(manifest, config)
    package_module(manifest,mf,config)
    sys.exit(0)
| mit |
kevinyu98/spark | python/pyspark/ml/tests/test_base.py | 21 | 2791 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.types import DoubleType, IntegerType
from pyspark.testing.mlutils import MockDataset, MockEstimator, MockUnaryTransformer, \
SparkSessionTestCase
class UnaryTransformerTests(SparkSessionTestCase):
    """Tests for UnaryTransformer behaviour via MockUnaryTransformer."""

    def _shift_transformer(self, shift):
        """Build a mock transformer that adds ``shift`` to the input column."""
        return MockUnaryTransformer(shiftVal=shift) \
            .setInputCol("input").setOutputCol("output")

    def test_unary_transformer_validate_input_type(self):
        transformer = self._shift_transformer(3)
        # Doubles are the declared input type, so this must not raise.
        transformer.validateInputType(DoubleType())
        # Any other input type must be rejected with a TypeError.
        with self.assertRaises(TypeError):
            transformer.validateInputType(IntegerType())

    def test_unary_transformer_transform(self):
        shift = 3
        transformer = self._shift_transformer(shift)
        frame = self.spark.range(0, 10).toDF('input')
        frame = frame.withColumn("input", frame.input.cast(dataType="double"))
        rows = transformer.transform(frame).select("input", "output").collect()
        for row in rows:
            self.assertEqual(row.input + shift, row.output)
class EstimatorTest(unittest.TestCase):
    """Tests for the default ``Estimator.fitMultiple`` implementation."""

    def testDefaultFitMultiple(self):
        num_models = 4
        dataset = MockDataset()
        estimator = MockEstimator()
        param_maps = [{estimator.fake: value} for value in range(num_models)]
        seen = []
        # fitMultiple may yield (index, model) pairs in any order.
        for index, model in estimator.fitMultiple(dataset, param_maps):
            self.assertEqual(model.getFake(), index)
            seen.append(index)
        # Every requested param map must produce exactly one model.
        self.assertEqual(sorted(seen), list(range(num_models)))
if __name__ == "__main__":
    # Re-import the test module so unittest discovers the cases by name.
    from pyspark.ml.tests.test_base import *
    try:
        # Prefer XML report output (for CI collection) when available.
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        # Fall back to the plain-text unittest runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
BehavioralInsightsTeam/edx-platform | cms/djangoapps/contentstore/tests/test_i18n.py | 22 | 10115 | """
Tests for validate Internationalization and Module i18n service.
"""
import gettext
from unittest import skip
import mock
from django.contrib.auth.models import User
from django.utils import translation
from django.utils.translation import get_language
from contentstore.tests.utils import AjaxEnabledTestClient
from contentstore.views.preview import _preview_module_system
from xmodule.modulestore.django import ModuleI18nService
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class FakeTranslations(ModuleI18nService):
    """A test GNUTranslations class that takes a map of msg -> translations."""

    def __init__(self, translations):  # pylint: disable=super-init-not-called
        self.translations = translations

    def ugettext(self, msgid):
        """Return the canned translation for ``msgid``, or ``msgid`` itself."""
        return self.translations.get(msgid, msgid)

    @staticmethod
    def translator(locales_map):  # pylint: disable=method-hidden
        """Build a mock ``gettext.translation`` factory for the given locales.

        The returned function mimics ``gettext.translation`` and hands back
        a FakeTranslations for the first requested language, e.g. with::

            {
                'es': {'Hi': 'Hola', 'Bye': 'Adios'},
                'zh': {'Hi': 'Ni Hao', 'Bye': 'Zaijian'}
            }

        :param locales_map: map from locale name to a translations map
        """
        def _translation(domain, localedir=None, languages=None):  # pylint: disable=unused-argument
            """Return the fake translations object for the first language."""
            if not languages:
                return gettext.NullTranslations()
            try:
                catalog = locales_map[languages[0]]
            except KeyError:
                # Unknown locale: behave like a missing catalog.
                return gettext.NullTranslations()
            return FakeTranslations(catalog)
        return _translation
class TestModuleI18nService(ModuleStoreTestCase):
    """ Test ModuleI18nService """
    def setUp(self):
        """ Setting up tests """
        super(TestModuleI18nService, self).setUp()
        self.test_language = 'dummy language'
        self.request = mock.Mock()
        self.course = CourseFactory.create()
        self.field_data = mock.Mock()
        self.descriptor = ItemFactory(category="pure", parent=self.course)
        # The preview runtime is what exposes the 'i18n' service under test.
        self.runtime = _preview_module_system(
            self.request,
            self.descriptor,
            self.field_data,
        )
        # Make sure no test leaves a language activated for the next one.
        self.addCleanup(translation.deactivate)
    def get_module_i18n_service(self, descriptor):
        """
        Return the module i18n service from the preview runtime,
        asserting it exists and is a ModuleI18nService.
        """
        i18n_service = self.runtime.service(descriptor, 'i18n')
        self.assertIsNotNone(i18n_service)
        self.assertIsInstance(i18n_service, ModuleI18nService)
        return i18n_service
    def test_django_service_translation_works(self):
        """
        Test django translation service works fine.
        """
        class wrap_ugettext_with_xyz(object): # pylint: disable=invalid-name
            """
            A context manager that temporarily adds 'XYZ ' to the front
            of all strings returned by the module's ugettext function,
            restoring the original on exit.
            """
            def __init__(self, module):
                self.module = module
                self.old_ugettext = module.ugettext
            def __enter__(self):
                def new_ugettext(*args, **kwargs):
                    """ Wrapped ugettext with a recognizable prefix. """
                    output = self.old_ugettext(*args, **kwargs)
                    return "XYZ " + output
                self.module.ugettext = new_ugettext
            def __exit__(self, _type, _value, _traceback):
                self.module.ugettext = self.old_ugettext
        i18n_service = self.get_module_i18n_service(self.descriptor)
        # Activate french, so that if the fr files haven't been loaded, they will be loaded now.
        with translation.override("fr"):
            french_translation = translation.trans_real._active.value # pylint: disable=protected-access
            # wrap the ugettext functions so that 'XYZ ' will prefix each translation
            with wrap_ugettext_with_xyz(french_translation):
                self.assertEqual(i18n_service.ugettext(self.test_language), 'XYZ dummy language')
            # Check that the old ugettext has been put back into place
            self.assertEqual(i18n_service.ugettext(self.test_language), 'dummy language')
    @mock.patch('django.utils.translation.ugettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
    def test_django_translator_in_use_with_empty_block(self):
        """
        Test: Django default translator should in use if we have an empty block
        """
        i18n_service = ModuleI18nService(None)
        self.assertEqual(i18n_service.ugettext(self.test_language), 'XYZ-TEST-LANGUAGE')
    @mock.patch('django.utils.translation.ugettext', mock.Mock(return_value='XYZ-TEST-LANGUAGE'))
    def test_message_catalog_translations(self):
        """
        Test: Message catalog from FakeTranslation should return required translations.
        """
        _translator = FakeTranslations.translator(
            {
                'es': {'Hello': 'es-hello-world'},
                'fr': {'Hello': 'fr-hello-world'},
            },
        )
        localedir = '/translations'
        translation.activate("es")
        with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
                                                                        languages=[get_language()])):
            i18n_service = self.get_module_i18n_service(self.descriptor)
            self.assertEqual(i18n_service.ugettext('Hello'), 'es-hello-world')
        # 'ar' has no catalog entry, so lookups fall through to the msgid.
        translation.activate("ar")
        with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
                                                                        languages=[get_language()])):
            i18n_service = self.get_module_i18n_service(self.descriptor)
            self.assertEqual(i18n_service.ugettext('Hello'), 'Hello')
            self.assertNotEqual(i18n_service.ugettext('Hello'), 'fr-hello-world')
            self.assertNotEqual(i18n_service.ugettext('Hello'), 'es-hello-world')
        translation.activate("fr")
        with mock.patch('gettext.translation', return_value=_translator(domain='text', localedir=localedir,
                                                                        languages=[get_language()])):
            i18n_service = self.get_module_i18n_service(self.descriptor)
            self.assertEqual(i18n_service.ugettext('Hello'), 'fr-hello-world')
    def test_i18n_service_callable(self):
        """
        Test: i18n service should be callable in studio.
        """
        self.assertTrue(callable(self.runtime._services.get('i18n'))) # pylint: disable=protected-access
class InternationalizationTest(ModuleStoreTestCase):
    """
    Tests to validate Internationalization.
    """
    CREATE_USER = False
    def setUp(self):
        """
        These tests need a user in the DB so that the django Test Client
        can log them in.
        They inherit from the ModuleStoreTestCase class so that the mongodb collection
        will be cleared out before each test case execution and deleted
        afterwards.
        """
        super(InternationalizationTest, self).setUp()
        self.uname = 'testuser'
        self.email = 'test+courses@edx.org'
        self.password = 'foo'
        # Create the user so we can log them in.
        self.user = User.objects.create_user(self.uname, self.email, self.password)
        # Note that we do not actually need to do anything
        # for registration if we directly mark them active.
        self.user.is_active = True
        # Staff has access to view all courses
        self.user.is_staff = True
        self.user.save()
        self.course_data = {
            'org': 'MITx',
            'number': '999',
            'display_name': 'Robot Super Course',
        }
    def test_course_plain_english(self):
        """Test viewing the index page with no courses"""
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.uname, password=self.password)
        resp = self.client.get_html('/home/')
        self.assertContains(resp,
                            '<h1 class="page-header">Studio Home</h1>',
                            status_code=200,
                            html=True)
    def test_course_explicit_english(self):
        """Test viewing the index page with no courses"""
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.uname, password=self.password)
        resp = self.client.get_html(
            '/home/',
            {},
            HTTP_ACCEPT_LANGUAGE='en',
        )
        self.assertContains(resp,
                            '<h1 class="page-header">Studio Home</h1>',
                            status_code=200,
                            html=True)
    # ****
    # NOTE:
    # ****
    #
    # This test will break when we replace this fake 'test' language
    # with actual Esperanto. This test will need to be updated with
    # actual Esperanto at that time.
    # Test temporarily disable since it depends on creation of dummy strings
    # NOTE(review): bare ``@skip`` passes the function itself as the skip
    # "reason" instead of marking the test skipped with a message — confirm
    # the intent is ``@skip("...reason...")``.
    @skip
    def test_course_with_accents(self):
        """Test viewing the index page with no courses"""
        self.client = AjaxEnabledTestClient()
        self.client.login(username=self.uname, password=self.password)
        resp = self.client.get_html(
            '/home/',
            {},
            HTTP_ACCEPT_LANGUAGE='eo'
        )
        TEST_STRING = (
            u'<h1 class="title-1">'
            u'My \xc7\xf6\xfcrs\xe9s L#'
            u'</h1>'
        )
        self.assertContains(resp,
                            TEST_STRING,
                            status_code=200,
                            html=True)
| agpl-3.0 |
dogukantufekci/easyfind | docs/conf.py | 1 | 7738 | # -*- coding: utf-8 -*-
#
# easyfind documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 17 11:46:20 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# No Sphinx extension modules are enabled for this project yet.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'easyfind'
# NOTE(review): 'ChangeMyName' is an unfilled sphinx-quickstart placeholder;
# set the real copyright holder.
copyright = u'2013, ChangeMyName'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Ignore Sphinx's own build output when scanning for sources.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.  'default' is the classic built-in Sphinx theme.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# (No custom static files are shipped yet; the directory is a placeholder.)
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder (used for the .hhp project file).
htmlhelp_basename = 'easyfinddoc'
# -- Options for LaTeX output --------------------------------------------------
# All LaTeX settings are left at their Sphinx defaults (commented out below).
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# NOTE(review): 'ChangeToMyName' is an unfilled placeholder author.
latex_documents = [
  ('index', 'easyfind.tex', u'easyfind Documentation',
   u'ChangeToMyName', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# NOTE(review): 'ChangeToMyName' is an unfilled placeholder author.
man_pages = [
    ('index', 'easyfind', u'easyfind Documentation',
     [u'ChangeToMyName'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): author and description are unfilled quickstart placeholders.
texinfo_documents = [
  ('index', 'easyfind', u'easyfind Documentation',
   u'ChangeToMyName', 'easyfind', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
ondys/three.js | utils/exporters/blender/addons/io_three/exporter/api/texture.py | 97 | 3680 | from bpy import data, types
from .. import constants, logger
from .constants import IMAGE, MAG_FILTER, MIN_FILTER, MAPPING
from . import image
def _texture(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Texture):
texture = name
else:
texture = data.textures[name]
return func(texture, *args, **kwargs)
return inner
@_texture
def anisotropy(texture):
    """
    :param texture: texture node, or the name of one in ``bpy.data.textures``
    :return: the texture's ``filter_size`` value
    """
    # Fixed copy-paste bug: the debug message previously said
    # "texture.file_path" inside this function.
    logger.debug("texture.anisotropy(%s)", texture)
    return texture.filter_size
@_texture
def file_name(texture):
    """
    :param texture: texture node, or the name of one in ``bpy.data.textures``
    :return: file name of the texture's image, or None (implicitly) when the
        texture has no image attached
    """
    logger.debug("texture.file_name(%s)", texture)
    if texture.image:
        return image.file_name(texture.image)
@_texture
def file_path(texture):
    """
    :param texture: texture node, or the name of one in ``bpy.data.textures``
    :return: file path of the texture's image, or None (implicitly) when the
        texture has no image attached
    """
    logger.debug("texture.file_path(%s)", texture)
    if texture.image:
        return image.file_path(texture.image)
@_texture
def image_node(texture):
    """
    :param texture: texture node, or the name of one in ``bpy.data.textures``
    :return: the texture's image datablock (may be None if no image is set)
    """
    logger.debug("texture.image_node(%s)", texture)
    return texture.image
@_texture
def mag_filter(texture):
    """Return the texture's ``THREE_mag_filter`` custom property,
    falling back to the module default when the property is absent.

    :param texture: texture node, or the name of one in ``bpy.data.textures``
    :return: THREE_mag_filter value
    """
    logger.debug("texture.mag_filter(%s)", texture)
    try:
        return texture.THREE_mag_filter
    except AttributeError:
        logger.debug("No THREE_mag_filter attribute found")
        return MAG_FILTER
@_texture
def mapping(texture):
    """Return the texture's ``THREE_mapping`` custom property,
    falling back to the module default when the property is absent.

    :param texture: texture node, or the name of one in ``bpy.data.textures``
    :return: THREE_mapping value
    """
    logger.debug("texture.mapping(%s)", texture)
    try:
        return texture.THREE_mapping
    except AttributeError:
        logger.debug("No THREE_mapping attribute found")
        return MAPPING
@_texture
def min_filter(texture):
    """Return the texture's ``THREE_min_filter`` custom property,
    falling back to the module default when the property is absent.

    :param texture: texture node, or the name of one in ``bpy.data.textures``
    :return: THREE_min_filter value
    """
    logger.debug("texture.min_filter(%s)", texture)
    try:
        return texture.THREE_min_filter
    except AttributeError:
        logger.debug("No THREE_min_filter attribute found")
        return MIN_FILTER
@_texture
def repeat(texture):
    """The repeat parameters of the texture node
    :param texture: texture node, or the name of one in ``bpy.data.textures``
    :returns: (repeat_x, repeat_y) tuple
    """
    logger.debug("texture.repeat(%s)", texture)
    return (texture.repeat_x, texture.repeat_y)
@_texture
def wrap(texture):
    """The wrapping parameters of the texture node

    :param texture: texture node, or the name of one in ``bpy.data.textures``
    :returns: tuple of THREE compatible wrapping values, one per axis
    """
    logger.debug("texture.wrap(%s)", texture)
    if texture.extension == "REPEAT":
        def axis_mode(mirrored):
            """Map one axis' mirror flag to a THREE wrapping constant."""
            if mirrored:
                return constants.WRAPPING.MIRROR
            return constants.WRAPPING.REPEAT
        return (axis_mode(texture.use_mirror_x),
                axis_mode(texture.use_mirror_y))
    # Provide the closest available three.js behavior for the other
    # extension modes ("CLIP", "EXTEND", "CLIP_CUBE", "CHECKER"):
    # CLAMP is the best match.
    return (constants.WRAPPING.CLAMP, constants.WRAPPING.CLAMP)
def textures():
    """Yield the names of IMAGE texture nodes used by in-use materials.

    :return: generator of texture node names that are IMAGE
    """
    logger.debug("texture.textures()")
    for material in data.materials:
        # Skip orphaned materials that nothing references.
        if material.users == 0:
            continue
        for slot in material.texture_slots:
            if not (slot and slot.use):
                continue
            if slot.texture and slot.texture.type == IMAGE:
                yield slot.texture.name
| mit |
jinshuai829/dpdp-2.2.0 | doc/guides/conf.py | 4 | 6178 | # BSD LICENSE
# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import subprocess
from docutils import nodes
from distutils.version import LooseVersion
from sphinx import __version__ as sphinx_version
from sphinx.highlighting import PygmentsBridge
from pygments.formatters.latex import LatexFormatter
# Project identity used in page titles and the generated documents.
project = 'Data Plane Development Kit'

# The Read the Docs theme is only available on newer Sphinx releases;
# older Sphinx falls back to its built-in default theme.
if LooseVersion(sphinx_version) >= LooseVersion('1.3.1'):
    html_theme = "sphinx_rtd_theme"
html_logo = '../logo/DPDK_logo_vertical_rev_small.png'
latex_logo = '../logo/DPDK_logo_horizontal_tag.png'
html_add_permalinks = ""
html_show_copyright = False
highlight_language = 'none'

# Ask the top-level makefile for the DPDK version string.
version = subprocess.check_output(['make', '-sRrC', '../../', 'showversion']).decode('utf-8')
release = version

master_doc = 'index'

# Figures, tables and code-blocks automatically numbered if they have caption
numfig = True
# Single LaTeX/PDF output document built from the master doc.
latex_documents = [
    ('index',
     'doc.tex',
     '',
     '',
     'manual')
]

# Latex directives to be included directly in the latex/pdf docs.
latex_preamble = r"""
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{helvet}
\renewcommand{\familydefault}{\sfdefault}
\RecustomVerbatimEnvironment{Verbatim}{Verbatim}{xleftmargin=5mm}
"""
# Configuration for the latex/pdf docs; 'preamble' pulls in the
# latex_preamble string defined above.
latex_elements = {
    'papersize': 'a4paper',
    'pointsize': '11pt',
    # remove blank pages
    'classoptions': ',openany,oneside',
    'babel': '\\usepackage[english]{babel}',
    # customize Latex formatting
    'preamble': latex_preamble
}
# Override the default Latex formatter in order to modify the
# code/verbatim blocks.
class CustomLatexFormatter(LatexFormatter):
    """Pygments LaTeX formatter that shrinks code/verbatim block text."""
    def __init__(self, **options):
        super(CustomLatexFormatter, self).__init__(**options)
        # Use the second smallest font size for code/verbatim blocks.
        self.verboptions = r'formatcom=\footnotesize'

# Replace the default latex formatter.
PygmentsBridge.latex_formatter = CustomLatexFormatter
######## :numref: fallback ########
# The following hook functions add some simple handling for the :numref:
# directive for Sphinx versions prior to 1.3.1. The functions replace the
# :numref: reference with a link to the target (for all Sphinx doc types).
# It doesn't try to label figures/tables.
def numref_role(reftype, rawtext, text, lineno, inliner):
    """
    Sphinx role handling :numref: references on old Sphinx versions.

    The link can't be converted here because the doctree isn't built yet
    and the target information isn't available; instead a marker URI is
    emitted for process_numref() to rewrite later.
    """
    # The '_local_numref_#' prefix distinguishes numref from other references.
    marker = nodes.reference('', '',
                             refuri='_local_numref_#%s' % text,
                             internal=True)
    return [marker], []
def process_numref(app, doctree, from_docname):
    """
    Process the numref nodes once the doctree has been built and prior to
    writing the files. The processing involves replacing the numref with a
    link plus text to indicate if it is a Figure or Table link.
    """
    # Iterate over the reference nodes in the doctree.
    for node in doctree.traverse(nodes.reference):
        target = node.get('refuri', '')
        # Look for numref nodes (marked by numref_role with this prefix).
        if target.startswith('_local_numref_'):
            target = target.replace('_local_numref_#', '')
            # Get the target label and link information from the Sphinx env.
            data = app.builder.env.domains['std'].data
            docname, label, _ = data['labels'].get(target, ('', '', ''))
            # NOTE(review): unknown targets fall back to empty docname/label,
            # producing a link to the top of the current page — confirm that
            # is acceptable rather than a build warning.
            relative_url = app.builder.get_relative_uri(from_docname, docname)
            # Add a text label to the link based on the target's prefix.
            if target.startswith('figure'):
                caption = 'Figure'
            elif target.startswith('table'):
                caption = 'Table'
            else:
                caption = 'Link'
            # New reference node with the updated link information.
            newnode = nodes.reference('',
                                      caption,
                                      refuri='%s#%s' % (relative_url, label),
                                      internal=True)
            node.replace_self(newnode)
def setup(app):
    """Sphinx extension entry point: register the :numref: fallback."""
    if LooseVersion(sphinx_version) < LooseVersion('1.3.1'):
        print('Upgrade sphinx to version >= 1.3.1 for '
              'improved Figure/Table number handling.')
    # Register the fallback role and rewrite the markers once the
    # doctree has been created.
    app.add_role('numref', numref_role)
    app.connect('doctree-resolved', process_numref)
| gpl-2.0 |
joanma100/mieli | mieli/views.py | 1 | 2143 | from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseServerError
from agora.api import agora_, user as agora_user # TODO <- this shouldn't be here
from django_tables2 import RequestConfig
from identity.tables import UserTable
from django.shortcuts import render, redirect
from django.conf import settings
from mieli.api import user
import cookies
@user_passes_test(lambda u:u.is_staff, login_url='auth_login')
def dashboard(request):
    """Render the staff dashboard listing the organization's active users."""
    active_users = user.from_organization(request.organization, is_active=True, nexus=None)
    table = UserTable(active_users)
    # Wire up django-tables2 sorting/pagination from the request.
    RequestConfig(request).configure(table)
    context = {'users': table}
    return render(request, 'mieli/dashboard.html', context)
def featured(request):
    """Render the static featured-election page."""
    return render(request, 'mieli/featured_election.html')
@login_required(login_url='auth_login')
def vote(request, path):
    """Log the user into the Agora backend and redirect to the voting booth.

    Verifies the election exists (404 otherwise), authenticates the user
    against the backend, and forwards the backend session cookie on the
    redirect so the booth recognizes the voter.
    """
    nexus = request.organization.main_nexus
    n_agora = agora_.get_by_nexus(nexus)
    booth = '%s/%s/election/%s/vote' % (settings.AGORA_ADMIN_USER, n_agora.agora, path)
    try:
        n_agora.link.head(booth)
    except Exception:
        # The backend rejected the HEAD probe, so the election (or the
        # backend itself) is unavailable; present it as a missing page.
        # (Was a bare ``except:``, which also swallowed BaseException.)
        raise Http404('Vote does not exist.')
    r = agora_user.login(n_agora.link, agora_user.get_agora_username(request.user))
    if 'errors' in r:
        return render(request, 'mieli/unauthorized_voter.html')
    session = r['__session']
    if session is None:
        return HttpResponseServerError('Error while logging you in to the backend.')
    c = cookies.Cookies()
    c.parse_response(session)
    if settings.AGORA_BACKEND_COOKIE not in c:
        return HttpResponseServerError('Unexpected behavior in backend.')
    try:
        abc = c[settings.AGORA_BACKEND_COOKIE]
    except KeyError:
        return HttpResponseServerError('Unexpected error getting backend cookie.')
    response = redirect('%s/%s' % (n_agora.link.url, booth))
    # Mirror every attribute of the backend cookie onto our redirect response.
    response.set_cookie(settings.AGORA_BACKEND_COOKIE, value=abc.value, max_age=abc.max_age, expires=abc.expires, path=abc.path, domain=abc.domain, secure=abc.secure, httponly=abc.httponly)
    return response
| agpl-3.0 |
wikimedia/mediawiki-extensions-CirrusSearch | tests/load/send_some.py | 1 | 3918 | #!/usr/bin/env python
import calendar
import time
import sys
import random
from multiprocessing import Process, Queue
from queue import Full
from urllib.parse import unquote
from urllib.request import urlopen
def send_line(search, destination):
    """Issue a single search request against *destination* and log timing."""
    # Since requests come in with timestamp resolution we assume they came in
    # at some random point in the second.
    time.sleep(random.uniform(0, 1))
    started = time.time()
    query_string = "fulltext=Search&srbackend=CirrusSearch"
    url = "%s/%s?%s" % (destination, search, query_string)
    urlopen(url)
    print('Fetched ({:07.3f}) {}'.format(time.time() - started, url))
def hostname(wiki):
    """Map a log wiki field (text after the first ':') to a site hostname.

    Not perfect, but a decent heuristic for the common wiki families.
    """
    dbname = wiki.split(":")[1]
    if dbname == "commonswiki":
        return "commons.wikimedia.org"
    lang, family = dbname[:2], dbname[2:]
    if family == "wiki":
        return "{}.wikipedia.org".format(lang)
    return "{}.{}.org".format(lang, family)
def send_lines(percent, jobs, destination):
    """Replay a sampled percentage of logged searches at their logged pace.

    Reads tab-separated log lines from stdin (field 1: ISO-8601 UTC
    timestamp ending in 'Z', field 2: wiki dbname, field 3: url-encoded
    query), keeps roughly ``percent``%% of them, and dispatches requests
    to ``jobs`` daemon worker processes while preserving the inter-request
    timing recorded in the log.
    """
    queue = Queue(jobs)  # Only allow a backlog of one per job
    # Spawn jobs. Note that we just spawn them as daemon because we don't
    # want to bother signaling them when the main process is done and we don't
    # care if they die when it finishes either. In fact, we'd love for them
    # to die immediately because we want to stop sending requests when the main
    # process stops.
    def work(queue):
        while True:
            try:
                (hostname, search) = queue.get()
                # A '%s' placeholder in the destination is filled with the
                # hostname derived from the log line.
                if "%s" in destination:
                    resolved_destination = destination % hostname
                else:
                    resolved_destination = destination
                if hostname == "commons.wikimedia.org":
                    search = "File:" + search
                send_line(search, resolved_destination)
            except (KeyboardInterrupt, SystemExit):
                break
            except Exception:
                # A failed request shouldn't kill the worker; skip the entry.
                # (Was a bare ``except:``, which also swallowed BaseException.)
                continue
    for i in range(jobs):
        p = Process(target=work, args=(queue,))
        p.daemon = True
        p.start()
    # Got to read stdin line by line even on old pythons....
    line = sys.stdin.readline()
    target_lag = None
    while line:
        # Sample: keep roughly ``percent`` of the log lines.
        if random.uniform(0, 100) > percent:
            line = sys.stdin.readline()
            continue
        s = line.strip().split("\t")
        target_time = calendar.timegm(
            time.strptime(s[1][:-1] + "UTC", "%Y-%m-%dT%H:%M:%S%Z"))
        lag = time.time() - target_time
        if target_lag is None:
            # The first accepted line fixes how far behind the log we replay.
            target_lag = time.time() - target_time
        wait_time = target_lag - lag
        if wait_time >= 0:
            print('Sleeping {} to stay {} ahead of the logged time.'
                  .format(wait_time, target_lag))
            time.sleep(wait_time)
        try:
            queue.put((hostname(s[2]), unquote(s[3])), False)
        except Full:
            print("Couldn't keep up so dropping the request")
        line = sys.stdin.readline()
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] destination")
parser.add_option("-p", dest="percent", type="int", default=1, metavar="N",
help="send this percent of search results")
parser.add_option("-j", "--jobs", type="int", default=1, metavar="JOBS",
help="number of processes used to send searches")
parser.add_option("-d", "--destination", dest="destination", type="string",
metavar="DESTINATION",
default="http://127.0.0.1:8080/wiki/Special:Search",
help="Where to send the searches. Add %s as hostname " +
"to send to hostname based the log line.")
(options, args) = parser.parse_args()
try:
send_lines(options.percent, options.jobs, options.destination)
except KeyboardInterrupt:
pass # This is how we expect to exit anyway
| gpl-2.0 |
adamjmcgrath/glancydesign | src/django-nonrel/django/contrib/gis/tests/geoapp/test_regress.py | 55 | 1987 | from datetime import datetime
from django.contrib.gis.tests.utils import no_mysql, no_oracle, no_postgis, no_spatialite
from django.contrib.gis.shortcuts import render_to_kmz
from django.test import TestCase
from models import City, PennsylvaniaCity
class GeoRegressionTests(TestCase):
def test01_update(self):
"Testing GeoQuerySet.update(), see #10411."
pnt = City.objects.get(name='Pueblo').point
bak = pnt.clone()
pnt.y += 0.005
pnt.x += 0.005
City.objects.filter(name='Pueblo').update(point=pnt)
self.assertEqual(pnt, City.objects.get(name='Pueblo').point)
City.objects.filter(name='Pueblo').update(point=bak)
self.assertEqual(bak, City.objects.get(name='Pueblo').point)
def test02_kmz(self):
"Testing `render_to_kmz` with non-ASCII data, see #11624."
name = '\xc3\x85land Islands'.decode('iso-8859-1')
places = [{'name' : name,
'description' : name,
'kml' : '<Point><coordinates>5.0,23.0</coordinates></Point>'
}]
kmz = render_to_kmz('gis/kml/placemarks.kml', {'places' : places})
@no_spatialite
@no_mysql
def test03_extent(self):
"Testing `extent` on a table with a single point, see #11827."
pnt = City.objects.get(name='Pueblo').point
ref_ext = (pnt.x, pnt.y, pnt.x, pnt.y)
extent = City.objects.filter(name='Pueblo').extent()
for ref_val, val in zip(ref_ext, extent):
self.assertAlmostEqual(ref_val, val, 4)
def test04_unicode_date(self):
"Testing dates are converted properly, even on SpatiaLite, see #16408."
founded = datetime(1857, 5, 23)
mansfield = PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)',
founded=founded)
self.assertEqual(founded, PennsylvaniaCity.objects.dates('founded', 'day')[0])
| bsd-3-clause |
rrooij/youtube-dl | youtube_dl/extractor/disney.py | 29 | 7044 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
compat_str,
determine_ext,
ExtractorError,
update_url_query,
)
class DisneyIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?P<domain>(?:[^/]+\.)?(?:disney\.[a-z]{2,3}(?:\.[a-z]{2})?|disney(?:(?:me|latino)\.com|turkiye\.com\.tr|channel\.de)|(?:starwars|marvelkids)\.com))/(?:(?:embed/|(?:[^/]+/)+[\w-]+-)(?P<id>[a-z0-9]{24})|(?:[^/]+/)?(?P<display_id>[^/?#]+))'''
_TESTS = [{
# Disney.EmbedVideo
'url': 'http://video.disney.com/watch/moana-trailer-545ed1857afee5a0ec239977',
'info_dict': {
'id': '545ed1857afee5a0ec239977',
'ext': 'mp4',
'title': 'Moana - Trailer',
'description': 'A fun adventure for the entire Family! Bring home Moana on Digital HD Feb 21 & Blu-ray March 7',
'upload_date': '20170112',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# Grill.burger
'url': 'http://www.starwars.com/video/rogue-one-a-star-wars-story-intro-featurette',
'info_dict': {
'id': '5454e9f4e9804a552e3524c8',
'ext': 'mp4',
'title': '"Intro" Featurette: Rogue One: A Star Wars Story',
'upload_date': '20170104',
'description': 'Go behind-the-scenes of Rogue One: A Star Wars Story in this featurette with Director Gareth Edwards and the cast of the film.',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'http://videos.disneylatino.com/ver/spider-man-de-regreso-a-casa-primer-adelanto-543a33a1850bdcfcca13bae2',
'only_matching': True,
}, {
'url': 'http://video.en.disneyme.com/watch/future-worm/robo-carp-2001-544b66002aa7353cdd3f5114',
'only_matching': True,
}, {
'url': 'http://video.disneyturkiye.com.tr/izle/7c-7-cuceler/kimin-sesi-zaten-5456f3d015f6b36c8afdd0e2',
'only_matching': True,
}, {
'url': 'http://disneyjunior.disney.com/embed/546a4798ddba3d1612e4005d',
'only_matching': True,
}, {
'url': 'http://www.starwars.com/embed/54690d1e6c42e5f09a0fb097',
'only_matching': True,
}, {
'url': 'http://spiderman.marvelkids.com/embed/522900d2ced3c565e4cc0677',
'only_matching': True,
}, {
'url': 'http://spiderman.marvelkids.com/videos/contest-of-champions-part-four-clip-1',
'only_matching': True,
}, {
'url': 'http://disneyjunior.en.disneyme.com/dj/watch-my-friends-tigger-and-pooh-promo',
'only_matching': True,
}, {
'url': 'http://disneychannel.de/sehen/soy-luna-folge-118-5518518987ba27f3cc729268',
'only_matching': True,
}, {
'url': 'http://disneyjunior.disney.com/galactech-the-galactech-grab-galactech-an-admiral-rescue',
'only_matching': True,
}]
def _real_extract(self, url):
domain, video_id, display_id = re.match(self._VALID_URL, url).groups()
if not video_id:
webpage = self._download_webpage(url, display_id)
grill = re.sub(r'"\s*\+\s*"', '', self._search_regex(
r'Grill\.burger\s*=\s*({.+})\s*:',
webpage, 'grill data'))
page_data = next(s for s in self._parse_json(grill, display_id)['stack'] if s.get('type') == 'video')
video_data = page_data['data'][0]
else:
webpage = self._download_webpage(
'http://%s/embed/%s' % (domain, video_id), video_id)
page_data = self._parse_json(self._search_regex(
r'Disney\.EmbedVideo\s*=\s*({.+});',
webpage, 'embed data'), video_id)
video_data = page_data['video']
for external in video_data.get('externals', []):
if external.get('source') == 'vevo':
return self.url_result('vevo:' + external['data_id'], 'Vevo')
video_id = video_data['id']
title = video_data['title']
formats = []
for flavor in video_data.get('flavors', []):
flavor_format = flavor.get('format')
flavor_url = flavor.get('url')
if not flavor_url or not re.match(r'https?://', flavor_url) or flavor_format == 'mp4_access':
continue
tbr = int_or_none(flavor.get('bitrate'))
if tbr == 99999:
# wrong ks(Kaltura Signature) causes 404 Error
flavor_url = update_url_query(flavor_url, {'ks': ''})
m3u8_formats = self._extract_m3u8_formats(
flavor_url, video_id, 'mp4',
m3u8_id=flavor_format, fatal=False)
for f in m3u8_formats:
# Apple FairPlay
if '/fpshls/' in f['url']:
continue
formats.append(f)
continue
format_id = []
if flavor_format:
format_id.append(flavor_format)
if tbr:
format_id.append(compat_str(tbr))
ext = determine_ext(flavor_url)
if flavor_format == 'applehttp' or ext == 'm3u8':
ext = 'mp4'
width = int_or_none(flavor.get('width'))
height = int_or_none(flavor.get('height'))
formats.append({
'format_id': '-'.join(format_id),
'url': flavor_url,
'width': width,
'height': height,
'tbr': tbr,
'ext': ext,
'vcodec': 'none' if (width == 0 and height == 0) else None,
})
if not formats and video_data.get('expired'):
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, page_data['translations']['video_expired']),
expected=True)
self._sort_formats(formats)
subtitles = {}
for caption in video_data.get('captions', []):
caption_url = caption.get('url')
caption_format = caption.get('format')
if not caption_url or caption_format.startswith('unknown'):
continue
subtitles.setdefault(caption.get('language', 'en'), []).append({
'url': caption_url,
'ext': {
'webvtt': 'vtt',
}.get(caption_format, caption_format),
})
return {
'id': video_id,
'title': title,
'description': video_data.get('description') or video_data.get('short_desc'),
'thumbnail': video_data.get('thumb') or video_data.get('thumb_secure'),
'duration': int_or_none(video_data.get('duration_sec')),
'upload_date': unified_strdate(video_data.get('publish_date')),
'formats': formats,
'subtitles': subtitles,
}
| unlicense |
snasoft/QtCreatorPluginsPack | Bin/3rdParty/vera/bin/lib/test/test_xrange.py | 36 | 5209 | # Python test set -- built-in functions
import test.test_support, unittest
import sys
import pickle
import itertools
import warnings
warnings.filterwarnings("ignore", "integer argument expected",
DeprecationWarning, "unittest")
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
if (start - stop) // step < 0:
# replace stop with next element in the sequence of integers
# that are congruent to start modulo step.
stop += (start - stop) % step
while start != stop:
yield start
start += step
def pyrange_reversed(start, stop, step):
stop += (start - stop) % step
return pyrange(stop - step, start - step, -step)
class XrangeTest(unittest.TestCase):
def assert_iterators_equal(self, xs, ys, test_id, limit=None):
# check that an iterator xs matches the expected results ys,
# up to a given limit.
if limit is not None:
xs = itertools.islice(xs, limit)
ys = itertools.islice(ys, limit)
sentinel = object()
pairs = itertools.izip_longest(xs, ys, fillvalue=sentinel)
for i, (x, y) in enumerate(pairs):
if x == y:
continue
elif x == sentinel:
self.fail('{}: iterator ended unexpectedly '
'at position {}; expected {}'.format(test_id, i, y))
elif y == sentinel:
self.fail('{}: unexpected excess element {} at '
'position {}'.format(test_id, x, i))
else:
self.fail('{}: wrong element at position {};'
'expected {}, got {}'.format(test_id, i, y, x))
def test_xrange(self):
self.assertEqual(list(xrange(3)), [0, 1, 2])
self.assertEqual(list(xrange(1, 5)), [1, 2, 3, 4])
self.assertEqual(list(xrange(0)), [])
self.assertEqual(list(xrange(-3)), [])
self.assertEqual(list(xrange(1, 10, 3)), [1, 4, 7])
self.assertEqual(list(xrange(5, -5, -3)), [5, 2, -1, -4])
a = 10
b = 100
c = 50
self.assertEqual(list(xrange(a, a+2)), [a, a+1])
self.assertEqual(list(xrange(a+2, a, -1L)), [a+2, a+1])
self.assertEqual(list(xrange(a+4, a, -2)), [a+4, a+2])
seq = list(xrange(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
seq = list(xrange(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
seq = list(xrange(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertRaises(TypeError, xrange)
self.assertRaises(TypeError, xrange, 1, 2, 3, 4)
self.assertRaises(ValueError, xrange, 1, 2, 0)
self.assertRaises(OverflowError, xrange, 10**100, 10**101, 10**101)
self.assertRaises(TypeError, xrange, 0, "spam")
self.assertRaises(TypeError, xrange, 0, 42, "spam")
self.assertEqual(len(xrange(0, sys.maxint, sys.maxint-1)), 2)
self.assertRaises(OverflowError, xrange, -sys.maxint, sys.maxint)
self.assertRaises(OverflowError, xrange, 0, 2*sys.maxint)
r = xrange(-sys.maxint, sys.maxint, 2)
self.assertEqual(len(r), sys.maxint)
self.assertRaises(OverflowError, xrange, -sys.maxint-1, sys.maxint, 2)
def test_pickling(self):
testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
(13, 21, 3), (-2, 2, 2)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
r = xrange(*t)
self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))),
list(r))
def test_range_iterators(self):
# see issue 7298
limits = [base + jiggle
for M in (2**32, 2**64)
for base in (-M, -M//2, 0, M//2, M)
for jiggle in (-2, -1, 0, 1, 2)]
test_ranges = [(start, end, step)
for start in limits
for end in limits
for step in (-2**63, -2**31, -2, -1, 1, 2)]
for start, end, step in test_ranges:
try:
iter1 = xrange(start, end, step)
except OverflowError:
pass
else:
iter2 = pyrange(start, end, step)
test_id = "xrange({}, {}, {})".format(start, end, step)
# check first 100 entries
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
try:
iter1 = reversed(xrange(start, end, step))
except OverflowError:
pass
else:
iter2 = pyrange_reversed(start, end, step)
test_id = "reversed(xrange({}, {}, {}))".format(start, end, step)
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
def test_main():
test.test_support.run_unittest(XrangeTest)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
abenzbiria/clients_odoo | addons/hw_scale/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
quoclieu/codebrew17-starving | env/lib/python3.5/site-packages/werkzeug/contrib/lint.py | 131 | 12490 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
class HTTPWarning(Warning):
"""Warning class for HTTP warnings."""
def check_string(context, obj, stacklevel=3):
if type(obj) is not str:
warn(WSGIWarning('%s requires bytestrings, got %s' %
(context, obj.__class__.__name__)))
class InputStream(object):
def __init__(self, stream):
self._stream = stream
def read(self, *args):
if len(args) == 0:
warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
'input stream, thus making calls to '
'wsgi.input.read() unsafe. Conforming servers '
'may never return from this call.'),
stacklevel=2)
elif len(args) != 1:
warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
stacklevel=2)
return self._stream.read(*args)
def readline(self, *args):
if len(args) == 0:
warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
' are unsafe. Use wsgi.input.read() instead.'),
stacklevel=2)
elif len(args) == 1:
warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
'WSGI does not support this, although it\'s available '
'on all major servers.'),
stacklevel=2)
else:
raise TypeError('too many arguments passed to wsgi.input.readline()')
return self._stream.readline(*args)
def __iter__(self):
try:
return iter(self._stream)
except TypeError:
warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
return iter(())
def close(self):
warn(WSGIWarning('application closed the input stream!'),
stacklevel=2)
self._stream.close()
class ErrorStream(object):
def __init__(self, stream):
self._stream = stream
def write(self, s):
check_string('wsgi.error.write()', s)
self._stream.write(s)
def flush(self):
self._stream.flush()
def writelines(self, seq):
for line in seq:
self.write(seq)
def close(self):
warn(WSGIWarning('application closed the error stream!'),
stacklevel=2)
self._stream.close()
class GuardedWrite(object):
def __init__(self, write, chunks):
self._write = write
self._chunks = chunks
def __call__(self, s):
check_string('write()', s)
self._write.write(s)
self._chunks.append(len(s))
class GuardedIterator(object):
def __init__(self, iterator, headers_set, chunks):
self._iterator = iterator
self._next = iter(iterator).next
self.closed = False
self.headers_set = headers_set
self.chunks = chunks
def __iter__(self):
return self
def next(self):
if self.closed:
warn(WSGIWarning('iterated over closed app_iter'),
stacklevel=2)
rv = self._next()
if not self.headers_set:
warn(WSGIWarning('Application returned before it '
'started the response'), stacklevel=2)
check_string('application iterator items', rv)
self.chunks.append(len(rv))
return rv
def close(self):
self.closed = True
if hasattr(self._iterator, 'close'):
self._iterator.close()
if self.headers_set:
status_code, headers = self.headers_set
bytes_sent = sum(self.chunks)
content_length = headers.get('content-length', type=int)
if status_code == 304:
for key, value in headers:
key = key.lower()
if key not in ('expires', 'content-location') and \
is_entity_header(key):
warn(HTTPWarning('entity header %r found in 304 '
'response' % key))
if bytes_sent:
warn(HTTPWarning('304 responses must not have a body'))
elif 100 <= status_code < 200 or status_code == 204:
if content_length != 0:
warn(HTTPWarning('%r responses must have an empty '
'content length') % status_code)
if bytes_sent:
warn(HTTPWarning('%r responses must not have a body' %
status_code))
elif content_length is not None and content_length != bytes_sent:
warn(WSGIWarning('Content-Length and the number of bytes '
'sent to the client do not match.'))
def __del__(self):
if not self.closed:
try:
warn(WSGIWarning('Iterator was garbage collected before '
'it was closed.'))
except Exception:
pass
class LintMiddleware(object):
"""This middleware wraps an application and warns on common errors.
Among other thing it currently checks for the following problems:
- invalid status codes
- non-bytestrings sent to the WSGI server
- strings returned from the WSGI application
- non-empty conditional responses
- unquoted etags
- relative URLs in the Location header
- unsafe calls to wsgi.input
- unclosed iterators
Detected errors are emitted using the standard Python :mod:`warnings`
system and usually end up on :data:`stderr`.
::
from werkzeug.contrib.lint import LintMiddleware
app = LintMiddleware(app)
:param app: the application to wrap
"""
def __init__(self, app):
self.app = app
def check_environ(self, environ):
if type(environ) is not dict:
warn(WSGIWarning('WSGI environment is not a standard python dict.'),
stacklevel=4)
for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once'):
if key not in environ:
warn(WSGIWarning('required environment key %r not found'
% key), stacklevel=3)
if environ['wsgi.version'] != (1, 0):
warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
stacklevel=3)
script_name = environ.get('SCRIPT_NAME', '')
if script_name and script_name[:1] != '/':
warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
% script_name), stacklevel=3)
path_info = environ.get('PATH_INFO', '')
if path_info[:1] != '/':
warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
% path_info), stacklevel=3)
def check_start_response(self, status, headers, exc_info):
check_string('status', status)
status_code = status.split(None, 1)[0]
if len(status_code) != 3 or not status_code.isdigit():
warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
if len(status) < 4 or status[3] != ' ':
warn(WSGIWarning('Invalid value for status %r. Valid '
'status strings are three digits, a space '
'and a status explanation'), stacklevel=3)
status_code = int(status_code)
if status_code < 100:
warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
if type(headers) is not list:
warn(WSGIWarning('header list is not a list'), stacklevel=3)
for item in headers:
if type(item) is not tuple or len(item) != 2:
warn(WSGIWarning('Headers must tuple 2-item tuples'),
stacklevel=3)
name, value = item
if type(name) is not str or type(value) is not str:
warn(WSGIWarning('header items must be strings'),
stacklevel=3)
if name.lower() == 'status':
warn(WSGIWarning('The status header is not supported due to '
'conflicts with the CGI spec.'),
stacklevel=3)
if exc_info is not None and not isinstance(exc_info, tuple):
warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
headers = Headers(headers)
self.check_headers(headers)
return status_code, headers
def check_headers(self, headers):
etag = headers.get('etag')
if etag is not None:
if etag.startswith(('W/', 'w/')):
if etag.startswith('w/'):
warn(HTTPWarning('weak etag indicator should be upcase.'),
stacklevel=4)
etag = etag[2:]
if not (etag[:1] == etag[-1:] == '"'):
warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
location = headers.get('location')
if location is not None:
if not urlparse(location).netloc:
warn(HTTPWarning('absolute URLs required for location header'),
stacklevel=4)
def check_iterator(self, app_iter):
if isinstance(app_iter, string_types):
warn(WSGIWarning('application returned string. Response will '
'send character for character to the client '
'which will kill the performance. Return a '
'list or iterable instead.'), stacklevel=3)
def __call__(self, *args, **kwargs):
if len(args) != 2:
warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
if kwargs:
warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
stacklevel=2)
environ, start_response = args
self.check_environ(environ)
environ['wsgi.input'] = InputStream(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
# hook our own file wrapper in so that applications will always
# iterate to the end and we can check the content length
environ['wsgi.file_wrapper'] = FileWrapper
headers_set = []
chunks = []
def checking_start_response(*args, **kwargs):
if len(args) not in (2, 3):
warn(WSGIWarning('Invalid number of arguments: %s, expected '
'2 or 3' % len(args), stacklevel=2))
if kwargs:
warn(WSGIWarning('no keyword arguments allowed.'))
status, headers = args[:2]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
headers_set[:] = self.check_start_response(status, headers,
exc_info)
return GuardedWrite(start_response(status, headers, exc_info),
chunks)
app_iter = self.app(environ, checking_start_response)
self.check_iterator(app_iter)
return GuardedIterator(app_iter, headers_set, chunks)
| mit |
DMLoy/ECommerceBasic | lib/python2.7/site-packages/django/contrib/admindocs/tests/__init__.py | 100 | 1095 | from __future__ import absolute_import, unicode_literals
from django.contrib.admindocs import views
from django.db.models import fields as builtin_fields
from django.utils import unittest
from django.utils.translation import ugettext as _
from . import fields
class TestFieldType(unittest.TestCase):
def setUp(self):
pass
def test_field_name(self):
self.assertRaises(AttributeError,
views.get_readable_field_data_type, "NotAField"
)
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(builtin_fields.BooleanField()),
_('Boolean (Either True or False)')
)
def test_custom_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.CustomField()),
'A custom field type'
)
self.assertEqual(
views.get_readable_field_data_type(fields.DescriptionLackingField()),
_('Field of type: %(field_type)s') % {
'field_type': 'DescriptionLackingField'
}
)
| mit |
DamCB/tyssue | tyssue/draw/plt_draw.py | 2 | 17946 | """
Matplotlib based plotting
"""
import shutil
import glob
import tempfile
import subprocess
import warnings
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.path import Path
from matplotlib.patches import FancyArrow, Arc, PathPatch
from matplotlib.collections import PatchCollection, PolyCollection, LineCollection
from ..config.draw import sheet_spec
from ..utils.utils import spec_updater, get_sub_eptm
COORDS = ["x", "y"]
def create_gif(
history,
output,
num_frames=None,
interval=None,
draw_func=None,
margin=5,
**draw_kwds,
):
"""Creates an animated gif of the recorded history.
You need imagemagick on your system for this function to work.
Parameters
----------
history : a :class:`tyssue.History` object
output : path to the output gif file
num_frames : int, the number of frames in the gif
interval : tuples, define begin and end frame of the gif
draw_func : a drawing function
this function must take a `sheet` object as first argument
and return a `fig, ax` pair. Defaults to quick_edge_draw
(aka sheet_view with quick mode)
margin : int, the graph margins in percents, default 5
if margin is -1, let the draw function decide
**draw_kwds are passed to the drawing function
"""
if draw_func is None:
draw_func = sheet_view
draw_kwds.update({"mode": "quick"})
time_stamps = history.time_stamps
if num_frames is not None:
times = np.linspace(time_stamps[0], time_stamps[-1], num_frames)
elif interval is not None:
times = time_stamps[interval[0] : interval[1] + 1]
num_frames = len(times)
else:
raise ValueError("Need to define `num_frames` or `interval` parameters.")
graph_dir = pathlib.Path(tempfile.mkdtemp())
x, y = coords = draw_kwds.get("coords", history.sheet.coords[:2])
sheet0 = history.retrieve(0)
bounds = sheet0.vert_df[coords].describe().loc[["min", "max"]]
delta = (bounds.loc["max"] - bounds.loc["min"]).max()
margin = delta * margin / 100
xlim = bounds.loc["min", x] - margin, bounds.loc["max", x] + margin
ylim = bounds.loc["min", y] - margin, bounds.loc["max", y] + margin
if len(history) < num_frames:
for i, (t_, sheet) in enumerate(history):
fig, ax = draw_func(sheet, **draw_kwds)
if isinstance(ax, plt.Axes) and margin >= 0:
ax.set(xlim=xlim, ylim=ylim)
fig.savefig(graph_dir / f"sheet_{i:03d}")
plt.close(fig)
figs = glob.glob((graph_dir / "sheet_*.png").as_posix())
figs.sort()
for i, t in enumerate(times):
index = np.where(time_stamps >= t)[0][0]
fig = figs[index]
shutil.copy(fig, graph_dir / f"movie_{i:04d}.png")
else:
for i, t in enumerate(times):
sheet = history.retrieve(t)
try:
fig, ax = draw_func(sheet, **draw_kwds)
except Exception as e:
print("Droped frame {i}")
if isinstance(ax, plt.Axes) and margin >= 0:
ax.set(xlim=xlim, ylim=ylim)
fig.savefig(graph_dir / f"movie_{i:04d}.png")
plt.close(fig)
try:
proc = subprocess.run(
["convert", (graph_dir / "movie_*.png").as_posix(), output]
)
except Exception as e:
print(
"Converting didn't work, make sure imagemagick is available on your system"
)
raise e
finally:
shutil.rmtree(graph_dir)
def sheet_view(sheet, coords=COORDS, ax=None, **draw_specs_kw):
"""Base view function, parametrizable
through draw_secs
The default sheet_spec specification is:
{'edge': {
'visible': True,
'width': 0.5,
'head_width': 0.2, # arrow head width for the edges
'length_includes_head': True, # see matplotlib Arrow artist doc
'shape': 'right',
'color': '#2b5d0a', # can be an array
'alpha': 0.8,
'zorder': 1,
'colormap': 'viridis'},
'vert': {
'visible': True,
's': 100,
'color': '#000a4b',
'alpha': 0.3,
'zorder': 2},
'face': {
'visible': False,
'color': '#8aa678',
'alpha': 1.0,
'zorder': -1}
}
"""
draw_specs = sheet_spec()
spec_updater(draw_specs, draw_specs_kw)
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
vert_spec = draw_specs["vert"]
if vert_spec["visible"]:
ax = draw_vert(sheet, coords, ax, **vert_spec)
edge_spec = draw_specs["edge"]
if edge_spec["visible"]:
ax = draw_edge(sheet, coords, ax, **edge_spec)
face_spec = draw_specs["face"]
if face_spec["visible"]:
ax = draw_face(sheet, coords, ax, **face_spec)
ax.autoscale()
ax.set_aspect("equal")
return fig, ax
def draw_face(sheet, coords, ax, **draw_spec_kw):
"""Draws epithelial sheet polygonal faces in matplotlib
Keyword values can be specified at the element
level as columns of the sheet.face_df
"""
draw_spec = sheet_spec()["face"]
draw_spec.update(**draw_spec_kw)
collection_specs = parse_face_specs(draw_spec, sheet)
if "visible" in sheet.face_df.columns:
edges = sheet.edge_df[sheet.upcast_face(sheet.face_df["visible"])].index
if edges.shape[0]:
_sheet = get_sub_eptm(sheet, edges)
sheet = _sheet
color = collection_specs["facecolors"]
if isinstance(color, np.ndarray):
faces = sheet.face_df["face_o"].values.astype(np.uint32)
collection_specs["facecolors"] = color.take(faces, axis=0)
else:
warnings.warn("No face is visible")
if not sheet.is_ordered:
sheet_ = sheet.copy()
sheet_.reset_index(order=True)
polys = sheet_.face_polygons(coords)
else:
polys = sheet.face_polygons(coords)
p = PolyCollection(polys, closed=True, **collection_specs)
ax.add_collection(p)
return ax
def parse_face_specs(face_draw_specs, sheet):
collection_specs = {}
color = face_draw_specs.get("color")
if callable(color):
color = color(sheet)
face_draw_specs["color"] = color
if color is None:
return {}
elif isinstance(color, str):
collection_specs["facecolors"] = color
elif hasattr(color, "__len__"):
collection_specs["facecolors"] = _face_color_from_sequence(
face_draw_specs, sheet
)
if "alpha" in face_draw_specs:
collection_specs["alpha"] = face_draw_specs["alpha"]
return collection_specs
def _face_color_from_sequence(face_spec, sheet):
color_ = face_spec["color"]
cmap = cm.get_cmap(face_spec.get("colormap", "viridis"))
color_min, color_max = face_spec.get("color_range", (color_.min(), color_.max()))
if color_.shape in [(sheet.Nf, 3), (sheet.Nf, 4)]:
return color_
elif color_.shape == (sheet.Nf,):
if np.ptp(color_) < 1e-10:
warnings.warn("Attempting to draw a colormap " "with a uniform value")
return np.ones((sheet.Nf, 3)) * 0.5
normed = (color_ - color_min) / (color_max - color_min)
return cmap(normed)
else:
raise ValueError(
"shape of `face_spec['color']` must be either (Nf, 3), (Nf, 4) or (Nf,)"
)
def draw_vert(sheet, coords, ax, **draw_spec_kw):
"""Draw junction vertices in matplotlib."""
draw_spec = sheet_spec()["vert"]
draw_spec.update(**draw_spec_kw)
x, y = coords
if "z_coord" in sheet.vert_df.columns:
pos = sheet.vert_df.sort_values("z_coord")[coords]
else:
pos = sheet.vert_df[coords]
ax.scatter(pos[x], pos[y], **draw_spec_kw)
return ax
def draw_edge(sheet, coords, ax, **draw_spec_kw):
""""""
draw_spec = sheet_spec()["edge"]
draw_spec.update(**draw_spec_kw)
arrow_specs, collections_specs = _parse_edge_specs(draw_spec, sheet)
dx, dy = ("d" + c for c in coords)
sx, sy = ("s" + c for c in coords)
tx, ty = ("t" + c for c in coords)
if draw_spec.get("head_width"):
app_length = (
np.hypot(sheet.edge_df[dx], sheet.edge_df[dy]) * sheet.edge_df.length.mean()
)
patches = [
FancyArrow(*edge[[sx, sy, dx, dy]], **arrow_specs)
for idx, edge in sheet.edge_df[app_length > 1e-6].iterrows()
]
ax.add_collection(PatchCollection(patches, False, **collections_specs))
else:
segments = sheet.edge_df[[sx, sy, tx, ty]].to_numpy().reshape((-1, 2, 2))
ax.add_collection(LineCollection(segments, **collections_specs))
return ax
def _parse_edge_specs(edge_draw_specs, sheet):
    """Split an edge draw specification into arrow and collection kwargs.

    Returns a pair ``(arrow_specs, collection_specs)``: the first holds
    the keyword arguments consumed by ``FancyArrow``, the second those
    consumed by the matplotlib collection.
    """
    arrow_keys = ("head_width", "length_includes_head", "shape")
    arrow_specs = {k: v for k, v in edge_draw_specs.items() if k in arrow_keys}
    # Arrow patches take their color through "edgecolors", plain line
    # collections through "colors".
    color_key = "edgecolors" if arrow_specs.get("head_width") else "colors"
    collection_specs = {}
    if "color" in edge_draw_specs:
        color = edge_draw_specs["color"]
        if callable(color):
            # Evaluate a color function on the sheet, and store the
            # result back so downstream consumers see concrete values.
            color = color(sheet)
            edge_draw_specs["color"] = color
        if isinstance(color, str):
            collection_specs[color_key] = color
        elif hasattr(color, "__len__"):
            collection_specs[color_key] = _wire_color_from_sequence(
                edge_draw_specs, sheet
            )
    for src_key, dst_key in (("width", "linewidths"), ("alpha", "alpha")):
        if src_key in edge_draw_specs:
            collection_specs[dst_key] = edge_draw_specs[src_key]
    return arrow_specs, collection_specs
def _wire_color_from_sequence(edge_spec, sheet):
    """Return per-edge colors from ``edge_spec["color"]``.

    The color array may be given per vertex or per edge:

    * ``(Nv, 3)`` / ``(Nv, 4)``: explicit vertex RGB(A), averaged over
      each edge's source and target vertices.
    * ``(Nv,)``: one scalar per vertex, averaged per edge, then mapped
      through ``edge_spec["colormap"]`` over ``edge_spec["color_range"]``
      (defaulting to the data min/max).
    * ``(Ne, 3)`` / ``(Ne, 4)``: explicit edge RGB(A), returned as is.
    * ``(Ne,)``: one scalar per edge, mapped through the colormap.

    Raises
    ------
    ValueError
        If the color array has none of the accepted shapes.
    """
    color_ = edge_spec["color"]

    def _cmap():
        # Built lazily: explicit RGB(A) inputs never need the colormap.
        return cm.get_cmap(edge_spec.get("colormap", "viridis"))

    def _normed(values):
        # Default range comes from the raw input values, as before.
        color_min, color_max = edge_spec.get(
            "color_range", (color_.min(), color_.max())
        )
        return (values - color_min) / (color_max - color_min)

    if color_.shape in [(sheet.Nv, 3), (sheet.Nv, 4)]:
        return (sheet.upcast_srce(color_) + sheet.upcast_trgt(color_)) / 2
    if color_.shape == (sheet.Nv,):
        if np.ptp(color_) < 1e-10:
            warnings.warn("Attempting to draw a colormap with a uniform value")
            return np.ones((sheet.Ne, 3)) * 0.7
        if not hasattr(color_, "index"):
            color_ = pd.Series(color_, index=sheet.vert_df.index)
        edge_values = (sheet.upcast_srce(color_) + sheet.upcast_trgt(color_)) / 2
        return _cmap()(_normed(edge_values))
    if color_.shape in [(sheet.Ne, 3), (sheet.Ne, 4)]:
        return color_
    if color_.shape == (sheet.Ne,):
        if np.ptp(color_) < 1e-10:
            warnings.warn("Attempting to draw a colormap with a uniform value")
            # BUG FIX: was np.ones((sheet.Nv, 3)) — the output of this
            # per-edge branch must have one row per edge (compare the
            # (Nv,) branch above, which correctly returns Ne rows).
            return np.ones((sheet.Ne, 3)) * 0.7
        return _cmap()(_normed(color_))
    raise ValueError(
        "shape of `edge_spec['color']` must be (Nv, 3), (Nv, 4), (Nv,), "
        "(Ne, 3), (Ne, 4) or (Ne,)"
    )
def quick_edge_draw(sheet, coords=None, ax=None, **draw_spec_kw):
    """Draw all the sheet's edges as a single NaN-separated line.

    Parameters
    ----------
    sheet : a :class:`Sheet` object
    coords : sequence of two str, optional
        Names of the coordinate columns to plot; defaults to ["x", "y"].
    ax : :class:`matplotlib.axes.Axes`, optional
        Axes to draw on; a new figure is created when omitted.
    **draw_spec_kw
        Forwarded to ``ax.plot``.

    Returns
    -------
    fig, ax : the matplotlib figure and axes drawn upon.
    """
    if coords is None:
        # Fresh list per call: a `coords=["x", "y"]` default would be a
        # shared mutable default argument.
        coords = ["x", "y"]
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    # One NaN-separated polyline is much faster than one plot() per edge.
    lines_x, lines_y = _get_lines(sheet, coords)
    ax.plot(lines_x, lines_y, **draw_spec_kw)
    ax.set_aspect("equal")
    return fig, ax
def _get_lines(sheet, coords):
    """Return NaN-separated x and y arrays tracing every edge.

    Each edge contributes a (NaN, source, target) triple, so that all
    edges can be drawn with a single ``plot`` call (same trick as
    matplotlib's triplot).
    """
    scoords = ["s" + c for c in coords]
    tcoords = ["t" + c for c in coords]
    if set(scoords + tcoords).issubset(sheet.edge_df.columns):
        # Endpoint coordinates are already stored on the edges.
        srce_x, srce_y = sheet.edge_df[scoords].values.T
        trgt_x, trgt_y = sheet.edge_df[tcoords].values.T
    else:
        # Fall back to broadcasting vertex positions onto the edges.
        srce_x, srce_y = sheet.upcast_srce(sheet.vert_df[coords]).values.T
        trgt_x, trgt_y = sheet.upcast_trgt(sheet.vert_df[coords]).values.T
    # Lay the data out directly as [nan, s0, t0, nan, s1, t1, ...].
    lines_x = np.full(3 * sheet.Ne, np.nan)
    lines_y = np.full(3 * sheet.Ne, np.nan)
    lines_x[1::3] = srce_x
    lines_x[2::3] = trgt_x
    lines_y[1::3] = srce_y
    lines_y[2::3] = trgt_y
    return lines_x, lines_y
def plot_forces(
    sheet, geom, model, coords, scaling, ax=None, approx_grad=None, **draw_specs_kw
):
    """Plot the net force at each vertex, with amplitudes multiplied
    by `scaling`.  The force is the opposite of the energy gradient,
    F = - grad E.

    Note
    ----
    As a side effect, the scaled gradient columns (e.g. ``gx``, ``gy``)
    are written into ``sheet.vert_df``.
    """
    draw_specs = sheet_spec()
    spec_updater(draw_specs, draw_specs_kw)
    gcoords = ["g" + c for c in coords]
    if approx_grad is not None:
        # Use the provided gradient approximation, restricted to the
        # active vertices (flat array reshaped to one row per vertex).
        app_grad = approx_grad(sheet, geom, model)
        grad_i = (
            pd.DataFrame(
                index=sheet.vert_df[sheet.vert_df.is_active.astype(bool)].index,
                data=app_grad.reshape((-1, len(sheet.coords))),
                columns=["g" + c for c in sheet.coords],
            )
            * scaling
        )
    else:
        grad_i = model.compute_gradient(sheet, components=False) * scaling
        grad_i = grad_i.loc[sheet.vert_df["is_active"].astype(bool)]
    sheet.vert_df[gcoords]=-grad_i[gcoords]  # F = -grad E
    if 'extract' in draw_specs:
        # Optionally restrict the plot to a bounding box of the sheet.
        sheet = sheet.extract_bounding_box(**draw_specs['extract'])
    if ax is None:
        fig, ax = quick_edge_draw(sheet, coords)
    else:
        fig = ax.get_figure()
    # One arrow per vertex: (x, y, gx, gy) unpacks into ax.arrow.
    arrows = sheet.vert_df[coords+gcoords]
    for _, arrow in arrows.iterrows():
        ax.arrow(*arrow, **draw_specs["grad"])
    return fig, ax
def plot_scaled_energies(sheet, geom, model, scales, ax=None):
    """Plot each of the model's energy terms (and their total) as a
    function of a global scaling factor applied to the sheet geometry.

    Parameters
    ----------
    sheet: a :class:`Sheet` object
    geom: a :class:`Geometry` class
    model: a :class:`Model`
    scales: np.linspace of float
        Scaling factors at which the energies are evaluated.
    Returns
    -------
    fig: a :class:`matplotlib.figure.Figure` instance
    ax: :class:`matplotlib.Axes` instance, default None
    """
    # Local import, presumably to avoid a circular import — TODO confirm.
    from ..utils import scaled_unscaled
    def get_energies():
        # Mean of each energy component over its elements.
        energies = np.array([e.mean() for e in model.compute_energy(sheet, True)])
        return energies
    # Evaluate the energies at each scale; scaled_unscaled restores the
    # original geometry after each evaluation.
    energies = np.array(
        [scaled_unscaled(get_energies, scale, sheet, geom) for scale in scales]
    )
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    ax.plot(scales, energies.sum(axis=1), "k-", lw=4, alpha=0.3, label="total")
    for e, label in zip(energies.T, model.labels):
        ax.plot(scales, e, label=label)
    ax.legend()
    return fig, ax
def get_arc_data(sheet):
    """Compute matplotlib ``Arc`` parameters (center, radius, start and
    end angles) for each curved edge of the sheet.

    Relies on the edge columns "curvature", "length", "dx", "dy",
    "sagitta", "arc_chord_angle" and "chord_orient" being present.
    Angles are returned in radians.
    """
    srce_pos = sheet.upcast_srce(sheet.vert_df[sheet.coords])
    trgt_pos = sheet.upcast_trgt(sheet.vert_df[sheet.coords])
    # Signed radius; the sign encodes the bending direction.
    radius = 1 / sheet.edge_df["curvature"]
    # Chord direction components, normalized by the edge length.
    e_x = sheet.edge_df["dx"] / sheet.edge_df["length"]
    e_y = sheet.edge_df["dy"] / sheet.edge_df["length"]
    # Offset the chord midpoint towards the arc center.
    # NOTE(review): a vector perpendicular to (e_x, e_y) is (-e_y, e_x);
    # the offset below uses (-e_y, -e_x) — confirm this is intentional
    # and not a sign slip on the y component.
    center_x = (srce_pos.x + trgt_pos.x) / 2 - e_y * (radius - sheet.edge_df["sagitta"])
    center_y = (srce_pos.y + trgt_pos.y) / 2 - e_x * (radius - sheet.edge_df["sagitta"])
    alpha = sheet.edge_df["arc_chord_angle"]
    beta = sheet.edge_df["chord_orient"]
    # Ok, I admit a fair amount of trial and
    # error to get to the stuff below :-p
    rot = beta - np.sign(alpha) * np.pi / 2
    theta1 = (-alpha + rot) * np.sign(alpha)
    theta2 = (alpha + rot) * np.sign(alpha)
    center_data = pd.DataFrame.from_dict(
        {
            "radius": np.abs(radius),
            "x": center_x,
            "y": center_y,
            "theta1": theta1,
            "theta2": theta2,
        }
    )
    return center_data
def curved_view(sheet, radius_cutoff=1e3):
    """Draw the sheet with curved edges rendered as circular arcs.

    Edges whose arc radius exceeds `radius_cutoff` are considered
    effectively straight and drawn as plain two-point paths.
    """
    center_data = get_arc_data(sheet)
    # Draw everything except the edges, which are re-added as patches.
    fig, ax = sheet_view(sheet, **{"edge": {"visible": False}})
    curves = []
    for idx, edge in center_data.iterrows():
        if edge["radius"] > radius_cutoff:
            # Nearly straight: a straight source-to-target path suffices.
            st = sheet.edge_df.loc[idx, ["srce", "trgt"]]
            xy = sheet.vert_df.loc[st, sheet.coords]
            patch = PathPatch(Path(xy))
        else:
            # matplotlib's Arc takes diameters and angles in degrees.
            patch = Arc(
                edge[["x", "y"]],
                2 * edge["radius"],
                2 * edge["radius"],
                theta1=edge["theta1"] * 180 / np.pi,
                theta2=edge["theta2"] * 180 / np.pi,
            )
        curves.append(patch)
    ax.add_collection(PatchCollection(curves, False, **{"facecolors": "none"}))
    ax.autoscale()
    return fig, ax
def plot_junction(eptm, edge_index, coords=["x", "y"]):
    """Plots local graph around a junction, for debugging purposes.

    Highlights the two vertices of edge `edge_index`, the edges leaving
    each of them, and the edges around their neighboring vertices.
    """
    v10, v11 = eptm.edge_df.loc[edge_index, ["srce", "trgt"]]
    fig, ax = plt.subplots()
    # The junction's two endpoints, drawn as large crosses.
    ax.scatter(*eptm.vert_df.loc[[v10, v11], coords].values.T, marker="+", s=300)
    # Neighbor vertices reached from each endpoint (excluding the other).
    v10_out = set(eptm.edge_df[eptm.edge_df["srce"] == v10]["trgt"]) - {v11}
    v11_out = set(eptm.edge_df[eptm.edge_df["srce"] == v11]["trgt"]) - {v10}
    verts = v10_out.union(v11_out)
    ax.scatter(*eptm.vert_df.loc[v10_out, coords].values.T)
    ax.scatter(*eptm.vert_df.loc[v11_out, coords].values.T)
    # Edges leaving v10: wide translucent red lines.
    for _, edge in eptm.edge_df.query(f"srce == {v10}").iterrows():
        ax.plot(
            edge[["s" + coords[0], "t" + coords[0]]],
            edge[["s" + coords[1], "t" + coords[1]]],
            lw=3,
            alpha=0.3,
            c="r",
        )
    # Edges leaving v11: dashed black lines.
    for _, edge in eptm.edge_df.query(f"srce == {v11}").iterrows():
        ax.plot(
            edge[["s" + coords[0], "t" + coords[0]]],
            edge[["s" + coords[1], "t" + coords[1]]],
            "k--",
        )
    # Thin context lines: edges leaving the neighbors, skipping those
    # that point back at the junction itself.
    for v in verts:
        for _, edge in eptm.edge_df.query(f"srce == {v}").iterrows():
            if edge["trgt"] in {v10, v11}:
                continue
            ax.plot(
                edge[["s" + coords[0], "t" + coords[0]]],
                edge[["s" + coords[1], "t" + coords[1]]],
                "k",
                lw=0.4,
            )
    fig.set_size_inches(12, 12)
    return fig, ax
| gpl-3.0 |
Lh4cKg/sl4a | python/python-twitter/twitter_test.py | 90 | 23640 | #!/usr/bin/python2.4
# -*- coding: utf-8 -*-#
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unit tests for the twitter.py library'''
__author__ = 'dewitt@google.com'
import os
import simplejson
import time
import calendar
import unittest
import twitter
class StatusTest(unittest.TestCase):
  '''Tests for the twitter.Status model: construction, accessors,
  serialization and equality.  (Python 2-era code; note the u-prefixed
  unicode literals and simplejson usage.)'''
  SAMPLE_JSON = '''{"created_at": "Fri Jan 26 23:17:14 +0000 2007", "id": 4391023, "text": "A l\u00e9gp\u00e1rn\u00e1s haj\u00f3m tele van angoln\u00e1kkal.", "user": {"description": "Canvas. JC Penny. Three ninety-eight.", "id": 718443, "location": "Okinawa, Japan", "name": "Kesuke Miyagi", "profile_image_url": "http://twitter.com/system/user/profile_image/718443/normal/kesuke.png", "screen_name": "kesuke", "url": "http://twitter.com/kesuke"}}'''
  def _GetSampleUser(self):
    # Builds the User matching the "user" object embedded in SAMPLE_JSON.
    return twitter.User(id=718443,
                        name='Kesuke Miyagi',
                        screen_name='kesuke',
                        description=u'Canvas. JC Penny. Three ninety-eight.',
                        location='Okinawa, Japan',
                        url='http://twitter.com/kesuke',
                        profile_image_url='http://twitter.com/system/user/pro'
                                          'file_image/718443/normal/kesuke.pn'
                                          'g')
  def _GetSampleStatus(self):
    # Builds the Status matching SAMPLE_JSON.
    return twitter.Status(created_at='Fri Jan 26 23:17:14 +0000 2007',
                          id=4391023,
                          text=u'A légpárnás hajóm tele van angolnákkal.',
                          user=self._GetSampleUser())
  def testInit(self):
    '''Test the twitter.Status constructor'''
    # Constructor smoke test; the instance itself is not inspected.
    status = twitter.Status(created_at='Fri Jan 26 23:17:14 +0000 2007',
                            id=4391023,
                            text=u'A légpárnás hajóm tele van angolnákkal.',
                            user=self._GetSampleUser())
  def testGettersAndSetters(self):
    '''Test all of the twitter.Status getters and setters'''
    status = twitter.Status()
    status.SetId(4391023)
    self.assertEqual(4391023, status.GetId())
    created_at = calendar.timegm((2007, 1, 26, 23, 17, 14, -1, -1, -1))
    status.SetCreatedAt('Fri Jan 26 23:17:14 +0000 2007')
    self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', status.GetCreatedAt())
    self.assertEqual(created_at, status.GetCreatedAtInSeconds())
    # Pin "now" so the relative timestamp is deterministic.
    status.SetNow(created_at + 10)
    self.assertEqual("about 10 seconds ago", status.GetRelativeCreatedAt())
    status.SetText(u'A légpárnás hajóm tele van angolnákkal.')
    self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.',
                     status.GetText())
    status.SetUser(self._GetSampleUser())
    self.assertEqual(718443, status.GetUser().id)
  def testProperties(self):
    '''Test all of the twitter.Status properties'''
    status = twitter.Status()
    status.id = 1
    self.assertEqual(1, status.id)
    created_at = calendar.timegm((2007, 1, 26, 23, 17, 14, -1, -1, -1))
    status.created_at = 'Fri Jan 26 23:17:14 +0000 2007'
    self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', status.created_at)
    self.assertEqual(created_at, status.created_at_in_seconds)
    status.now = created_at + 10
    self.assertEqual('about 10 seconds ago', status.relative_created_at)
    status.user = self._GetSampleUser()
    self.assertEqual(718443, status.user.id)
  def _ParseDate(self, string):
    # Parse a "Mon DD HH:MM:SS YYYY" string as seconds since the epoch (UTC).
    return calendar.timegm(time.strptime(string, '%b %d %H:%M:%S %Y'))
  def testRelativeCreatedAt(self):
    '''Test various permutations of Status relative_created_at'''
    status = twitter.Status(created_at='Fri Jan 01 12:00:00 +0000 2007')
    status.now = self._ParseDate('Jan 01 12:00:00 2007')
    self.assertEqual('about a second ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:00:01 2007')
    self.assertEqual('about a second ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:00:02 2007')
    self.assertEqual('about 2 seconds ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:00:05 2007')
    self.assertEqual('about 5 seconds ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:00:50 2007')
    self.assertEqual('about a minute ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:01:00 2007')
    self.assertEqual('about a minute ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:01:10 2007')
    self.assertEqual('about a minute ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:02:00 2007')
    self.assertEqual('about 2 minutes ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:31:50 2007')
    self.assertEqual('about 31 minutes ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 12:50:00 2007')
    self.assertEqual('about an hour ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 13:00:00 2007')
    self.assertEqual('about an hour ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 13:10:00 2007')
    self.assertEqual('about an hour ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 14:00:00 2007')
    self.assertEqual('about 2 hours ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 01 19:00:00 2007')
    self.assertEqual('about 7 hours ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 02 11:30:00 2007')
    self.assertEqual('about a day ago', status.relative_created_at)
    status.now = self._ParseDate('Jan 04 12:00:00 2007')
    self.assertEqual('about 3 days ago', status.relative_created_at)
    status.now = self._ParseDate('Feb 04 12:00:00 2007')
    self.assertEqual('about 34 days ago', status.relative_created_at)
  def testAsJsonString(self):
    '''Test the twitter.Status AsJsonString method'''
    self.assertEqual(StatusTest.SAMPLE_JSON,
                     self._GetSampleStatus().AsJsonString())
  def testAsDict(self):
    '''Test the twitter.Status AsDict method'''
    status = self._GetSampleStatus()
    data = status.AsDict()
    self.assertEqual(4391023, data['id'])
    self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', data['created_at'])
    self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.', data['text'])
    self.assertEqual(718443, data['user']['id'])
  def testEq(self):
    '''Test the twitter.Status __eq__ method'''
    status = twitter.Status()
    status.created_at = 'Fri Jan 26 23:17:14 +0000 2007'
    status.id = 4391023
    status.text = u'A légpárnás hajóm tele van angolnákkal.'
    status.user = self._GetSampleUser()
    self.assertEqual(status, self._GetSampleStatus())
  def testNewFromJsonDict(self):
    '''Test the twitter.Status NewFromJsonDict method'''
    data = simplejson.loads(StatusTest.SAMPLE_JSON)
    status = twitter.Status.NewFromJsonDict(data)
    self.assertEqual(self._GetSampleStatus(), status)
class UserTest(unittest.TestCase):
  '''Tests for the twitter.User model: construction, accessors,
  serialization and equality.'''
  SAMPLE_JSON = '''{"description": "Indeterminate things", "id": 673483, "location": "San Francisco, CA", "name": "DeWitt", "profile_image_url": "http://twitter.com/system/user/profile_image/673483/normal/me.jpg", "screen_name": "dewitt", "status": {"created_at": "Fri Jan 26 17:28:19 +0000 2007", "id": 4212713, "text": "\\"Select all\\" and archive your Gmail inbox. The page loads so much faster!"}, "url": "http://unto.net/"}'''
  def _GetSampleStatus(self):
    # Builds the Status matching the "status" object in SAMPLE_JSON.
    return twitter.Status(created_at='Fri Jan 26 17:28:19 +0000 2007',
                          id=4212713,
                          text='"Select all" and archive your Gmail inbox. '
                               ' The page loads so much faster!')
  def _GetSampleUser(self):
    # Builds the User matching SAMPLE_JSON.
    return twitter.User(id=673483,
                        name='DeWitt',
                        screen_name='dewitt',
                        description=u'Indeterminate things',
                        location='San Francisco, CA',
                        url='http://unto.net/',
                        profile_image_url='http://twitter.com/system/user/prof'
                                          'ile_image/673483/normal/me.jpg',
                        status=self._GetSampleStatus())
  def testInit(self):
    '''Test the twitter.User constructor'''
    # Constructor smoke test; the instance itself is not inspected.
    user = twitter.User(id=673483,
                        name='DeWitt',
                        screen_name='dewitt',
                        description=u'Indeterminate things',
                        url='http://twitter.com/dewitt',
                        profile_image_url='http://twitter.com/system/user/prof'
                                          'ile_image/673483/normal/me.jpg',
                        status=self._GetSampleStatus())
  def testGettersAndSetters(self):
    '''Test all of the twitter.User getters and setters'''
    user = twitter.User()
    user.SetId(673483)
    self.assertEqual(673483, user.GetId())
    user.SetName('DeWitt')
    self.assertEqual('DeWitt', user.GetName())
    user.SetScreenName('dewitt')
    self.assertEqual('dewitt', user.GetScreenName())
    user.SetDescription('Indeterminate things')
    self.assertEqual('Indeterminate things', user.GetDescription())
    user.SetLocation('San Francisco, CA')
    self.assertEqual('San Francisco, CA', user.GetLocation())
    user.SetProfileImageUrl('http://twitter.com/system/user/profile_im'
                            'age/673483/normal/me.jpg')
    self.assertEqual('http://twitter.com/system/user/profile_image/673'
                     '483/normal/me.jpg', user.GetProfileImageUrl())
    user.SetStatus(self._GetSampleStatus())
    self.assertEqual(4212713, user.GetStatus().id)
  def testProperties(self):
    '''Test all of the twitter.User properties'''
    user = twitter.User()
    user.id = 673483
    self.assertEqual(673483, user.id)
    user.name = 'DeWitt'
    self.assertEqual('DeWitt', user.name)
    user.screen_name = 'dewitt'
    self.assertEqual('dewitt', user.screen_name)
    user.description = 'Indeterminate things'
    self.assertEqual('Indeterminate things', user.description)
    user.location = 'San Francisco, CA'
    self.assertEqual('San Francisco, CA', user.location)
    user.profile_image_url = 'http://twitter.com/system/user/profile_i' \
                             'mage/673483/normal/me.jpg'
    self.assertEqual('http://twitter.com/system/user/profile_image/6734'
                     '83/normal/me.jpg', user.profile_image_url)
    # BUG FIX: this used to assign and assert on `self.status` (an
    # attribute of the TestCase), leaving the User.status property
    # completely untested.
    user.status = self._GetSampleStatus()
    self.assertEqual(4212713, user.status.id)
  def testAsJsonString(self):
    '''Test the twitter.User AsJsonString method'''
    self.assertEqual(UserTest.SAMPLE_JSON,
                     self._GetSampleUser().AsJsonString())
  def testAsDict(self):
    '''Test the twitter.User AsDict method'''
    user = self._GetSampleUser()
    data = user.AsDict()
    self.assertEqual(673483, data['id'])
    self.assertEqual('DeWitt', data['name'])
    self.assertEqual('dewitt', data['screen_name'])
    self.assertEqual('Indeterminate things', data['description'])
    self.assertEqual('San Francisco, CA', data['location'])
    self.assertEqual('http://twitter.com/system/user/profile_image/6734'
                     '83/normal/me.jpg', data['profile_image_url'])
    self.assertEqual('http://unto.net/', data['url'])
    self.assertEqual(4212713, data['status']['id'])
  def testEq(self):
    '''Test the twitter.User __eq__ method'''
    user = twitter.User()
    user.id = 673483
    user.name = 'DeWitt'
    user.screen_name = 'dewitt'
    user.description = 'Indeterminate things'
    user.location = 'San Francisco, CA'
    user.profile_image_url = 'http://twitter.com/system/user/profile_image/67' \
                             '3483/normal/me.jpg'
    user.url = 'http://unto.net/'
    user.status = self._GetSampleStatus()
    self.assertEqual(user, self._GetSampleUser())
  def testNewFromJsonDict(self):
    '''Test the twitter.User NewFromJsonDict method'''
    data = simplejson.loads(UserTest.SAMPLE_JSON)
    user = twitter.User.NewFromJsonDict(data)
    self.assertEqual(self._GetSampleUser(), user)
class FileCacheTest(unittest.TestCase):
  '''Tests for the twitter._FileCache on-disk cache.'''
  def testInit(self):
    """Test the twitter._FileCache constructor"""
    cache = twitter._FileCache()
    # assertTrue replaces the long-deprecated TestCase.assert_ alias
    # (removed in recent Python versions; assertTrue exists since 2.7).
    self.assertTrue(cache is not None, 'cache is None')
  def testSet(self):
    """Test the twitter._FileCache.Set method"""
    cache = twitter._FileCache()
    cache.Set("foo",'Hello World!')
    cache.Remove("foo")
  def testRemove(self):
    """Test the twitter._FileCache.Remove method"""
    cache = twitter._FileCache()
    cache.Set("foo",'Hello World!')
    cache.Remove("foo")
    data = cache.Get("foo")
    self.assertEqual(data, None, 'data is not None')
  def testGet(self):
    """Test the twitter._FileCache.Get method"""
    cache = twitter._FileCache()
    cache.Set("foo",'Hello World!')
    data = cache.Get("foo")
    self.assertEqual('Hello World!', data)
    cache.Remove("foo")
  def testGetCachedTime(self):
    """Test the twitter._FileCache.GetCachedTime method"""
    now = time.time()
    cache = twitter._FileCache()
    cache.Set("foo",'Hello World!')
    cached_time = cache.GetCachedTime("foo")
    delta = cached_time - now
    self.assertTrue(delta <= 1,
                    'Cached time differs from clock time by more than 1 second.')
    cache.Remove("foo")
class ApiTest(unittest.TestCase):
  '''Tests for the twitter.Api HTTP client, driven by a mocked urllib
  that serves canned JSON fixtures from the testdata directory.
  Note: this class uses Python 2 `except X, e` syntax.'''
  def setUp(self):
    # Replace urllib with a mock so no real HTTP requests are made.
    self._urllib = MockUrllib()
    api = twitter.Api(username='test', password='test', cache=None)
    api.SetUrllib(self._urllib)
    self._api = api
  def testTwitterError(self):
    '''Test that twitter responses containing an error message are wrapped.'''
    self._AddHandler('http://twitter.com/statuses/public_timeline.json',
                     curry(self._OpenTestData, 'public_timeline_error.json'))
    # Manually try/catch so we can check the exception's value
    try:
      statuses = self._api.GetPublicTimeline()
    except twitter.TwitterError, error:
      # If the error message matches, the test passes
      self.assertEqual('test error', error.message)
    else:
      self.fail('TwitterError expected')
  def testGetPublicTimeline(self):
    '''Test the twitter.Api GetPublicTimeline method'''
    self._AddHandler('http://twitter.com/statuses/public_timeline.json?since_id=12345',
                     curry(self._OpenTestData, 'public_timeline.json'))
    statuses = self._api.GetPublicTimeline(since_id=12345)
    # This is rather arbitrary, but spot checking is better than nothing
    self.assertEqual(20, len(statuses))
    self.assertEqual(89497702, statuses[0].id)
  def testGetUserTimeline(self):
    '''Test the twitter.Api GetUserTimeline method'''
    self._AddHandler('http://twitter.com/statuses/user_timeline/kesuke.json?count=1',
                     curry(self._OpenTestData, 'user_timeline-kesuke.json'))
    statuses = self._api.GetUserTimeline('kesuke', count=1)
    # This is rather arbitrary, but spot checking is better than nothing
    self.assertEqual(89512102, statuses[0].id)
    self.assertEqual(718443, statuses[0].user.id)
  def testGetFriendsTimeline(self):
    '''Test the twitter.Api GetFriendsTimeline method'''
    self._AddHandler('http://twitter.com/statuses/friends_timeline/kesuke.json',
                     curry(self._OpenTestData, 'friends_timeline-kesuke.json'))
    statuses = self._api.GetFriendsTimeline('kesuke')
    # This is rather arbitrary, but spot checking is better than nothing
    self.assertEqual(20, len(statuses))
    self.assertEqual(718443, statuses[0].user.id)
  def testGetStatus(self):
    '''Test the twitter.Api GetStatus method'''
    self._AddHandler('http://twitter.com/statuses/show/89512102.json',
                     curry(self._OpenTestData, 'show-89512102.json'))
    status = self._api.GetStatus(89512102)
    self.assertEqual(89512102, status.id)
    self.assertEqual(718443, status.user.id)
  def testDestroyStatus(self):
    '''Test the twitter.Api DestroyStatus method'''
    self._AddHandler('http://twitter.com/statuses/destroy/103208352.json',
                     curry(self._OpenTestData, 'status-destroy.json'))
    status = self._api.DestroyStatus(103208352)
    self.assertEqual(103208352, status.id)
  def testPostUpdate(self):
    '''Test the twitter.Api PostUpdate method'''
    self._AddHandler('http://twitter.com/statuses/update.json',
                     curry(self._OpenTestData, 'update.json'))
    status = self._api.PostUpdate(u'Моё судно на воздушной подушке полно угрей')
    # This is rather arbitrary, but spot checking is better than nothing
    self.assertEqual(u'Моё судно на воздушной подушке полно угрей', status.text)
  def testGetReplies(self):
    '''Test the twitter.Api GetReplies method'''
    self._AddHandler('http://twitter.com/statuses/replies.json?page=1',
                     curry(self._OpenTestData, 'replies.json'))
    statuses = self._api.GetReplies(page=1)
    self.assertEqual(36657062, statuses[0].id)
  def testGetFriends(self):
    '''Test the twitter.Api GetFriends method'''
    self._AddHandler('http://twitter.com/statuses/friends.json?page=1',
                     curry(self._OpenTestData, 'friends.json'))
    users = self._api.GetFriends(page=1)
    buzz = [u.status for u in users if u.screen_name == 'buzz']
    self.assertEqual(89543882, buzz[0].id)
  def testGetFollowers(self):
    '''Test the twitter.Api GetFollowers method'''
    self._AddHandler('http://twitter.com/statuses/followers.json?page=1',
                     curry(self._OpenTestData, 'followers.json'))
    users = self._api.GetFollowers(page=1)
    # This is rather arbitrary, but spot checking is better than nothing
    alexkingorg = [u.status for u in users if u.screen_name == 'alexkingorg']
    self.assertEqual(89554432, alexkingorg[0].id)
  def testGetFeatured(self):
    '''Test the twitter.Api GetFeatured method'''
    self._AddHandler('http://twitter.com/statuses/featured.json',
                     curry(self._OpenTestData, 'featured.json'))
    users = self._api.GetFeatured()
    # This is rather arbitrary, but spot checking is better than nothing
    stevenwright = [u.status for u in users if u.screen_name == 'stevenwright']
    self.assertEqual(86991742, stevenwright[0].id)
  def testGetDirectMessages(self):
    '''Test the twitter.Api GetDirectMessages method'''
    self._AddHandler('http://twitter.com/direct_messages.json?page=1',
                     curry(self._OpenTestData, 'direct_messages.json'))
    statuses = self._api.GetDirectMessages(page=1)
    self.assertEqual(u'A légpárnás hajóm tele van angolnákkal.', statuses[0].text)
  def testPostDirectMessage(self):
    '''Test the twitter.Api PostDirectMessage method'''
    self._AddHandler('http://twitter.com/direct_messages/new.json',
                     curry(self._OpenTestData, 'direct_messages-new.json'))
    status = self._api.PostDirectMessage('test', u'Моё судно на воздушной подушке полно угрей')
    # This is rather arbitrary, but spot checking is better than nothing
    self.assertEqual(u'Моё судно на воздушной подушке полно угрей', status.text)
  def testDestroyDirectMessage(self):
    '''Test the twitter.Api DestroyDirectMessage method'''
    self._AddHandler('http://twitter.com/direct_messages/destroy/3496342.json',
                     curry(self._OpenTestData, 'direct_message-destroy.json'))
    status = self._api.DestroyDirectMessage(3496342)
    # This is rather arbitrary, but spot checking is better than nothing
    self.assertEqual(673483, status.sender_id)
  def testCreateFriendship(self):
    '''Test the twitter.Api CreateFriendship method'''
    self._AddHandler('http://twitter.com/friendships/create/dewitt.json',
                     curry(self._OpenTestData, 'friendship-create.json'))
    user = self._api.CreateFriendship('dewitt')
    # This is rather arbitrary, but spot checking is better than nothing
    self.assertEqual(673483, user.id)
  def testDestroyFriendship(self):
    '''Test the twitter.Api DestroyFriendship method'''
    self._AddHandler('http://twitter.com/friendships/destroy/dewitt.json',
                     curry(self._OpenTestData, 'friendship-destroy.json'))
    user = self._api.DestroyFriendship('dewitt')
    # This is rather arbitrary, but spot checking is better than nothing
    self.assertEqual(673483, user.id)
  def testGetUser(self):
    '''Test the twitter.Api GetUser method'''
    self._AddHandler('http://twitter.com/users/show/dewitt.json',
                     curry(self._OpenTestData, 'show-dewitt.json'))
    user = self._api.GetUser('dewitt')
    self.assertEqual('dewitt', user.screen_name)
    self.assertEqual(89586072, user.status.id)
  def _AddHandler(self, url, callback):
    # Register a canned response for the given URL on the mock urllib.
    self._urllib.AddHandler(url, callback)
  def _GetTestDataPath(self, filename):
    # Fixtures live in a "testdata" directory next to this file.
    directory = os.path.dirname(os.path.abspath(__file__))
    test_data_dir = os.path.join(directory, 'testdata')
    return os.path.join(test_data_dir, filename)
  def _OpenTestData(self, filename):
    # Returns an open file object, mimicking what urllib's open returns.
    return open(self._GetTestDataPath(filename))
class MockUrllib(object):
  '''A mock replacement for urllib that hardcodes specific responses.'''
  def __init__(self):
    # Maps a URL to the zero-argument callable that produces its response.
    self._handlers = {}
    self.HTTPBasicAuthHandler = MockHTTPBasicAuthHandler
  def AddHandler(self, url, callback):
    '''Register `callback` to serve requests for `url`.'''
    self._handlers[url] = callback
  def build_opener(self, *handlers):
    '''Return a single-use opener backed by the registered handlers.'''
    return MockOpener(self._handlers)
class MockOpener(object):
  '''A mock opener for urllib.

  Enforces the open/close protocol: each opener instance must be closed
  before it can be opened again, and unknown URLs raise.
  '''
  def __init__(self, handlers):
    self._handlers = handlers
    self._opened = False
  def open(self, url, data=None):
    '''Serve `url` from the registered handlers (once per open/close cycle).'''
    if self._opened:
      raise Exception('MockOpener already opened.')
    if url not in self._handlers:
      raise Exception('Unexpected URL %s' % url)
    self._opened = True
    return self._handlers[url]()
  def close(self):
    '''Mark the opener closed so it may be reused.'''
    if not self._opened:
      raise Exception('MockOpener closed before it was opened.')
    self._opened = False
class MockHTTPBasicAuthHandler(object):
  '''A mock replacement for HTTPBasicAuthHandler.'''
  def add_password(self, realm, uri, user, passwd):
    '''Accept credentials without storing or verifying them.'''
    # TODO(dewitt): Add verification that the proper args are passed
    return None
class curry:
  '''Partial-application helper.

  Stores a callable together with positional and keyword arguments;
  calling the instance appends new positionals and overlays new
  keywords (call-time keywords win) before invoking the callable.

  http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
  '''
  def __init__(self, fun, *args, **kwargs):
    self.fun = fun
    self.pending = args[:]
    self.kwargs = kwargs.copy()
  def __call__(self, *args, **kwargs):
    if kwargs and self.kwargs:
      # Merge: stored keywords first, call-time keywords override.
      kw = dict(self.kwargs, **kwargs)
    else:
      kw = kwargs or self.kwargs
    return self.fun(*(self.pending + args), **kw)
def suite():
  '''Build a TestSuite covering every test case class in this module.'''
  all_tests = unittest.TestSuite()
  for case in (FileCacheTest, StatusTest, UserTest, ApiTest):
    all_tests.addTests(unittest.makeSuite(case))
  return all_tests
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
jergosh/slr_pipeline | bin/paml_bs_wrapper.py | 1 | 1684 | import glob
import argparse
import pickle
from os import path
from Bio.Phylo.PAML import codeml
# Model A (alternative): model = 2, NSsites = 2, fix_omega = 0
# Model A1 (null): model = 2, NSsites = 2, fix_omega = 1, omega = 1
# Command-line interface: the working directory holding the tree, the
# dataset stem used to locate the alignment/tree files, and a sample id.
argparser = argparse.ArgumentParser()
argparser.add_argument('--indir', metavar="dir", type=str, required=True)
argparser.add_argument('--dataset', metavar="str", type=str, required=True)
argparser.add_argument('--sample', metavar="str", type=str, required=True)
def _run_codeml(aln, tree, work_dir, out_name, fix_omega, results_name):
    """Run one codeml branch-site model and pickle its results.

    Model A (alternative): model=2, NSsites=[2], fix_omega=0.
    Model A1 (null):       model=2, NSsites=[2], fix_omega=1.
    Returns the parsed results dict from Biopython's codeml wrapper.
    """
    out_file = path.join(work_dir, out_name)
    cml = codeml.Codeml(alignment=aln, tree=tree, out_file=out_file,
                        working_dir=work_dir)
    cml.set_options(method=1, model=2, NSsites=[2], fix_omega=fix_omega,
                    omega=1, verbose=1, seqtype=1, CodonFreq=2)
    results = cml.run()
    # BUG FIX: pickles are binary data; the handle must be opened in
    # 'wb' (the old 'w' breaks on Python 3 and on Windows), and closed
    # deterministically via a context manager.
    with open(path.join(work_dir, results_name), 'wb') as handle:
        pickle.dump(results, handle)
    return results

def main():
    """Run the branch-site likelihood-ratio test (model A vs. null A1)."""
    args = argparser.parse_args()
    aln = path.join(args.indir, "..", args.dataset+'.paml')
    tree = path.join(args.indir, args.dataset+'.nh')
    # Null model A1 (omega fixed at 1) in subdirectory '1'.
    results_1 = _run_codeml(aln, tree, path.join(args.indir, '1'), 'A1.out',
                            1, "results_1.pk")
    # Alternative model A (omega free) in subdirectory '2'.
    results_2 = _run_codeml(aln, tree, path.join(args.indir, '2'), 'A.out',
                            0, "results_2.pk")
    # Twice the log-likelihood difference, chi-square distributed under
    # the null hypothesis.
    LRT = 2*(results_2["NSsites"][2]["lnL"] - results_1["NSsites"][2]["lnL"])
    print(LRT)
    # find the sites if applicable & write them out
    # write out the lnL
# Entry point when invoked as a script.
if __name__ == "__main__":
    main()
| gpl-2.0 |
shashank971/edx-platform | cms/djangoapps/contentstore/views/tests/test_videos.py | 83 | 15190 | #-*- coding: utf-8 -*-
"""
Unit tests for video-related REST APIs.
"""
# pylint: disable=attribute-defined-outside-init
import csv
import json
import dateutil.parser
import re
from StringIO import StringIO
from django.conf import settings
from django.test.utils import override_settings
from mock import Mock, patch
from edxval.api import create_profile, create_video, get_video_info
from contentstore.models import VideoUploadConfig
from contentstore.views.videos import KEY_EXPIRATION_IN_SECONDS, StatusDisplayStrings
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from xmodule.modulestore.tests.factories import CourseFactory
class VideoUploadTestMixin(object):
    """
    Test cases for the video upload feature

    Provides a course configured for the upload pipeline, a set of
    previously-uploaded videos registered in edx-val, and access-control
    tests shared by the concrete handler test cases.
    """
    def get_url_for_course_key(self, course_key):
        """Return video handler URL for the given course"""
        return reverse_course_url(self.VIEW_NAME, course_key)
    def setUp(self):
        # Configure the course for uploads and seed edx-val with profiles
        # and previously uploaded videos in a variety of states.
        super(VideoUploadTestMixin, self).setUp()
        self.url = self.get_url_for_course_key(self.course.id)
        self.test_token = "test_token"
        self.course.video_upload_pipeline = {
            "course_video_upload_token": self.test_token,
        }
        self.save_course()
        self.profiles = ["profile1", "profile2"]
        self.previous_uploads = [
            {
                "edx_video_id": "test1",
                "client_video_id": "test1.mp4",
                "duration": 42.0,
                "status": "upload",
                "courses": [unicode(self.course.id)],
                "encoded_videos": [],
            },
            {
                "edx_video_id": "test2",
                "client_video_id": "test2.mp4",
                "duration": 128.0,
                "status": "file_complete",
                "courses": [unicode(self.course.id)],
                "encoded_videos": [
                    {
                        "profile": "profile1",
                        "url": "http://example.com/profile1/test2.mp4",
                        "file_size": 1600,
                        "bitrate": 100,
                    },
                    {
                        "profile": "profile2",
                        "url": "http://example.com/profile2/test2.mov",
                        "file_size": 16000,
                        "bitrate": 1000,
                    },
                ],
            },
            {
                "edx_video_id": "non-ascii",
                "client_video_id": u"nón-ascii-näme.mp4",
                "duration": 256.0,
                "status": "transcode_active",
                "courses": [unicode(self.course.id)],
                "encoded_videos": [
                    {
                        "profile": "profile1",
                        "url": u"http://example.com/profile1/nón-ascii-näme.mp4",
                        "file_size": 3200,
                        "bitrate": 100,
                    },
                ]
            },
        ]
        # Ensure every status string is tested
        self.previous_uploads += [
            {
                "edx_video_id": "status_test_{}".format(status),
                "client_video_id": "status_test.mp4",
                "duration": 3.14,
                "status": status,
                "courses": [unicode(self.course.id)],
                "encoded_videos": [],
            }
            for status in (
                StatusDisplayStrings._STATUS_MAP.keys() +  # pylint:disable=protected-access
                ["non_existent_status"]
            )
        ]
        for profile in self.profiles:
            create_profile(profile)
        for video in self.previous_uploads:
            create_video(video)
    def _get_previous_upload(self, edx_video_id):
        """Returns the previous upload with the given video id."""
        return next(
            video
            for video in self.previous_uploads
            if video["edx_video_id"] == edx_video_id
        )
    def test_anon_user(self):
        # Anonymous users are redirected to login.
        self.client.logout()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 302)
    def test_put(self):
        # PUT is not an allowed method on this endpoint.
        response = self.client.put(self.url)
        self.assertEqual(response.status_code, 405)
    def test_invalid_course_key(self):
        response = self.client.get(
            self.get_url_for_course_key("Non/Existent/Course")
        )
        self.assertEqual(response.status_code, 404)
    def test_non_staff_user(self):
        client, __ = self.create_non_staff_authed_user_client()
        response = client.get(self.url)
        self.assertEqual(response.status_code, 403)
    def test_video_pipeline_not_enabled(self):
        settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] = False
        self.assertEqual(self.client.get(self.url).status_code, 404)
    def test_video_pipeline_not_configured(self):
        settings.VIDEO_UPLOAD_PIPELINE = None
        self.assertEqual(self.client.get(self.url).status_code, 404)
    def test_course_not_configured(self):
        self.course.video_upload_pipeline = {}
        self.save_course()
        self.assertEqual(self.client.get(self.url).status_code, 404)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_VIDEO_UPLOAD_PIPELINE": True})
@override_settings(VIDEO_UPLOAD_PIPELINE={"BUCKET": "test_bucket", "ROOT_PATH": "test_root"})
class VideosHandlerTestCase(VideoUploadTestMixin, CourseTestCase):
"""Test cases for the main video upload endpoint"""
VIEW_NAME = "videos_handler"
def test_get_json(self):
response = self.client.get_json(self.url)
self.assertEqual(response.status_code, 200)
response_videos = json.loads(response.content)["videos"]
self.assertEqual(len(response_videos), len(self.previous_uploads))
for i, response_video in enumerate(response_videos):
# Videos should be returned by creation date descending
original_video = self.previous_uploads[-(i + 1)]
self.assertEqual(
set(response_video.keys()),
set(["edx_video_id", "client_video_id", "created", "duration", "status"])
)
dateutil.parser.parse(response_video["created"])
for field in ["edx_video_id", "client_video_id", "duration"]:
self.assertEqual(response_video[field], original_video[field])
self.assertEqual(
response_video["status"],
StatusDisplayStrings.get(original_video["status"])
)
def test_get_html(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertRegexpMatches(response["Content-Type"], "^text/html(;.*)?$")
# Crude check for presence of data in returned HTML
for video in self.previous_uploads:
self.assertIn(video["edx_video_id"], response.content)
def test_post_non_json(self):
response = self.client.post(self.url, {"files": []})
self.assertEqual(response.status_code, 400)
def test_post_malformed_json(self):
response = self.client.post(self.url, "{", content_type="application/json")
self.assertEqual(response.status_code, 400)
def test_post_invalid_json(self):
def assert_bad(content):
"""Make request with content and assert that response is 400"""
response = self.client.post(
self.url,
json.dumps(content),
content_type="application/json"
)
self.assertEqual(response.status_code, 400)
# Top level missing files key
assert_bad({})
# Entry missing file_name
assert_bad({"files": [{"content_type": "video/mp4"}]})
# Entry missing content_type
assert_bad({"files": [{"file_name": "test.mp4"}]})
@override_settings(AWS_ACCESS_KEY_ID="test_key_id", AWS_SECRET_ACCESS_KEY="test_secret")
@patch("boto.s3.key.Key")
@patch("boto.s3.connection.S3Connection")
def test_post_success(self, mock_conn, mock_key):
files = [
{
"file_name": "first.mp4",
"content_type": "video/mp4",
},
{
"file_name": "second.webm",
"content_type": "video/webm",
},
{
"file_name": "third.mov",
"content_type": "video/quicktime",
},
{
"file_name": "fourth.mp4",
"content_type": "video/mp4",
},
]
bucket = Mock()
mock_conn.return_value = Mock(get_bucket=Mock(return_value=bucket))
mock_key_instances = [
Mock(
generate_url=Mock(
return_value="http://example.com/url_{}".format(file_info["file_name"])
)
)
for file_info in files
]
# If extra calls are made, return a dummy
mock_key.side_effect = mock_key_instances + [Mock()]
response = self.client.post(
self.url,
json.dumps({"files": files}),
content_type="application/json"
)
self.assertEqual(response.status_code, 200)
response_obj = json.loads(response.content)
mock_conn.assert_called_once_with(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
self.assertEqual(len(response_obj["files"]), len(files))
self.assertEqual(mock_key.call_count, len(files))
for i, file_info in enumerate(files):
# Ensure Key was set up correctly and extract id
key_call_args, __ = mock_key.call_args_list[i]
self.assertEqual(key_call_args[0], bucket)
path_match = re.match(
(
settings.VIDEO_UPLOAD_PIPELINE["ROOT_PATH"] +
"/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12})$"
),
key_call_args[1]
)
self.assertIsNotNone(path_match)
video_id = path_match.group(1)
mock_key_instance = mock_key_instances[i]
mock_key_instance.set_metadata.assert_any_call(
"course_video_upload_token",
self.test_token
)
mock_key_instance.set_metadata.assert_any_call(
"client_video_id",
file_info["file_name"]
)
mock_key_instance.set_metadata.assert_any_call("course_key", unicode(self.course.id))
mock_key_instance.generate_url.assert_called_once_with(
KEY_EXPIRATION_IN_SECONDS,
"PUT",
headers={"Content-Type": file_info["content_type"]}
)
# Ensure VAL was updated
val_info = get_video_info(video_id)
self.assertEqual(val_info["status"], "upload")
self.assertEqual(val_info["client_video_id"], file_info["file_name"])
self.assertEqual(val_info["status"], "upload")
self.assertEqual(val_info["duration"], 0)
self.assertEqual(val_info["courses"], [unicode(self.course.id)])
# Ensure response is correct
response_file = response_obj["files"][i]
self.assertEqual(response_file["file_name"], file_info["file_name"])
self.assertEqual(response_file["upload_url"], mock_key_instance.generate_url())
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_VIDEO_UPLOAD_PIPELINE": True})
@override_settings(VIDEO_UPLOAD_PIPELINE={"BUCKET": "test_bucket", "ROOT_PATH": "test_root"})
class VideoUrlsCsvTestCase(VideoUploadTestMixin, CourseTestCase):
"""Test cases for the CSV download endpoint for video uploads"""
VIEW_NAME = "video_encodings_download"
def setUp(self):
super(VideoUrlsCsvTestCase, self).setUp()
VideoUploadConfig(profile_whitelist="profile1").save()
def _check_csv_response(self, expected_profiles):
"""
Check that the response is a valid CSV response containing rows
corresponding to previous_uploads and including the expected profiles.
"""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response["Content-Disposition"],
"attachment; filename={course}_video_urls.csv".format(course=self.course.id.course)
)
response_reader = StringIO(response.content)
reader = csv.DictReader(response_reader, dialect=csv.excel)
self.assertEqual(
reader.fieldnames,
(
["Name", "Duration", "Date Added", "Video ID", "Status"] +
["{} URL".format(profile) for profile in expected_profiles]
)
)
rows = list(reader)
self.assertEqual(len(rows), len(self.previous_uploads))
for i, row in enumerate(rows):
response_video = {
key.decode("utf-8"): value.decode("utf-8") for key, value in row.items()
}
# Videos should be returned by creation date descending
original_video = self.previous_uploads[-(i + 1)]
self.assertEqual(response_video["Name"], original_video["client_video_id"])
self.assertEqual(response_video["Duration"], str(original_video["duration"]))
dateutil.parser.parse(response_video["Date Added"])
self.assertEqual(response_video["Video ID"], original_video["edx_video_id"])
self.assertEqual(response_video["Status"], StatusDisplayStrings.get(original_video["status"]))
for profile in expected_profiles:
response_profile_url = response_video["{} URL".format(profile)]
original_encoded_for_profile = next(
(
original_encoded
for original_encoded in original_video["encoded_videos"]
if original_encoded["profile"] == profile
),
None
)
if original_encoded_for_profile:
self.assertEqual(response_profile_url, original_encoded_for_profile["url"])
else:
self.assertEqual(response_profile_url, "")
def test_basic(self):
self._check_csv_response(["profile1"])
def test_profile_whitelist(self):
VideoUploadConfig(profile_whitelist="profile1,profile2").save()
self._check_csv_response(["profile1", "profile2"])
def test_non_ascii_course(self):
course = CourseFactory.create(
number=u"nón-äscii",
video_upload_pipeline={
"course_video_upload_token": self.test_token,
}
)
response = self.client.get(self.get_url_for_course_key(course.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response["Content-Disposition"],
"attachment; filename=video_urls.csv; filename*=utf-8''n%C3%B3n-%C3%A4scii_video_urls.csv"
)
| agpl-3.0 |
matlongsi/micropay | contrib/devtools/github-merge.py | 46 | 10860 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.
# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
from __future__ import division,print_function,unicode_literals
import os
from sys import stdin,stdout,stderr
import argparse
import subprocess
import json,codecs
try:
from urllib.request import Request,urlopen
except:
from urllib2 import Request,urlopen
# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')

# OS specific configuration for terminal attributes
# (ANSI escapes are only assumed to work on POSIX terminals)
ATTR_RESET = ''
ATTR_PR = ''
COMMIT_FORMAT = '%h %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
    ATTR_RESET = '\033[0m'
    ATTR_PR = '\033[1;36m'
    COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
def git_config_get(option, default=None):
    '''
    Get named configuration option from git repository.

    Returns the option value as a unicode string, or *default* when the
    option is unset (`git config --get` exits with a non-zero status).
    '''
    try:
        return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8')
    except subprocess.CalledProcessError:
        # Option not set in the repository -> fall back to the default.
        return default
def retrieve_pr_info(repo,pull):
    '''
    Retrieve pull request information from github.
    Return None if no title can be found, or an error happens.
    '''
    try:
        req = Request("https://api.github.com/repos/"+repo+"/pulls/"+pull)
        result = urlopen(req)
        reader = codecs.getreader('utf-8')
        obj = json.load(reader(result))
        return obj
    except Exception as e:
        # Deliberately broad: any network/HTTP/JSON failure is best-effort —
        # warn and let the caller decide how to proceed without PR metadata.
        print('Warning: unable to retrieve pull information from github: %s' % e)
        return None
def ask_prompt(text):
    """Show *text* as a prompt on stderr and return one line read from stdin,
    with the trailing newline stripped."""
    # The prompt goes to stderr so stdout remains clean for piping.
    print(text, end=" ", file=stderr)
    stderr.flush()
    answer = stdin.readline().rstrip()
    # Terminate the prompt line on stderr after the user answered.
    print("", file=stderr)
    return answer
def parse_arguments():
    """Parse sys.argv; returns a namespace with `pull` (one-element int list)
    and optional `branch` (None when not given)."""
    epilog = '''
        In addition, you can set the following git configuration variables:
        githubmerge.repository (mandatory),
        user.signingkey (mandatory),
        githubmerge.host (default: git@github.com),
        githubmerge.branch (no default),
        githubmerge.testcmd (default: none).
    '''
    parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests',
            epilog=epilog)
    parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
        help='Pull request ID to merge')
    parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?',
        default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
    return parser.parse_args()
def main():
    """Interactively merge, test, sign and optionally push a github pull
    request. Exit codes: 1 config/signing errors, 3 git lookup failures,
    4 merge failures, 5 test failure, 6 diff rejected, 7 merge rejected."""
    # Extract settings from git repo
    repo = git_config_get('githubmerge.repository')
    host = git_config_get('githubmerge.host','git@github.com')
    opt_branch = git_config_get('githubmerge.branch',None)
    testcmd = git_config_get('githubmerge.testcmd')
    signingkey = git_config_get('user.signingkey')
    if repo is None:
        print("ERROR: No repository configured. Use this command to set:", file=stderr)
        print("git config githubmerge.repository <owner>/<repo>", file=stderr)
        exit(1)
    if signingkey is None:
        print("ERROR: No GPG signing key set. Set one using:",file=stderr)
        print("git config --global user.signingkey <key>",file=stderr)
        exit(1)
    host_repo = host+":"+repo # shortcut for push/pull target
    # Extract settings from command line
    args = parse_arguments()
    pull = str(args.pull[0])
    # Receive pull information from github
    info = retrieve_pr_info(repo,pull)
    if info is None:
        exit(1)
    title = info['title']
    # precedence order for destination branch argument:
    #   - command line argument
    #   - githubmerge.branch setting
    #   - base branch for pull (as retrieved from github)
    #   - 'master'
    branch = args.branch or opt_branch or info['base']['ref'] or 'master'
    # Initialize source branches
    head_branch = 'pull/'+pull+'/head'
    base_branch = 'pull/'+pull+'/base'
    merge_branch = 'pull/'+pull+'/merge'
    local_merge_branch = 'pull/'+pull+'/local-merge'
    devnull = open(os.devnull,'w')
    try:
        subprocess.check_call([GIT,'checkout','-q',branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*'])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find pull request #%s on %s." % (pull,host_repo), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
        exit(3)
    try:
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/heads/'+branch+':refs/heads/'+base_branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find branch %s on %s." % (branch,host_repo), file=stderr)
        exit(3)
    subprocess.check_call([GIT,'checkout','-q',base_branch])
    subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
    subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])

    try:
        # Create unsigned merge commit.
        if title:
            firstline = 'Merge #%s: %s' % (pull,title)
        else:
            firstline = 'Merge #%s' % (pull,)
        message = firstline + '\n\n'
        message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
        try:
            subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch])
        except subprocess.CalledProcessError as e:
            print("ERROR: Cannot be merged cleanly.",file=stderr)
            subprocess.check_call([GIT,'merge','--abort'])
            exit(4)
        logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
        if logmsg.rstrip() != firstline.rstrip():
            # A no-op merge means the commit subject is not ours.
            print("ERROR: Creating merge failed (already merged?).",file=stderr)
            exit(4)

        print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
        subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
        print()
        # Run test command if configured.
        if testcmd:
            # Go up to the repository's root.
            toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
            os.chdir(toplevel)
            if subprocess.call(testcmd,shell=True):
                print("ERROR: Running %s failed." % testcmd,file=stderr)
                exit(5)

            # Show the created merge.
            diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
            subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
            if diff:
                print("WARNING: merge differs from github!",file=stderr)
                reply = ask_prompt("Type 'ignore' to continue.")
                if reply.lower() == 'ignore':
                    print("Difference with github ignored.",file=stderr)
                else:
                    exit(6)
            reply = ask_prompt("Press 'd' to accept the diff.")
            if reply.lower() == 'd':
                print("Diff accepted.",file=stderr)
            else:
                print("ERROR: Diff rejected.",file=stderr)
                exit(6)
        else:
            # Verify the result manually.
            print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
            print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
            print("Type 'exit' when done.",file=stderr)
            if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
                os.putenv('debian_chroot',pull)
            subprocess.call([BASH,'-i'])
            reply = ask_prompt("Type 'm' to accept the merge.")
            if reply.lower() == 'm':
                print("Merge accepted.",file=stderr)
            else:
                print("ERROR: Merge rejected.",file=stderr)
                exit(7)

        # Sign the merge commit.
        reply = ask_prompt("Type 's' to sign off on the merge.")
        if reply == 's':
            try:
                subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
            except subprocess.CalledProcessError as e:
                print("Error signing, exiting.",file=stderr)
                exit(1)
        else:
            print("Not signing off on merge, exiting.",file=stderr)
            exit(1)

        # Put the result in branch.
        subprocess.check_call([GIT,'checkout','-q',branch])
        subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
    finally:
        # Clean up temporary branches.
        subprocess.call([GIT,'checkout','-q',branch])
        subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)

    # Push the result.
    reply = ask_prompt("Type 'push' to push the result to %s, branch %s." % (host_repo,branch))
    if reply.lower() == 'push':
        subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
if __name__ == '__main__':
    # Script entry point: run the interactive merge workflow.
    main()
| mit |
kerel-fs/skylines | tests/frontend/translations/validation_test.py | 3 | 1314 | import os
from glob import glob
from babel.messages.pofile import read_po
TRANSLATION_PATH = os.path.join('skylines', 'frontend', 'translations')
def get_language_code(filename):
    """Extract the language code from a ``.../<code>/LC_MESSAGES/messages.po`` path."""
    lc_messages_dir = os.path.dirname(filename)      # drop 'messages.po'
    language_dir = os.path.dirname(lc_messages_dir)  # drop 'LC_MESSAGES'
    return os.path.basename(language_dir)
def pytest_generate_tests(metafunc):
    """Parametrize each test with every language that has a message catalog."""
    po_files = glob(os.path.join(TRANSLATION_PATH, '*', 'LC_MESSAGES', 'messages.po'))
    metafunc.parametrize('language', [get_language_code(path) for path in po_files])
def test_pofile(language):
    """Parse the catalog for *language* with babel and fail on any check errors."""
    path = os.path.join(TRANSLATION_PATH, language, 'LC_MESSAGES', 'messages.po')
    with open(path) as fileobj:
        catalog = read_po(fileobj)

    errors = list(catalog.check())
    if errors:
        # Dump every problematic message (error text, line number, msgid and
        # current translation) so pytest's captured output shows what to fix.
        for message, merrors in errors:
            print 'Translation Error:'
            for error in merrors:
                s = str(error)
                if message.lineno:
                    s += ' (line ' + str(message.lineno) + ')'
                print s
            print
            print str(message.id) + '\n'
            print str(message.string) + '\n\n'
        raise AssertionError("There are errors in the translation files. Please check the captured output.")
| agpl-3.0 |
John-Lin/ryu | ryu/lib/of_config/__init__.py | 29 | 1641 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OF-Config implementation.
"""
import glob
import os.path
import sys
# we require ncclient
import ryu.contrib
ryu.contrib.update_module_path()
# Directory containing the bundled OF-Config XSD schema files.
SCHEMA_DIR = os.path.dirname(__file__)
_PREFIX = 'of-config-'
_SUFFIX = '.xsd'
_files = glob.glob(os.path.join(SCHEMA_DIR, 'of-config-*.xsd'))
# Map of schema version string (e.g. '1.1') -> path of its XSD file,
# derived by stripping the 'of-config-' prefix and '.xsd' suffix.
OF_CONFIG_XSD_FILES = dict(
    (os.path.basename(f)[len(_PREFIX):-len(_SUFFIX)], f) for f in _files)

# For convinience
# OF_CONFIG_1_0_XSD = os.path.join(SCHEMA_DIR, 'of-config-1.0.xsd')
# and so on
_this_module = sys.modules[__name__]
for (version, xsd_file) in OF_CONFIG_XSD_FILES.items():
    # Dynamically expose OF_CONFIG_<version>_XSD module attributes
    # (dots in the version become underscores).
    setattr(_this_module,
            'OF_CONFIG_%s_XSD' % version.replace('.', '_'), xsd_file)

OFCONFIG_1_1_CONFIG = 'urn:onf:params:xml:ns:onf:of12:config'
OFCONFIG_1_1_YANG = 'urn:onf:of12:config:yang'
# LINC specific?
OFCONFIG_1_1_1_YANG = 'urn:onf:of111:config:yang'
# XML namespace to use for each supported OF-Config YANG version.
OFCONFIG_YANG_NAMESPACES = {
    '1.1': OFCONFIG_1_1_YANG,
    '1.1.1': OFCONFIG_1_1_1_YANG,
}
| apache-2.0 |
theguardian/JIRA-APPy | lib/pycrypto/SelfTest/Hash/test_MD4.py | 116 | 2368 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/MD4.py: Self-test for the MD4 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.MD4"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (expected_result, input[, description]) tuples.
# Expected results are lowercase hex digest strings.
test_data = [
    # Test vectors from RFC 1320
    ('31d6cfe0d16ae931b73c59d7e0c089c0', '', "'' (empty string)"),
    ('bde52cb31de33e46245e05fbdbd6fb24', 'a'),
    ('a448017aaf21d8525fc10ae87aa6729d', 'abc'),
    ('d9130a8164549fe818874806e1c7014b', 'message digest'),
    ('d79e1c308aa5bbcdeea8ed63df412da9', 'abcdefghijklmnopqrstuvwxyz',
        'a-z'),
    ('043f8582f241db351ce627e153e7f0e4',
        'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
        'A-Z, a-z, 0-9'),
    ('e33b4ddc9c38f2199c3e7b164fcc0536',
        '1234567890123456789012345678901234567890123456'
        + '7890123456789012345678901234567890',
        "'1234567890' * 8"),
]
def get_tests(config={}):
    """Build the list of MD4 self-test cases from the RFC 1320 vectors above."""
    from Crypto.Hash import MD4
    from common import make_hash_tests
    # DER-encoded OID identifying the MD4 algorithm.
    md4_oid = "\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x04"
    return make_hash_tests(
        MD4, "MD4", test_data,
        digest_size=16,
        oid=md4_oid)
if __name__ == '__main__':
    # Allow running this module directly: build and execute the suite.
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-2.0 |
Accelerite/cinder | cinder/tests/test_backup.py | 2 | 32702 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup code.
"""
import tempfile
import mock
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils
from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.backup.fake_service_with_verify import\
get_backup_driver
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FakeBackupException(Exception):
    """Stand-in exception raised by mocked driver calls in these tests."""
class BaseBackupTest(test.TestCase):
    """Common fixture for backup manager tests: builds a backup manager with
    osprofiler tracing neutralized and provides DB-row factory helpers."""

    def setUp(self):
        super(BaseBackupTest, self).setUp()
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(volumes_dir=vol_tmpdir)
        # Patch osprofiler's class decorator to a pass-through while the
        # backup manager class is imported, so tracing doesn't wrap it.
        with mock.patch("osprofiler.profiler.trace_cls") as mock_trace_cls:
            side_effect = lambda value: value
            mock_decorator = mock.MagicMock(side_effect=side_effect)
            mock_trace_cls.return_value = mock_decorator
            self.backup_mgr = \
                importutils.import_object(CONF.backup_manager)
        self.backup_mgr.host = 'testhost'
        self.ctxt = context.get_admin_context()
        self.backup_mgr.driver.set_initialized()

    def _create_backup_db_entry(self, volume_id=1, display_name='test_backup',
                                display_description='this is a test backup',
                                container='volumebackups',
                                status='creating',
                                size=1,
                                object_count=0,
                                project_id='fake'):
        """Create a backup entry in the DB.

        Return the entry ID
        """
        backup = {}
        backup['volume_id'] = volume_id
        backup['user_id'] = 'fake'
        backup['project_id'] = project_id
        backup['host'] = 'testhost'
        backup['availability_zone'] = '1'
        backup['display_name'] = display_name
        backup['display_description'] = display_description
        backup['container'] = container
        backup['status'] = status
        backup['fail_reason'] = ''
        # Record the currently configured driver as the creating service.
        backup['service'] = CONF.backup_driver
        backup['size'] = size
        backup['object_count'] = object_count
        return db.backup_create(self.ctxt, backup)['id']

    def _create_volume_db_entry(self, display_name='test_volume',
                                display_description='this is a test volume',
                                status='backing-up',
                                size=1):
        """Create a volume entry in the DB.

        Return the entry ID
        """
        vol = {}
        vol['size'] = size
        vol['host'] = 'testhost'
        vol['user_id'] = 'fake'
        vol['project_id'] = 'fake'
        vol['status'] = status
        vol['display_name'] = display_name
        vol['display_description'] = display_description
        vol['attach_status'] = 'detached'
        return db.volume_create(self.ctxt, vol)['id']

    def _create_exported_record_entry(self, vol_size=1):
        """Create backup metadata export entry."""
        vol_id = self._create_volume_db_entry(status='available',
                                              size=vol_size)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        export = self.backup_mgr.export_record(self.ctxt, backup_id)
        return export

    def _create_export_record_db_entry(self,
                                       volume_id='0000',
                                       status='creating',
                                       project_id='fake'):
        """Create a backup entry in the DB.

        Return the entry ID
        """
        backup = {}
        backup['volume_id'] = volume_id
        backup['user_id'] = 'fake'
        backup['project_id'] = project_id
        backup['status'] = status
        return db.backup_create(self.ctxt, backup)['id']
class BackupTestCase(BaseBackupTest):
"""Test Case for backups."""
    def test_init_host(self):
        """Make sure stuck volumes and backups are reset to correct
        states when backup_manager.init_host() is called
        """
        vol1_id = self._create_volume_db_entry(status='backing-up')
        vol2_id = self._create_volume_db_entry(status='restoring-backup')
        backup1_id = self._create_backup_db_entry(status='creating')
        backup2_id = self._create_backup_db_entry(status='restoring')
        backup3_id = self._create_backup_db_entry(status='deleting')

        self.backup_mgr.init_host()
        # Interrupted backup source volume returns to 'available'.
        vol1 = db.volume_get(self.ctxt, vol1_id)
        self.assertEqual(vol1['status'], 'available')
        # Interrupted restore target volume is flagged as 'error_restoring'.
        vol2 = db.volume_get(self.ctxt, vol2_id)
        self.assertEqual(vol2['status'], 'error_restoring')

        # Interrupted backup creation ends in 'error'; interrupted restore
        # leaves the backup itself usable; interrupted delete is finished
        # (the backup row is gone).
        backup1 = db.backup_get(self.ctxt, backup1_id)
        self.assertEqual(backup1['status'], 'error')
        backup2 = db.backup_get(self.ctxt, backup2_id)
        self.assertEqual(backup2['status'], 'available')
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup3_id)
    def test_create_backup_with_bad_volume_status(self):
        """Test error handling when creating a backup from a volume
        with a bad status
        """
        # Volume must be 'backing-up'; 'available' is rejected here.
        vol_id = self._create_volume_db_entry(status='available', size=1)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        self.assertRaises(exception.InvalidVolume,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup_id)
    def test_create_backup_with_bad_backup_status(self):
        """Test error handling when creating a backup with a backup
        with a bad status
        """
        # Backup must be 'creating'; 'available' is rejected here.
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup_id)
    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup_with_error(self, _mock_volume_backup):
        """Test error handling when error occurs during backup creation."""
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)

        _mock_volume_backup.side_effect = FakeBackupException('fake')
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup_id)
        # On failure the volume is released and the backup marked 'error'.
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'available')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'error')
        self.assertTrue(_mock_volume_backup.called)
    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup(self, _mock_volume_backup):
        """Test normal backup creation."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)

        self.backup_mgr.create_backup(self.ctxt, backup_id)
        # Success releases the volume and records backup size/status.
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'available')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'available')
        self.assertEqual(backup['size'], vol_size)
        self.assertTrue(_mock_volume_backup.called)
    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup_with_notify(self, _mock_volume_backup, notify):
        """Test normal backup creation with notifications."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)

        self.backup_mgr.create_backup(self.ctxt, backup_id)
        # One usage notification at start and one at completion.
        self.assertEqual(2, notify.call_count)
    def test_restore_backup_with_bad_volume_status(self):
        """Test error handling when restoring a backup to a volume
        with a bad status.
        """
        # Target volume must be 'restoring-backup'; 'available' is rejected.
        vol_id = self._create_volume_db_entry(status='available', size=1)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        self.assertRaises(exception.InvalidVolume,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          vol_id)
        # The backup itself stays usable after the rejected attempt.
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'available')
    def test_restore_backup_with_bad_backup_status(self):
        """Test error handling when restoring a backup with a backup
        with a bad status.
        """
        # Backup must be 'restoring'; 'available' is rejected here.
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          vol_id)
        # Both the target volume and the backup end up in 'error'.
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'error')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'error')
    @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
    def test_restore_backup_with_driver_error(self, _mock_volume_restore):
        """Test error handling when an error occurs during backup restore."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='restoring',
                                                 volume_id=vol_id)

        _mock_volume_restore.side_effect = FakeBackupException('fake')
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          vol_id)
        # Driver failure marks the volume 'error_restoring' but keeps the
        # backup 'available' for a retry.
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'error_restoring')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual(backup['status'], 'available')
        self.assertTrue(_mock_volume_restore.called)
def test_restore_backup_with_bad_service(self):
    """Test error handling when attempting a restore of a backup
    with a different service to that used to create the backup.
    """
    vol_id = self._create_volume_db_entry(status='restoring-backup',
                                          size=1)
    backup_id = self._create_backup_db_entry(status='restoring',
                                             volume_id=vol_id)
    # Rewrite the backup's service so it no longer matches the
    # configured backup driver.
    service = 'cinder.tests.backup.bad_service'
    db.backup_update(self.ctxt, backup_id, {'service': service})
    self.assertRaises(exception.InvalidBackup,
                      self.backup_mgr.restore_backup,
                      self.ctxt,
                      backup_id,
                      vol_id)
    # Only the volume is flagged; the backup remains intact.
    vol = db.volume_get(self.ctxt, vol_id)
    self.assertEqual('error', vol['status'])
    backup = db.backup_get(self.ctxt, backup_id)
    self.assertEqual('available', backup['status'])

@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup(self, _mock_volume_restore):
    """Test normal backup restoration."""
    vol_size = 1
    vol_id = self._create_volume_db_entry(status='restoring-backup',
                                          size=vol_size)
    backup_id = self._create_backup_db_entry(status='restoring',
                                             volume_id=vol_id)
    self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
    # Both objects return to 'available' after a successful restore.
    vol = db.volume_get(self.ctxt, vol_id)
    self.assertEqual('available', vol['status'])
    backup = db.backup_get(self.ctxt, backup_id)
    self.assertEqual('available', backup['status'])
    self.assertTrue(_mock_volume_restore.called)

@mock.patch('cinder.volume.utils.notify_about_backup_usage')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup_with_notify(self, _mock_volume_restore, notify):
    """Test normal backup restoration with notifications."""
    vol_size = 1
    vol_id = self._create_volume_db_entry(status='restoring-backup',
                                          size=vol_size)
    backup_id = self._create_backup_db_entry(status='restoring',
                                             volume_id=vol_id)
    self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
    # One usage notification at the start and one at the end.
    self.assertEqual(2, notify.call_count)
def test_delete_backup_with_bad_backup_status(self):
    """Test error handling when deleting a backup with a backup
    with a bad status.
    """
    vol_id = self._create_volume_db_entry(size=1)
    # Only backups in 'deleting' status may be deleted.
    backup_id = self._create_backup_db_entry(status='available',
                                             volume_id=vol_id)
    self.assertRaises(exception.InvalidBackup,
                      self.backup_mgr.delete_backup,
                      self.ctxt,
                      backup_id)
    backup = db.backup_get(self.ctxt, backup_id)
    self.assertEqual('error', backup['status'])

def test_delete_backup_with_error(self):
    """Test error handling when an error occurs during backup deletion."""
    vol_id = self._create_volume_db_entry(size=1)
    # The fake backup service raises IOError for this display_name.
    backup_id = self._create_backup_db_entry(status='deleting',
                                             display_name='fail_on_delete',
                                             volume_id=vol_id)
    self.assertRaises(IOError,
                      self.backup_mgr.delete_backup,
                      self.ctxt,
                      backup_id)
    backup = db.backup_get(self.ctxt, backup_id)
    self.assertEqual('error', backup['status'])

def test_delete_backup_with_bad_service(self):
    """Test error handling when attempting a delete of a backup
    with a different service to that used to create the backup.
    """
    vol_id = self._create_volume_db_entry(size=1)
    backup_id = self._create_backup_db_entry(status='deleting',
                                             volume_id=vol_id)
    service = 'cinder.tests.backup.bad_service'
    db.backup_update(self.ctxt, backup_id, {'service': service})
    self.assertRaises(exception.InvalidBackup,
                      self.backup_mgr.delete_backup,
                      self.ctxt,
                      backup_id)
    backup = db.backup_get(self.ctxt, backup_id)
    self.assertEqual('error', backup['status'])

def test_delete_backup_with_no_service(self):
    """Test error handling when attempting a delete of a backup
    with no service defined for that backup, relates to bug #1162908
    """
    vol_id = self._create_volume_db_entry(size=1)
    backup_id = self._create_backup_db_entry(status='deleting',
                                             volume_id=vol_id)
    db.backup_update(self.ctxt, backup_id, {'service': None})
    # Must not raise: a missing service is tolerated on delete.
    self.backup_mgr.delete_backup(self.ctxt, backup_id)

def test_delete_backup(self):
    """Test normal backup deletion."""
    vol_id = self._create_volume_db_entry(size=1)
    backup_id = self._create_backup_db_entry(status='deleting',
                                             volume_id=vol_id)
    self.backup_mgr.delete_backup(self.ctxt, backup_id)
    # The backup is soft-deleted: invisible to a normal context...
    self.assertRaises(exception.BackupNotFound,
                      db.backup_get,
                      self.ctxt,
                      backup_id)
    # ...but still readable with read_deleted='yes'.
    ctxt_read_deleted = context.get_admin_context('yes')
    backup = db.backup_get(ctxt_read_deleted, backup_id)
    self.assertTrue(backup.deleted)
    self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
    self.assertEqual('deleted', backup.status)

@mock.patch('cinder.volume.utils.notify_about_backup_usage')
def test_delete_backup_with_notify(self, notify):
    """Test normal backup deletion with notifications."""
    vol_id = self._create_volume_db_entry(size=1)
    backup_id = self._create_backup_db_entry(status='deleting',
                                             volume_id=vol_id)
    self.backup_mgr.delete_backup(self.ctxt, backup_id)
    # One usage notification at the start and one at the end.
    self.assertEqual(2, notify.call_count)
def test_list_backup(self):
    """Listing by project returns only that project's backups."""
    backups = db.backup_get_all_by_project(self.ctxt, 'project1')
    self.assertEqual(0, len(backups))
    self._create_backup_db_entry()
    b2 = self._create_backup_db_entry(project_id='project1')
    backups = db.backup_get_all_by_project(self.ctxt, 'project1')
    self.assertEqual(1, len(backups))
    self.assertEqual(b2, backups[0].id)

def test_backup_get_all_by_project_with_deleted(self):
    """Test deleted backups don't show up in backup_get_all_by_project.
    Unless context.read_deleted is 'yes'.
    """
    backups = db.backup_get_all_by_project(self.ctxt, 'fake')
    self.assertEqual(0, len(backups))

    backup_id_keep = self._create_backup_db_entry()
    backup_id = self._create_backup_db_entry()
    db.backup_destroy(self.ctxt, backup_id)

    # Only the surviving backup is visible by default.
    backups = db.backup_get_all_by_project(self.ctxt, 'fake')
    self.assertEqual(1, len(backups))
    self.assertEqual(backup_id_keep, backups[0].id)

    # With read_deleted='yes' the destroyed one shows up too.
    ctxt_read_deleted = context.get_admin_context('yes')
    backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake')
    self.assertEqual(2, len(backups))

def test_backup_get_all_by_host_with_deleted(self):
    """Test deleted backups don't show up in backup_get_all_by_host.
    Unless context.read_deleted is 'yes'
    """
    backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
    self.assertEqual(0, len(backups))

    backup_id_keep = self._create_backup_db_entry()
    backup_id = self._create_backup_db_entry()
    db.backup_destroy(self.ctxt, backup_id)

    backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
    self.assertEqual(1, len(backups))
    self.assertEqual(backup_id_keep, backups[0].id)

    ctxt_read_deleted = context.get_admin_context('yes')
    backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
    self.assertEqual(2, len(backups))

def test_backup_manager_driver_name(self):
    """Test mapping between backup services and backup drivers."""
    # The legacy 'services' path must be mapped to the 'drivers' path.
    self.override_config('backup_driver', "cinder.backup.services.swift")
    backup_mgr = importutils.import_object(CONF.backup_manager)
    self.assertEqual('cinder.backup.drivers.swift',
                     backup_mgr.driver_name)
def test_export_record_with_bad_service(self):
    """Test error handling when attempting an export of a backup
    record with a different service to that used to create the backup.
    """
    vol_id = self._create_volume_db_entry(size=1)
    backup_id = self._create_backup_db_entry(status='available',
                                             volume_id=vol_id)
    service = 'cinder.tests.backup.bad_service'
    db.backup_update(self.ctxt, backup_id, {'service': service})
    self.assertRaises(exception.InvalidBackup,
                      self.backup_mgr.export_record,
                      self.ctxt,
                      backup_id)

def test_export_record_with_bad_backup_status(self):
    """Test error handling when exporting a backup record with a backup
    with a bad status.
    """
    vol_id = self._create_volume_db_entry(status='available',
                                          size=1)
    # Only 'available' backups may be exported.
    backup_id = self._create_backup_db_entry(status='error',
                                             volume_id=vol_id)
    self.assertRaises(exception.InvalidBackup,
                      self.backup_mgr.export_record,
                      self.ctxt,
                      backup_id)

def test_export_record(self):
    """Test normal backup record export."""
    vol_size = 1
    vol_id = self._create_volume_db_entry(status='available',
                                          size=vol_size)
    backup_id = self._create_backup_db_entry(status='available',
                                             volume_id=vol_id)
    export = self.backup_mgr.export_record(self.ctxt, backup_id)
    self.assertEqual(CONF.backup_driver, export['backup_service'])
    self.assertIn('backup_url', export)

def test_import_record_with_verify_not_implemented(self):
    """Test normal backup record import.

    Test the case when import succeeds for the case that the
    driver does not support verify.
    """
    vol_size = 1
    export = self._create_exported_record_entry(vol_size=vol_size)
    imported_record = self._create_export_record_db_entry()
    backup_hosts = []
    self.backup_mgr.import_record(self.ctxt,
                                  imported_record,
                                  export['backup_service'],
                                  export['backup_url'],
                                  backup_hosts)
    backup = db.backup_get(self.ctxt, imported_record)
    self.assertEqual('available', backup['status'])
    self.assertEqual(vol_size, backup['size'])

def test_import_record_with_bad_service(self):
    """Test error handling when attempting an import of a backup
    record with a different service to that used to create the backup.
    """
    export = self._create_exported_record_entry()
    export['backup_service'] = 'cinder.tests.backup.bad_service'
    imported_record = self._create_export_record_db_entry()

    # Test the case where the additional hosts list is empty.
    backup_hosts = []
    self.assertRaises(exception.ServiceNotFound,
                      self.backup_mgr.import_record,
                      self.ctxt,
                      imported_record,
                      export['backup_service'],
                      export['backup_url'],
                      backup_hosts)

    # Test that the import backup keeps calling other hosts to find a
    # suitable host for the backup service.
    backup_hosts = ['fake1', 'fake2']
    BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record'
    with mock.patch(BackupAPI_import) as _mock_backup_import:
        self.backup_mgr.import_record(self.ctxt,
                                      imported_record,
                                      export['backup_service'],
                                      export['backup_url'],
                                      backup_hosts)
        self.assertTrue(_mock_backup_import.called)

def test_import_record_with_invalid_backup(self):
    """Test error handling when attempting an import of a backup
    record where the backup driver returns an exception.
    """
    export = self._create_exported_record_entry()
    backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
    # Patch the concrete driver class's import_record to fail.
    _mock_record_import_class = ('%s.%s.%s' %
                                 (backup_driver.__module__,
                                  backup_driver.__class__.__name__,
                                  'import_record'))
    imported_record = self._create_export_record_db_entry()
    backup_hosts = []
    with mock.patch(_mock_record_import_class) as _mock_record_import:
        _mock_record_import.side_effect = FakeBackupException('fake')
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.import_record,
                          self.ctxt,
                          imported_record,
                          export['backup_service'],
                          export['backup_url'],
                          backup_hosts)
        self.assertTrue(_mock_record_import.called)
    backup = db.backup_get(self.ctxt, imported_record)
    self.assertEqual('error', backup['status'])
class BackupTestCaseWithVerify(BaseBackupTest):
    """Test Case for backups using a driver that implements verify()."""

    def setUp(self):
        # Swap in the fake backup service that supports verify() *before*
        # the base setUp() constructs the backup manager.
        self.override_config("backup_driver",
                             "cinder.tests.backup.fake_service_with_verify")
        super(BackupTestCaseWithVerify, self).setUp()

    def test_import_record_with_verify(self):
        """Test normal backup record import.

        Test the case when import succeeds for the case that the
        driver implements verify.
        """
        vol_size = 1
        export = self._create_exported_record_entry(vol_size=vol_size)
        imported_record = self._create_export_record_db_entry()
        backup_hosts = []
        backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
        _mock_backup_verify_class = ('%s.%s.%s' %
                                     (backup_driver.__module__,
                                      backup_driver.__class__.__name__,
                                      'verify'))
        with mock.patch(_mock_backup_verify_class):
            self.backup_mgr.import_record(self.ctxt,
                                          imported_record,
                                          export['backup_service'],
                                          export['backup_url'],
                                          backup_hosts)
        backup = db.backup_get(self.ctxt, imported_record)
        self.assertEqual('available', backup['status'])
        self.assertEqual(vol_size, backup['size'])

    def test_import_record_with_verify_invalid_backup(self):
        """Test error handling when attempting an import of a backup
        record where the backup driver returns an exception.
        """
        vol_size = 1
        export = self._create_exported_record_entry(vol_size=vol_size)
        imported_record = self._create_export_record_db_entry()
        backup_hosts = []
        backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
        _mock_backup_verify_class = ('%s.%s.%s' %
                                     (backup_driver.__module__,
                                      backup_driver.__class__.__name__,
                                      'verify'))
        with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
            # verify() failing must mark the imported backup as errored.
            _mock_record_verify.side_effect = \
                exception.InvalidBackup(reason='fake')
            self.assertRaises(exception.InvalidBackup,
                              self.backup_mgr.import_record,
                              self.ctxt,
                              imported_record,
                              export['backup_service'],
                              export['backup_url'],
                              backup_hosts)
            self.assertTrue(_mock_record_verify.called)
        backup = db.backup_get(self.ctxt, imported_record)
        self.assertEqual('error', backup['status'])

    def test_backup_reset_status_from_nonrestoring_to_available(self):
        vol_id = self._create_volume_db_entry(status='available',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='error',
                                                 volume_id=vol_id)
        with mock.patch.object(manager.BackupManager,
                               '_map_service_to_driver') as \
                mock_map_service_to_driver:
            mock_map_service_to_driver.return_value = \
                get_backup_driver(self.ctxt)
            self.backup_mgr.reset_status(self.ctxt,
                                         backup_id,
                                         'available')
        backup = db.backup_get(self.ctxt, backup_id)
        self.assertEqual('available', backup['status'])

    def test_backup_reset_status_to_available_invalid_backup(self):
        volume = db.volume_create(self.ctxt, {'status': 'available',
                                              'host': 'test',
                                              'provider_location': '',
                                              'size': 1})
        backup = db.backup_create(self.ctxt,
                                  {'status': 'error',
                                   'service': CONF.backup_driver,
                                   'volume_id': volume['id']})

        backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
        _mock_backup_verify_class = ('%s.%s.%s' %
                                     (backup_driver.__module__,
                                      backup_driver.__class__.__name__,
                                      'verify'))
        with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
            # A driver that cannot verify must refuse the reset...
            _mock_record_verify.side_effect = \
                exception.BackupVerifyUnsupportedDriver(reason='fake')
            self.assertRaises(exception.BackupVerifyUnsupportedDriver,
                              self.backup_mgr.reset_status,
                              self.ctxt,
                              backup['id'],
                              'available')
        # ...leaving the backup status untouched.
        backup = db.backup_get(self.ctxt, backup['id'])
        self.assertEqual('error', backup['status'])

    def test_backup_reset_status_from_restoring_to_available(self):
        volume = db.volume_create(self.ctxt,
                                  {'status': 'available',
                                   'host': 'test',
                                   'provider_location': '',
                                   'size': 1})
        backup = db.backup_create(self.ctxt,
                                  {'status': 'restoring',
                                   'service': CONF.backup_driver,
                                   'volume_id': volume['id']})
        # Resetting from 'restoring' does not require driver verify.
        self.backup_mgr.reset_status(self.ctxt,
                                     backup['id'],
                                     'available')
        backup = db.backup_get(self.ctxt, backup['id'])
        self.assertEqual('available', backup['status'])

    def test_backup_reset_status_to_error(self):
        volume = db.volume_create(self.ctxt,
                                  {'status': 'available',
                                   'host': 'test',
                                   'provider_location': '',
                                   'size': 1})
        backup = db.backup_create(self.ctxt,
                                  {'status': 'creating',
                                   'service': CONF.backup_driver,
                                   'volume_id': volume['id']})
        self.backup_mgr.reset_status(self.ctxt,
                                     backup['id'],
                                     'error')
        backup = db.backup_get(self.ctxt, backup['id'])
        self.assertEqual('error', backup['status'])
| apache-2.0 |
vuolter/pyload | src/pyload/core/managers/plugin_manager.py | 1 | 17073 | # -*- coding: utf-8 -*-
import importlib
import os
import re
import sys
from ast import literal_eval
from itertools import chain
# import semver
from pyload import APPID, PKGDIR
class PluginManager:
    """Discovers, indexes and lazily imports pyLoad plugins.

    Plugins are scanned both from the installed package
    (``pyload/plugins``) and from the user directory
    (``<userdir>/plugins``); a user plugin overrides the packaged one
    when its ``__version__`` is newer.  The manager also installs
    itself on ``sys.meta_path`` so imports of plugin modules can be
    redirected to the preferred copy (see find_module).
    """

    ROOT = "pyload.plugins."
    USERROOT = "plugins."
    TYPES = (
        "decrypter",
        "container",
        "downloader",
        "anticaptcha",
        "extractor",
        "account",
        "addon",
        "base",
    )

    # Regexes that scrape plugin metadata out of the source text without
    # importing the module.
    _PATTERN = re.compile(r'\s*__pattern__\s*=\s*r?(?:"|\')([^"\']+)')
    _VERSION = re.compile(r'\s*__version__\s*=\s*(?:"|\')([\d.]+)')
    # NOTE: the semver-based ``__pyload_version__`` compatibility check was
    # disabled upstream; re-enable it together with the `semver` import.
    _CONFIG = re.compile(r"\s*__config__\s*=\s*(\[[^\]]+\])", re.MULTILINE)
    _DESC = re.compile(r'\s*__description__\s*=\s*(?:"|"""|\')([^"\']+)', re.MULTILINE)

    def __init__(self, core):
        """:param core: the pyLoad core object (provides config, log, ...)."""
        self.pyload = core
        self._ = core._
        self.plugins = {}
        self.create_index()

        # Persist the config generated while indexing.
        self.pyload.config.save_config(
            self.pyload.config.plugin, self.pyload.config.pluginpath
        )

        # Register as an import hook so plugin imports are redirected.
        # NOTE(review): find_module/load_module is the legacy PEP 302 hook
        # API (deprecated in favour of find_spec, PEP 451) -- confirm
        # before targeting newer Python versions.
        sys.meta_path.append(self)

    @staticmethod
    def _merge(dst, src, overwrite=False):
        """Merge the dict-of-dicts ``src`` into ``dst`` in place.

        Existing inner keys of ``dst`` win unless ``overwrite`` is True.
        (Previously duplicated as a closure in create_index and
        reload_plugins.)
        """
        for name in src:
            if name in dst:
                if overwrite:
                    dst[name].update(src[name])
                else:
                    for key in set(src[name]) - set(dst[name]):
                        dst[name][key] = src[name][key]
            else:
                dst[name] = src[name]

    @staticmethod
    def _ensure_package(pfolder):
        """Make sure ``pfolder`` exists and contains an ``__init__.py``."""
        os.makedirs(pfolder, exist_ok=True)
        try:
            # `with` guarantees the handle is closed even on error.
            with open(os.path.join(pfolder, "__init__.py"), mode="wb"):
                pass
        except OSError:
            # Best effort only: an unwritable user dir is not fatal.
            pass

    def create_index(self):
        """Create information for all plugins available."""
        self.pyload.log.debug("Indexing plugins...")

        userplugins_dir = os.path.join(self.pyload.userdir, "plugins")
        sys.path.append(userplugins_dir)
        self._ensure_package(userplugins_dir)

        self.crypter_plugins, config = self.parse("decrypters", pattern=True)
        self.plugins["decrypter"] = self.crypter_plugins
        default_config = config

        self.container_plugins, config = self.parse("containers", pattern=True)
        self.plugins["container"] = self.container_plugins
        self._merge(default_config, config)

        self.hoster_plugins, config = self.parse("downloaders", pattern=True)
        self.plugins["downloader"] = self.hoster_plugins
        self._merge(default_config, config)

        self.addon_plugins, config = self.parse("addons")
        self.plugins["addon"] = self.addon_plugins
        self._merge(default_config, config)

        self.captcha_plugins, config = self.parse("anticaptchas")
        self.plugins["anticaptcha"] = self.captcha_plugins
        self._merge(default_config, config)

        self.extract_plugins, config = self.parse("extractors")
        self.plugins["extractor"] = self.extract_plugins
        self._merge(default_config, config)

        self.account_plugins, config = self.parse("accounts")
        self.plugins["account"] = self.account_plugins
        self._merge(default_config, config)

        self.internal_plugins, config = self.parse("base")
        self.plugins["base"] = self.internal_plugins
        self._merge(default_config, config)

        for name, config in default_config.items():
            desc = config.pop("desc", "")
            config = [[k] + list(v) for k, v in config.items()]
            try:
                self.pyload.config.add_plugin_config(name, config, desc)
            except Exception as exc:
                self.pyload.log.error(
                    self._("Invalid config in {}: {}").format(name, config),
                    exc,
                    exc_info=self.pyload.debug > 1,
                    stack_info=self.pyload.debug > 2,
                )

    def parse(self, folder, pattern=False, home=None):
        """Scan one plugin folder and return its metadata.

        :param folder: subfolder name, e.g. "downloaders".
        :param pattern: whether these plugins declare a URL __pattern__.
        :param home: already-parsed packaged plugins; truthy means we are
            scanning the *user* directory (previously a mutable ``{}``
            default, replaced by None to avoid the shared-default trap).
        :return: ``(plugins, configs)`` where plugins maps
            name -> {v, user, name, folder[, pattern, re]}.
        """
        plugins = {}
        if home:
            pfolder = os.path.join(self.pyload.userdir, "plugins", folder)
            self._ensure_package(pfolder)
        else:
            pfolder = os.path.join(PKGDIR, "plugins", folder)

        configs = {}
        for entry in os.listdir(pfolder):
            if not (
                os.path.isfile(os.path.join(pfolder, entry))
                and entry.endswith(".py")
                and not entry.startswith("_")
            ):
                continue

            with open(os.path.join(pfolder, entry)) as data:
                content = data.read()

            name = entry[:-3]
            if name[-1] == ".":
                name = name[:-4]

            m_ver = self._VERSION.search(content)
            if m_ver is None:
                self.pyload.log.debug(f"__version__ not found in plugin {name}")
                version = 0
            else:
                version = float(m_ver.group(1))

            # Skip user plugins that are not newer than the packaged copy.
            if isinstance(home, dict) and name in home:
                if home[name]["v"] >= version:
                    continue

            plugins[name] = {}
            plugins[name]["v"] = version

            module = entry.replace(".pyc", "").replace(".py", "")

            # Whether the plugin was loaded from the user directory.
            plugins[name]["user"] = True if home else False
            plugins[name]["name"] = module
            plugins[name]["folder"] = folder

            if pattern:
                m_pat = self._PATTERN.search(content)
                # Local name so we no longer clobber the `pattern` argument.
                pattern_str = r"^unmachtable$" if m_pat is None else m_pat.group(1)
                plugins[name]["pattern"] = pattern_str
                try:
                    plugins[name]["re"] = re.compile(pattern_str)
                except re.error:
                    self.pyload.log.error(
                        self._("{} has a invalid pattern").format(name)
                    )

            # internals have no config
            if folder == "base":
                self.pyload.config.delete_config(name)
                continue

            m_desc = self._DESC.search(content)
            desc = "" if m_desc is None else m_desc.group(1)

            config = self._CONFIG.findall(content)
            if not config:
                # No declared config: provide the bare enable switch.
                configs[name] = {
                    "enabled": ["bool", "Activated", False],
                    "desc": desc,
                }
                continue

            config = literal_eval(
                config[0].strip().replace("\n", "").replace("\r", "")
            )

            if isinstance(config, list) and all(
                isinstance(c, tuple) for c in config
            ):
                config = {x[0]: x[1:] for x in config}
            else:
                self.pyload.log.error(
                    self._("Invalid config in {}: {}").format(name, config)
                )
                continue

            if folder == "addons" and "enabled" not in config:
                config["enabled"] = ["bool", "Activated", False]

            config["desc"] = desc
            configs[name] = config

        # After scanning the packaged plugins, scan the user directory and
        # let newer user plugins override what was found.
        if not home and folder != "base":
            temp_plugins, temp_configs = self.parse(
                folder, pattern, plugins or True
            )
            plugins.update(temp_plugins)
            configs.update(temp_configs)

        return plugins, configs

    def parse_urls(self, urls):
        """Match urls to plugins.

        :return: list of tuples ``(url, plugin_name)``; unmatched urls map
            to "DefaultPlugin".
        """
        last = (None, {})
        res = []  #: tuples of (url, plugin_name)

        for url in urls:
            if not isinstance(url, (str, bytes, memoryview)):
                continue
            found = False

            # Fast path: consecutive urls often match the same plugin.
            if last != (None, {}) and last[1]["re"].match(url):
                res.append((url, last[0]))
                continue

            for name, value in chain(
                self.crypter_plugins.items(),
                self.hoster_plugins.items(),
                self.container_plugins.items(),
            ):
                if value["re"].match(url):
                    res.append((url, name))
                    last = (name, value)
                    found = True
                    break

            if not found:
                res.append((url, "DefaultPlugin"))

        return res

    def find_plugin(self, name, pluginlist=("decrypter", "downloader", "container")):
        """Return ``(info_dict, type)`` for ``name``, or ``(None, None)``."""
        for ptype in pluginlist:
            if name in self.plugins[ptype]:
                return self.plugins[ptype][name], ptype
        return None, None

    def get_plugin(self, name, original=False):
        """Return the plugin module from downloader|decrypter|container."""
        plugin, type = self.find_plugin(name)

        if not plugin:
            self.pyload.log.warning(self._("Plugin {} not found").format(name))
            # Fall back to the generic downloader plugin. Also retarget
            # name/type so load_module() below loads the fallback instead
            # of raising KeyError on `self.plugins[None]` (previous bug).
            plugin = self.hoster_plugins["DefaultPlugin"]
            name = "DefaultPlugin"
            type = "downloader"

        if "new_module" in plugin and not original:
            return plugin["new_module"]

        return self.load_module(type, name)

    def get_plugin_name(self, name):
        """Used to obtain new name if other plugin was injected."""
        plugin, type = self.find_plugin(name)
        # Guard: find_plugin returns (None, None) for unknown plugins;
        # previously this raised TypeError on `"new_name" in None`.
        if plugin and "new_name" in plugin:
            return plugin["new_name"]
        return name

    def load_module(self, type, name):
        """Return the loaded module for a plugin (cached per APPID).

        :param type: plugin type, subfolder of pyload.plugins.
        :param name: plugin name.
        :return: the module, or None if missing or the import failed.
        """
        plugins = self.plugins[type]

        if name not in plugins:
            self.pyload.log.debug(f"Plugin {name} not found")
            self.pyload.log.debug(f"Available plugins : {plugins}")
            return None

        if APPID in plugins[name]:
            return plugins[name][APPID]

        try:
            module_name = plugins[name]["name"]
            module_folder = plugins[name]["folder"]
            module = __import__(
                self.ROOT + f"{module_folder}.{module_name}",
                globals(),
                locals(),
                plugins[name]["name"],
            )
            plugins[name][APPID] = module  #: cache import, maybe unneeded
            return module
        except Exception as exc:
            self.pyload.log.error(
                self._("Error importing {name}: {msg}").format(name=name, msg=exc),
                exc_info=self.pyload.debug > 1,
                stack_info=self.pyload.debug > 2,
            )

    def load_class(self, type, name):
        """Return the class of a plugin with the same name."""
        module = self.load_module(type, name)
        if module:
            return getattr(module, name)

    def get_account_plugins(self):
        """Return list of account plugin names."""
        return list(self.account_plugins.keys())

    def find_module(self, fullname, path=None):
        """PEP 302 hook: claim the import when the *other* copy (user vs
        packaged) of a plugin module should be used instead.
        """
        if fullname.startswith(self.ROOT) or fullname.startswith(
            self.USERROOT
        ):  #: os.seperate pyload plugins
            if fullname.startswith(self.USERROOT):
                user = 1
            else:
                user = 0  #: used as bool and int (offset into the dotted path)

            split = fullname.split(".")
            if len(split) != 4 - user:
                return
            type, name = split[2 - user: 4 - user]

            if type in self.plugins and name in self.plugins[type]:
                # userplugin is a newer version
                if not user and self.plugins[type][name]["user"]:
                    return self
                # imported from userdir, but pyload's is newer
                if user and not self.plugins[type][name]["user"]:
                    return self

    def reload_plugins(self, type_plugins):
        """Reload and reindex the given ``(type, name)`` plugins.

        :return: True if a reload happened, False when nothing was done
            (empty input, or addons/base requested -- those are never
            reloaded because of side effects).
        """
        if not type_plugins:
            return False

        self.pyload.log.debug(f"Request reload of plugins: {type_plugins}")

        as_dict = {}
        for t, n in type_plugins:
            as_dict.setdefault(t, []).append(n)

        # we do not reload addons or internals, would cause too many
        # side effects
        if "addon" in as_dict or "base" in as_dict:
            return False

        for type in as_dict:
            for plugin in as_dict[type]:
                if plugin in self.plugins[type]:
                    if APPID in self.plugins[type][plugin]:
                        self.pyload.log.debug(f"Reloading {plugin}")
                        importlib.reload(self.plugins[type][plugin][APPID])

        # index creation
        self.crypter_plugins, config = self.parse("decrypters", pattern=True)
        self.plugins["decrypter"] = self.crypter_plugins
        default_config = config

        self.container_plugins, config = self.parse("containers", pattern=True)
        self.plugins["container"] = self.container_plugins
        self._merge(default_config, config)

        self.hoster_plugins, config = self.parse("downloaders", pattern=True)
        self.plugins["downloader"] = self.hoster_plugins
        self._merge(default_config, config)

        # Addons are parsed only for their config; the loaded addon
        # instances themselves are deliberately not replaced.
        temp, config = self.parse("addons")
        self._merge(default_config, config)

        self.captcha_plugins, config = self.parse("anticaptchas")
        self.plugins["anticaptcha"] = self.captcha_plugins
        self._merge(default_config, config)

        self.extract_plugins, config = self.parse("extractors")
        self.plugins["extractor"] = self.extract_plugins
        self._merge(default_config, config)

        self.account_plugins, config = self.parse("accounts")
        self.plugins["account"] = self.account_plugins
        self._merge(default_config, config)

        for name, config in default_config.items():
            desc = config.pop("desc", "")
            config = [[k] + list(v) for k, v in config.items()]
            try:
                self.pyload.config.add_plugin_config(name, config, desc)
            except Exception:
                self.pyload.log.error(
                    self._("Invalid config in {}: {}").format(name, config),
                    exc_info=self.pyload.debug > 1,
                    stack_info=self.pyload.debug > 2,
                )

        if "account" in as_dict:  #: accounts need to be reloaded
            self.pyload.account_manager.init_plugins()
            self.pyload.scheduler.add_job(
                0, self.pyload.account_manager.get_account_infos
            )

        return True
| agpl-3.0 |
nathanaelle/novm | novm/block.py | 3 | 1307 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Block device functions.
"""
import os
from . import virtio
from . import utils
class Disk(virtio.Driver):

    """ A Virtio block device. """

    virtio_driver = "block"

    def create(self,
               index=0,
               filename=None,
               dev=None,
               **kwargs):
        """Create a virtio block device backed by *filename*.

        :param index: used to derive the guest device name (vda, vdb, ...)
            when *dev* is not given.
        :param filename: backing file; defaults to /dev/null.
        :param dev: explicit guest device name.
        """
        if filename is None:
            filename = "/dev/null"
        if dev is None:
            dev = "vd" + chr(ord("a") + index)

        # Hand a duplicated descriptor to the device; the `with` block
        # closes the original handle deterministically instead of leaving
        # it to garbage collection (the dup survives the close).
        with open(filename, 'r+b') as f:
            fd = os.dup(f.fileno())
        # The fd must survive exec into the hypervisor process.
        utils.clear_cloexec(fd)

        return super(Disk, self).create(data={
            "dev": dev,
            "fd": fd,
        }, **kwargs)

virtio.Driver.register(Disk)
| apache-2.0 |
ibc/MediaSoup | worker/deps/gyp/test/mac/gyptest-xctest.py | 18 | 1262 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xctest targets are correctly configured.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode'])
# This test appears to be flaky.
test.skip_test() # bug=531
# Ignore this test if Xcode 5 is not installed
import subprocess
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
raise Exception('Error %d running xcodebuild' % job.returncode)
xcode_version, build_number = out.splitlines()
# Convert the version string from 'Xcode 5.0' to ['5','0'].
xcode_version = xcode_version.split()[-1].split('.')
if xcode_version < ['5']:
test.pass_test()
CHDIR = 'xctest'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', chdir=CHDIR, arguments=['-scheme', 'classes', 'test'])
test.built_file_must_match('tests.xctest/Contents/Resources/resource.txt',
'foo\n', chdir=CHDIR)
test.pass_test()
| isc |
intelie/pycollector | src/third/SimpleCV/Shell/Example.py | 2 | 1264 | #!/usr/bin/python
import os
import glob
from SimpleCV import *
def listFiles():
    """Return the module names (file names without the ``.py`` suffix) of
    all example scripts found under ./SimpleCV/examples, recursively.
    """
    files = []
    for dirname, dirnames, filenames in os.walk('./SimpleCV/examples'):
        # Collect this directory's python scripts exactly once.
        # The previous code nested this loop inside
        # `for subdirname in dirnames:`, which duplicated every result
        # once per subdirectory and returned nothing at all for
        # directories that had no subdirectories.
        for filename in filenames:
            if filename.endswith(".py"):
                files.append(filename.replace(".py", ""))
    return files
def magic_examples(self, arg):
# IPython magic handler: "example list" prints available examples,
# "example <name>" runs one from ./SimpleCV/examples.
# NOTE(review): Python 2 `print` statements throughout -- this module
# predates Python 3.
HOMEDIR = os.getcwd()
files = listFiles()
if(arg.lower() == "list"):
for file in files:
print file
elif(arg == ""):
# No argument: show usage help.
print "To use examples type:"
print "example name"
print ""
print "to see which examples are available type:"
print "example list"
print ""
elif(arg in files):
# Run the named example by importing it; cwd is changed so the
# example's relative paths resolve, and restored afterwards.
os.chdir("./SimpleCV/examples")
try:
__import__(arg)
except ImportError:
print "Error: can't run example: " + arg
os.chdir(HOMEDIR)
elif(arg.lower() == "joshua"):
# WarGames (1983) easter egg.
print "GREETINGS PROFESSOR FALKEN"
print ""
print "HELLO"
print ""
print "A STRANGE GAME."
print "THE ONLY WINNING MOVE IS"
print "NOT TO PLAY."
print ""
print "HOW ABOUT A NICE GAME OF CHESS?"
print ""
else:
print "Example: " + arg + " does not exist, or an error occured"
DanCech/graphite-web | webapp/graphite/url_shortener/views.py | 9 | 1045 | try:
from django.urls import reverse
except ImportError: # Django < 1.10
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponsePermanentRedirect
from graphite.url_shortener.baseconv import base62
from graphite.url_shortener.models import Link
import re
def follow(request, link_id):
    """Resolve a shortened link id and redirect to the stored URL."""
    primary_key = base62.to_decimal(link_id)
    link = get_object_or_404(Link, pk=primary_key)
    target = reverse('browser') + link.url
    return HttpResponsePermanentRedirect(target)
def shorten(request, path):
# Create (or reuse) a short link for *path* and return the follow URL
# as plain text.
if request.META.get('QUERY_STRING', None):
path += '?' + request.META['QUERY_STRING']
# Remove _salt, _dc and _uniq to avoid creating many copies of the same URL
# NOTE(review): the pattern only matches '&_param=' -- a cache-buster
# appearing as the *first* query parameter ('?_salt=...') is not
# stripped; confirm whether that case can reach this view.
path = re.sub('&_(uniq|salt|dc)=[0-9.]+', "", path)
link, created = Link.objects.get_or_create(url=path)
# Encode the numeric primary key as base62 to keep short URLs short.
link_id = base62.from_decimal(link.id)
url = reverse('follow', kwargs={'link_id': link_id})
return HttpResponse(url, content_type='text/plain')
| apache-2.0 |
oppia/oppia | core/domain/platform_parameter_domain.py | 2 | 29698 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for platform parameters."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import json
import re
from constants import constants
from core.domain import change_domain
import feconf
import python_utils
import utils
# Enum of the server environments the platform parameter system
# distinguishes.
SERVER_MODES = python_utils.create_enum('dev', 'test', 'prod') # pylint: disable=invalid-name
# Feature flags progress through the same three stages as server modes.
FEATURE_STAGES = SERVER_MODES # pylint: disable=invalid-name
# Enum of the data types a platform parameter value may have.
DATA_TYPES = python_utils.create_enum('bool', 'string', 'number') # pylint: disable=invalid-name
ALLOWED_SERVER_MODES = [
    SERVER_MODES.dev, SERVER_MODES.test, SERVER_MODES.prod]
ALLOWED_FEATURE_STAGES = [
    FEATURE_STAGES.dev, FEATURE_STAGES.test, FEATURE_STAGES.prod]
# Whitelists sourced from the shared constants module so that frontend and
# backend stay in sync.
ALLOWED_PLATFORM_TYPES = constants.PLATFORM_PARAMETER_ALLOWED_PLATFORM_TYPES
ALLOWED_BROWSER_TYPES = constants.PLATFORM_PARAMETER_ALLOWED_BROWSER_TYPES
ALLOWED_APP_VERSION_FLAVORS = (
    constants.PLATFORM_PARAMETER_ALLOWED_APP_VERSION_FLAVORS)
# Regexps for client app versions. Group 2 of the "with hash" form is
# treated as the version flavor by the validation/matching code below.
APP_VERSION_WITH_HASH_REGEXP = re.compile(
    constants.PLATFORM_PARAMETER_APP_VERSION_WITH_HASH_REGEXP)
APP_VERSION_WITHOUT_HASH_REGEXP = re.compile(
    constants.PLATFORM_PARAMETER_APP_VERSION_WITHOUT_HASH_REGEXP)
class PlatformParameterChange(change_domain.BaseChange):
    """Domain object for changes made to a platform parameter object.

    The allowed commands, together with the attributes:
        - 'edit_rules' (with new_rules)
    """

    CMD_EDIT_RULES = 'edit_rules'
    # Command schema consumed by change_domain.BaseChange validation:
    # only rule edits are allowed, and they must carry the new rule list.
    ALLOWED_COMMANDS = [{
        'name': CMD_EDIT_RULES,
        'required_attribute_names': ['new_rules'],
        'optional_attribute_names': [],
        'user_id_attribute_names': []
    }]
class EvaluationContext(python_utils.OBJECT):
    """Domain object representing the context for parameter evaluation.

    Bundles the client-provided attributes (platform, browser, app
    version) with the server-provided attribute (server mode) that
    parameter filters are matched against.
    """

    def __init__(
            self, platform_type, browser_type, app_version, server_mode):
        self._platform_type = platform_type
        self._browser_type = browser_type
        self._app_version = app_version
        self._server_mode = server_mode

    @property
    def platform_type(self):
        """The platform type.

        Returns:
            str. The platform type, e.g. 'Web', 'Android', 'Backend'.
        """
        return self._platform_type

    @property
    def browser_type(self):
        """The client browser type.

        Returns:
            str|None. The client browser type, e.g. 'Chrome', 'FireFox',
            'Edge'. None if the platform type is not Web.
        """
        return self._browser_type

    @property
    def app_version(self):
        # TODO(#11208): Update the documentation below to reflect the change
        # when the GAE app version is used for web & backend.
        """The client application version.

        Returns:
            str|None. The version of the native application, e.g. '1.0.0'.
            None if the platform type is Web.
        """
        return self._app_version

    @property
    def server_mode(self):
        """The server mode of Oppia.

        Returns:
            Enum(SERVER_MODES). The server mode of Oppia, one of: dev,
            test, prod.
        """
        return self._server_mode

    @property
    def is_valid(self):
        """Whether this context object is usable for evaluating parameters.

        An invalid context object usually indicates that one of the
        object's required fields is missing or holds an unexpected value.
        Note that objects which are not valid may still pass validation;
        this property should return True and validate() should not raise
        before the object is used for platform evaluation.

        Returns:
            bool. Whether this context object can be used for evaluating
            parameters.
        """
        return (
            self._platform_type is not None and
            self._platform_type in ALLOWED_PLATFORM_TYPES)

    def validate(self):
        """Raises an exception if the context is in an irrecoverable error
        state.

        Raises:
            ValidationError. The browser type, app version or server mode
                holds an invalid value.
        """
        browser = self._browser_type
        if browser is not None and browser not in ALLOWED_BROWSER_TYPES:
            raise utils.ValidationError(
                'Invalid browser type \'%s\', must be one of %s.' % (
                    browser, ALLOWED_BROWSER_TYPES))

        version = self._app_version
        if version is not None:
            match = APP_VERSION_WITH_HASH_REGEXP.match(version)
            if match is None:
                raise utils.ValidationError(
                    'Invalid version \'%s\', expected to match regexp %s.' % (
                        version, APP_VERSION_WITH_HASH_REGEXP))
            # Group 2 of the regexp carries the optional version flavor.
            flavor = match.group(2)
            if (flavor is not None and
                    flavor not in ALLOWED_APP_VERSION_FLAVORS):
                raise utils.ValidationError(
                    'Invalid version flavor \'%s\', must be one of %s if'
                    ' specified.' % (flavor, ALLOWED_APP_VERSION_FLAVORS))

        if self._server_mode not in ALLOWED_SERVER_MODES:
            raise utils.ValidationError(
                'Invalid server mode \'%s\', must be one of %s.' % (
                    self._server_mode, ALLOWED_SERVER_MODES))

    @classmethod
    def from_dict(cls, client_context_dict, server_context_dict):
        """Creates a new EvaluationContext object by combining both client
        side and server side context.

        Args:
            client_context_dict: dict. The client side context.
            server_context_dict: dict. The server side context.

        Returns:
            EvaluationContext. The corresponding EvaluationContext domain
            object.
        """
        return cls(
            client_context_dict.get('platform_type'),
            client_context_dict.get('browser_type'),
            client_context_dict.get('app_version'),
            server_context_dict.get('server_mode'),
        )
class PlatformParameterFilter(python_utils.OBJECT):
    """Domain object for filters in platform parameters.

    A filter has a type (e.g. 'server_mode') and a list of (op, value)
    conditions; the filter matches a context when at least one of its
    conditions matches.
    """

    SUPPORTED_FILTER_TYPES = [
        'server_mode', 'platform_type', 'browser_type', 'app_version',
        'app_version_flavor',
    ]
    # Equality-only filters compare exact values; version-style filters
    # additionally support ordering comparisons.
    SUPPORTED_OP_FOR_FILTERS = {
        'server_mode': ['='],
        'platform_type': ['='],
        'browser_type': ['='],
        'app_version_flavor': ['=', '<', '<=', '>', '>='],
        'app_version': ['=', '<', '<=', '>', '>='],
    }

    def __init__(self, filter_type, conditions):
        self._type = filter_type
        self._conditions = conditions

    @property
    def type(self):
        """Returns filter type.

        Returns:
            str. The filter type.
        """
        return self._type

    @property
    def conditions(self):
        """Returns filter conditions.

        Returns:
            list((str, str)). The filter conditions. Each element of the
            list is a 2-tuple (op, value), where op is the operator for
            comparison and value is the value used for comparison.
        """
        return self._conditions

    def evaluate(self, context):
        """Tries to match the given context with the filter against its
        value(s). The filter matches when any single condition matches.

        Args:
            context: EvaluationContext. The context for evaluation.

        Returns:
            bool. True if the filter is matched.
        """
        return any(
            self._evaluate_single_value(op, value, context)
            for op, value in self._conditions
        )

    def _evaluate_single_value(self, op, value, context):
        """Tries to match the given context with the filter against the
        given value.

        Args:
            op: str. The operator for comparison, e.g. '='.
            value: str. The value to match against.
            context: EvaluationContext. The context for evaluation.

        Returns:
            bool. True if the filter is matched.

        Raises:
            Exception. The operator is not supported by this filter type.
        """
        if op not in self.SUPPORTED_OP_FOR_FILTERS[self._type]:
            raise Exception(
                'Unsupported comparison operator \'%s\' for %s filter, '
                'expected one of %s.' % (
                    op, self._type, self.SUPPORTED_OP_FOR_FILTERS[self._type]))
        matched = False
        if self._type == 'server_mode' and op == '=':
            matched = context.server_mode.value == value
        elif self._type == 'platform_type' and op == '=':
            matched = context.platform_type == value
        elif self._type == 'browser_type' and op == '=':
            matched = context.browser_type == value
        elif self._type == 'app_version_flavor':
            matched = self._match_version_flavor(op, value, context.app_version)
        elif self._type == 'app_version':
            matched = self._match_version_expression(
                op, value, context.app_version)
        return matched

    def validate(self):
        """Validates the PlatformParameterFilter domain object.

        Raises:
            ValidationError. The filter type, an operator or a condition
                value is invalid.
        """
        if self._type not in self.SUPPORTED_FILTER_TYPES:
            raise utils.ValidationError(
                'Unsupported filter type \'%s\'' % self._type)

        for op, _ in self._conditions:
            if op not in self.SUPPORTED_OP_FOR_FILTERS[self._type]:
                raise utils.ValidationError(
                    'Unsupported comparison operator \'%s\' for %s filter, '
                    'expected one of %s.' % (
                        op, self._type,
                        self.SUPPORTED_OP_FOR_FILTERS[self._type]))

        if self._type == 'server_mode':
            for _, mode in self._conditions:
                if not any(
                        mode == server_mode.value
                        for server_mode in ALLOWED_SERVER_MODES):
                    raise utils.ValidationError(
                        'Invalid server mode \'%s\', must be one of %s.' % (
                            mode, ALLOWED_SERVER_MODES))
        elif self._type == 'platform_type':
            for _, platform_type in self._conditions:
                if platform_type not in ALLOWED_PLATFORM_TYPES:
                    raise utils.ValidationError(
                        'Invalid platform type \'%s\', must be one of %s.' % (
                            platform_type, ALLOWED_PLATFORM_TYPES))
        elif self._type == 'app_version_flavor':
            for _, flavor in self._conditions:
                if flavor not in ALLOWED_APP_VERSION_FLAVORS:
                    raise utils.ValidationError(
                        'Invalid app version flavor \'%s\', must be one of'
                        ' %s.' % (flavor, ALLOWED_APP_VERSION_FLAVORS))
        elif self._type == 'app_version':
            for _, version in self._conditions:
                if not APP_VERSION_WITHOUT_HASH_REGEXP.match(version):
                    # FIX: the message previously concatenated to
                    # 'expected to matchregexp' (missing space).
                    raise utils.ValidationError(
                        'Invalid version expression \'%s\', expected to '
                        'match regexp %s.' % (
                            version, APP_VERSION_WITHOUT_HASH_REGEXP))

    def to_dict(self):
        """Returns a dict representation of the PlatformParameterFilter
        domain object.

        Returns:
            dict. A dict mapping of all fields of PlatformParameterFilter
            object.
        """
        return {
            'type': self._type,
            'conditions': self._conditions,
        }

    @classmethod
    def from_dict(cls, filter_dict):
        """Returns an PlatformParameterFilter object from a dict.

        Args:
            filter_dict: dict. A dict mapping of all fields of
                PlatformParameterFilter object.

        Returns:
            PlatformParameterFilter. The corresponding
            PlatformParameterFilter domain object.
        """
        return cls(filter_dict['type'], filter_dict['conditions'])

    def _match_version_expression(self, op, value, client_version):
        """Tries to match the version expression against the client version.

        Args:
            op: str. The operator for comparison, e.g. '=', '>'.
            value: str. The version for comparison, e.g. '1.0.1'.
            client_version: str|None. The client version, e.g.
                '1.0.1-3aebf3h'.

        Returns:
            bool. True if the expression matches the version.
        """
        if client_version is None:
            return False

        match = APP_VERSION_WITH_HASH_REGEXP.match(client_version)
        # Group 1 is the dotted version with the optional hash stripped.
        client_version_without_hash = match.group(1)

        is_equal = value == client_version_without_hash
        is_client_version_smaller = self._is_first_version_smaller(
            client_version_without_hash, value)
        is_client_version_larger = self._is_first_version_smaller(
            value, client_version_without_hash
        )
        if op == '=':
            return is_equal
        elif op == '<':
            return is_client_version_smaller
        elif op == '<=':
            return is_equal or is_client_version_smaller
        elif op == '>':
            return is_client_version_larger
        elif op == '>=':
            return is_equal or is_client_version_larger
        # Unreachable when ops were validated beforehand.
        return False

    def _is_first_version_smaller(self, version_a, version_b):
        """Compares two version strings, return True if the first version is
        smaller.

        Args:
            version_a: str. The version string (e.g. '1.0.0').
            version_b: str. The version string (e.g. '1.0.0').

        Returns:
            bool. True if the first version is smaller.
        """
        version_a = version_a.split('.')
        version_b = version_b.split('.')
        for sub_version_a, sub_version_b in python_utils.ZIP(
                version_a, version_b):
            if int(sub_version_a) < int(sub_version_b):
                return True
            elif int(sub_version_a) > int(sub_version_b):
                return False
        return False

    def _match_version_flavor(self, op, flavor, client_version):
        """Matches the client version flavor.

        Args:
            op: str. The operator for comparison, e.g. '=', '>'.
            flavor: str. The flavor to match, e.g. 'alpha', 'beta', 'test',
                'release'.
            client_version: str|None. The version of the client, given in
                the form of '<version>-<hash>-<flavor>'. The hash and
                flavor of client version are optional, but if absent, no
                flavor filter will match to it.

        Returns:
            bool. True if the client_version matches the given flavor using
            the operator.
        """
        # FIX: previously a None client_version (e.g. a Web client with no
        # app version) raised TypeError in re.match; mirror
        # _match_version_expression and treat it as no match.
        if client_version is None:
            return False

        match = APP_VERSION_WITH_HASH_REGEXP.match(client_version)
        client_flavor = match.group(2)

        # An unspecified client flavor means no flavor-based filters should
        # match to it.
        if client_flavor is None:
            return False

        is_equal = flavor == client_flavor
        is_client_flavor_smaller = self._is_first_flavor_smaller(
            client_flavor, flavor)
        is_client_flavor_larger = self._is_first_flavor_smaller(
            flavor, client_flavor)

        if op == '=':
            return is_equal
        elif op == '<':
            return is_client_flavor_smaller
        elif op == '<=':
            return is_equal or is_client_flavor_smaller
        elif op == '>':
            return is_client_flavor_larger
        elif op == '>=':
            return is_equal or is_client_flavor_larger
        # Unreachable when ops were validated beforehand.
        return False

    def _is_first_flavor_smaller(self, flavor_a, flavor_b):
        """Compares two version flavors, return True if the first version is
        smaller in the following ordering:
        'test' < 'alpha' < 'beta' < 'release'.

        Args:
            flavor_a: str. The version flavor.
            flavor_b: str. The version flavor.

        Returns:
            bool. True if the first flavor is smaller.
        """
        return (
            ALLOWED_APP_VERSION_FLAVORS.index(flavor_a) <
            ALLOWED_APP_VERSION_FLAVORS.index(flavor_b)
        )
class PlatformParameterRule(python_utils.OBJECT):
    """Domain object for rules in platform parameters.

    A rule pairs a list of filters with the value the parameter takes
    when every one of those filters matches the evaluation context.
    """

    def __init__(self, filters, value_when_matched):
        self._filters = filters
        self._value_when_matched = value_when_matched

    @property
    def filters(self):
        """The filters of the rule.

        Returns:
            list(PlatformParameterFilter). The filters of the rule.
        """
        return self._filters

    @property
    def value_when_matched(self):
        """The value outcome if this rule is matched.

        Returns:
            *. The value outcome.
        """
        return self._value_when_matched

    def evaluate(self, context):
        """Tries to match the given context against the rule's filters.

        A rule is matched only when all of its filters match.

        Args:
            context: EvaluationContext. The context for evaluation.

        Returns:
            bool. True if the rule is matched.
        """
        return all(
            rule_filter.evaluate(context) for rule_filter in self._filters)

    def has_server_mode_filter(self):
        """Checks if the rule has a filter with type 'server_mode'.

        Returns:
            bool. True if the rule has a filter with type 'server_mode'.
        """
        return any(
            rule_filter.type == 'server_mode'
            for rule_filter in self._filters)

    def to_dict(self):
        """Returns a dict representation of the PlatformParameterRule domain
        object.

        Returns:
            dict. A dict mapping of all fields of PlatformParameterRule
            object.
        """
        return {
            'filters': [
                rule_filter.to_dict() for rule_filter in self._filters],
            'value_when_matched': self._value_when_matched,
        }

    def validate(self):
        """Validates the PlatformParameterRule domain object."""
        for rule_filter in self._filters:
            rule_filter.validate()

    @classmethod
    def from_dict(cls, rule_dict):
        """Returns an PlatformParameterRule object from a dict.

        Args:
            rule_dict: dict. A dict mapping of all fields of
                PlatformParameterRule object.

        Returns:
            PlatformParameterRule. The corresponding PlatformParameterRule
            domain object.
        """
        parsed_filters = [
            PlatformParameterFilter.from_dict(filter_dict)
            for filter_dict in rule_dict['filters']]
        return cls(parsed_filters, rule_dict['value_when_matched'])
class PlatformParameter(python_utils.OBJECT):
    """Domain object for platform parameters.

    A platform parameter has a typed default value plus an ordered list of
    rules; evaluation returns the value of the first rule that matches the
    context, falling back to the default. A parameter may additionally be
    a feature flag, which constrains its type to bool and its rules to the
    flag's rollout stage.
    """

    # Predicates used to type-check the default value and every rule's
    # value_when_matched against the declared data type.
    DATA_TYPE_PREDICATES_DICT = {
        DATA_TYPES.bool.value: lambda x: isinstance(x, bool),
        DATA_TYPES.string.value: (
            lambda x: isinstance(x, python_utils.BASESTRING)),
        DATA_TYPES.number.value: lambda x: isinstance(x, (float, int)),
    }

    # Parameter names are alphanumeric/underscore, 1-100 characters.
    PARAMETER_NAME_REGEXP = r'^[A-Za-z0-9_]{1,100}$'

    def __init__(
            self, name, description, data_type, rules,
            rule_schema_version, default_value, is_feature, feature_stage):
        self._name = name
        self._description = description
        self._data_type = data_type
        self._rules = rules
        self._rule_schema_version = rule_schema_version
        self._default_value = default_value
        self._is_feature = is_feature
        self._feature_stage = feature_stage

    @property
    def name(self):
        """Returns the name of the platform parameter.

        Returns:
            str. The name of the platform parameter.
        """
        return self._name

    @property
    def description(self):
        """Returns the description of the platform parameter.

        Returns:
            str. The description of the platform parameter.
        """
        return self._description

    @property
    def data_type(self):
        """Returns the data type of the platform parameter.

        Returns:
            DATA_TYPES. The data type of the platform parameter.
        """
        return self._data_type

    @property
    def rules(self):
        """Returns the rules of the platform parameter.

        Returns:
            list(PlatformParameterRules). The rules of the platform
            parameter.
        """
        return self._rules

    def set_rules(self, new_rules):
        """Sets the rules of the PlatformParameter.

        Args:
            new_rules: list(PlatformParameterRules). The new rules of the
                parameter.
        """
        self._rules = new_rules

    @property
    def rule_schema_version(self):
        """Returns the schema version of the rules.

        Returns:
            int. The schema version of the rules.
        """
        return self._rule_schema_version

    @property
    def default_value(self):
        """Returns the default value of the platform parameter.

        Returns:
            *. The default value of the platform parameter.
        """
        return self._default_value

    @property
    def is_feature(self):
        """Returns whether this parameter is also a feature flag.

        Returns:
            bool. True if the parameter is a feature flag.
        """
        return self._is_feature

    @property
    def feature_stage(self):
        """Returns the stage of the feature flag.

        Returns:
            FEATURE_STAGES|None. The stage of the feature flag, None if the
            parameter isn't a feature flag.
        """
        return self._feature_stage

    def validate(self):
        """Validates the PlatformParameter domain object.

        Raises:
            ValidationError. The name, data type, default value or one of
                the rules is invalid.
        """
        if re.match(self.PARAMETER_NAME_REGEXP, self._name) is None:
            raise utils.ValidationError(
                'Invalid parameter name \'%s\', expected to match regexp '
                '%s.' % (self._name, self.PARAMETER_NAME_REGEXP))

        if self._data_type not in self.DATA_TYPE_PREDICATES_DICT:
            raise utils.ValidationError(
                'Unsupported data type \'%s\'.' % self._data_type)

        # The same predicate checks the default and every rule outcome.
        predicate = self.DATA_TYPE_PREDICATES_DICT[self.data_type]
        if not predicate(self._default_value):
            raise utils.ValidationError(
                'Expected %s, received \'%s\' in default value.' % (
                    self._data_type, self._default_value))
        for rule in self._rules:
            if not predicate(rule.value_when_matched):
                raise utils.ValidationError(
                    'Expected %s, received \'%s\' in value_when_matched.' % (
                        self._data_type, rule.value_when_matched))
            if not rule.has_server_mode_filter():
                raise utils.ValidationError(
                    'All rules must have a server_mode filter.')
            rule.validate()

        if self._is_feature:
            self._validate_feature_flag()

    def evaluate(self, context):
        """Evaluates the value of the platform parameter in the given context.
        The value of first matched rule is returned as the result.

        Note that if the provided context is in an invalid state (e.g. its
        is_valid property returns false) then this parameter will defer to its
        default value since it may not be safe to partially evaluate the
        parameter for an unrecognized or partially recognized context.

        Args:
            context: EvaluationContext. The context for evaluation.

        Returns:
            *. The evaluate result of the platform parameter.
        """
        if context.is_valid:
            # Rule order matters: the first match wins.
            for rule in self._rules:
                if rule.evaluate(context):
                    return rule.value_when_matched
        return self._default_value

    def to_dict(self):
        """Returns a dict representation of the PlatformParameter domain
        object.

        Returns:
            dict. A dict mapping of all fields of PlatformParameter object.
        """
        return {
            'name': self._name,
            'description': self._description,
            'data_type': self._data_type,
            'rules': [rule.to_dict() for rule in self._rules],
            'rule_schema_version': self._rule_schema_version,
            'default_value': self._default_value,
            'is_feature': self._is_feature,
            'feature_stage': self._feature_stage
        }

    def _validate_feature_flag(self):
        """Validates the PlatformParameter domain object that is a feature
        flag.

        Raises:
            ValidationError. The data type is not bool, the feature stage
                is unknown, or a rule enables the feature in an environment
                its stage does not allow.
        """
        if self._data_type != DATA_TYPES.bool.value:
            raise utils.ValidationError(
                'Data type of feature flags must be bool, got \'%s\' '
                'instead.' % self._data_type)
        if not any([self._feature_stage == feature_stage.value
                    for feature_stage in ALLOWED_FEATURE_STAGES]):
            raise utils.ValidationError(
                'Invalid feature stage, got \'%s\', expected one of %s.' % (
                    self._feature_stage, ALLOWED_FEATURE_STAGES))
        # Only rules that turn the feature on are constrained by stage.
        enabling_rules = [
            rule for rule in self._rules if rule.value_when_matched]
        for rule in enabling_rules:
            server_mode_filters = [
                server_mode_filter for server_mode_filter in rule.filters
                if server_mode_filter.type == 'server_mode']
            for server_mode_filter in server_mode_filters:
                server_modes = [
                    value for _, value in server_mode_filter.conditions]
                # dev-stage features may only be enabled in dev;
                # test-stage features in dev/test; prod-stage anywhere.
                if self._feature_stage == FEATURE_STAGES.dev.value:
                    if (
                            SERVER_MODES.test.value in server_modes or
                            SERVER_MODES.prod.value in server_modes):
                        raise utils.ValidationError(
                            'Feature in dev stage cannot be enabled in test or'
                            ' production environments.')
                elif self._feature_stage == FEATURE_STAGES.test.value:
                    if SERVER_MODES.prod.value in server_modes:
                        raise utils.ValidationError(
                            'Feature in test stage cannot be enabled in '
                            'production environment.')

    @classmethod
    def from_dict(cls, param_dict):
        """Returns an PlatformParameter object from a dict.

        Args:
            param_dict: dict. A dict mapping of all fields of
                PlatformParameter object.

        Returns:
            PlatformParameter. The corresponding PlatformParameter domain
            object.

        Raises:
            Exception. The rule schema version is not the current one and
                no conversion method exists.
        """
        if (param_dict['rule_schema_version'] !=
                feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION):
            # NOTE: When there's a new rule schema version, a new method with
            # name of the form '_convert_rule_v1_dict_to_v2_dict` should be
            # added to the class and called here to convert the rule dicts to
            # the latest schema.
            raise Exception(
                'Current platform parameter rule schema version is v%s, '
                'received v%s, and there\'s no convert method from v%s to '
                'v%s.' % (
                    feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION,
                    param_dict['rule_schema_version'],
                    feconf.CURRENT_PLATFORM_PARAMETER_RULE_SCHEMA_VERSION,
                    param_dict['rule_schema_version']))

        return cls(
            param_dict['name'],
            param_dict['description'],
            param_dict['data_type'],
            [
                PlatformParameterRule.from_dict(rule_dict)
                for rule_dict in param_dict['rules']],
            param_dict['rule_schema_version'],
            param_dict['default_value'],
            param_dict['is_feature'],
            param_dict['feature_stage'],
        )

    def serialize(self):
        """Returns the object serialized as a JSON string.

        Returns:
            str. JSON-encoded string encoding all of the information composing
            the object.
        """
        platform_parameter_dict = self.to_dict()
        return json.dumps(platform_parameter_dict)

    @classmethod
    def deserialize(cls, json_string):
        """Returns a PlatformParameter domain object decoded from a JSON
        string.

        Args:
            json_string: str. A JSON-encoded string that can be
                decoded into a dictionary representing a PlatformParameter.
                Only call on strings that were created using serialize().

        Returns:
            PlatformParameter. The corresponding PlatformParameter domain
            object.
        """
        platform_parameter_dict = json.loads(json_string)
        platform_parameter = cls.from_dict(
            platform_parameter_dict)
        return platform_parameter
| apache-2.0 |
PythonNut/servo | tests/wpt/css-tests/tools/html5lib/html5lib/inputstream.py | 618 | 30855 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser, which
# operates on raw bytes before the encoding is known.
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])

# Matches code points that are invalid in HTML text content: C0/C1
# controls, surrogates, and the permanent non-characters.
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")

# The non-character code points above the Basic Multilingual Plane, kept
# as ints for the narrow-build (UCS-2) error path where they only appear
# as surrogate pairs.
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                  0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                                  0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                                  0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                                  0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                  0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                                  0x10FFFE, 0x10FFFF])

ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")

# Cache for charsUntil(), keyed by (characters, opposite).
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own.

    Buffered data is held as a list of chunks rather than a single string
    because repeatedly concatenating strings would be O(n**2).
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        self.position = [-1, 0]  # chunk number, offset

    def tell(self):
        """Return the absolute offset of the read cursor."""
        consumed = sum(len(chunk) for chunk in self.buffer[:self.position[0]])
        return consumed + self.position[1]

    def seek(self, pos):
        """Move the cursor to *pos*; only already-buffered data is seekable."""
        assert pos <= self._bufferedBytes()
        remaining = pos
        index = 0
        while len(self.buffer[index]) < remaining:
            remaining -= len(self.buffer[index])
            index += 1
        self.position = [index, remaining]

    def read(self, bytes):
        """Read up to *bytes* bytes, serving from the buffer when possible."""
        if not self.buffer:
            return self._readStream(bytes)
        past_buffer = (
            self.position[0] == len(self.buffer) and
            self.position[1] == len(self.buffer[-1]))
        if past_buffer:
            return self._readStream(bytes)
        return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        # Total number of bytes currently held in the buffer.
        return sum(len(chunk) for chunk in self.buffer)

    def _readStream(self, bytes):
        # Pull a fresh chunk from the wrapped stream and record it.
        chunk = self.stream.read(bytes)
        self.buffer.append(chunk)
        self.position[0] += 1
        self.position[1] = len(chunk)
        return chunk

    def _readFromBuffer(self, bytes):
        # Satisfy the read from buffered chunks first, then fall through
        # to the underlying stream for any remainder.
        wanted = bytes
        pieces = []
        index, offset = self.position
        while index < len(self.buffer) and wanted != 0:
            assert wanted > 0
            chunk = self.buffer[index]
            available = len(chunk) - offset
            if wanted <= available:
                take = wanted
                self.position = [index, offset + take]
            else:
                take = available
                self.position = [index, len(chunk)]
                index += 1
            pieces.append(chunk[offset:offset + take])
            wanted -= take
            offset = 0
        if wanted:
            pieces.append(self._readStream(wanted))
        return b"".join(pieces)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
    """Return the appropriate input-stream wrapper for *source*.

    Unicode sources get an HTMLUnicodeInputStream (for which an explicit
    encoding is an error); everything else goes through the byte-decoding
    HTMLBinaryInputStream.
    """
    if isinstance(source, http_client.HTTPResponse):
        # Work around Python bug #20007: read(0) closes the connection.
        # http://bugs.python.org/issue20007
        wants_unicode = False
    elif hasattr(source, "read"):
        wants_unicode = isinstance(source.read(0), text_type)
    else:
        wants_unicode = isinstance(source, text_type)

    if wants_unicode:
        if encoding is not None:
            raise TypeError("Cannot explicitly set an encoding with a unicode string")
        return HTMLUnicodeInputStream(source)
    return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Craziness
if len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
    def unget(self, char):
        """Push a single character back onto the stream.

        Only one character may be ungotten at a time; it must be consumed
        again before any further call to unget.  ``None`` (EOF) is ignored.
        """
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                # Common case: simply step the offset back over the char.
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """

    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        parseMeta - Look for a <meta> element containing encoding information
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)

        # (codec name, confidence); "certain" is only kept when the caller
        # supplied an explicit transport-level encoding (or a BOM is found).
        self.charEncoding = (codecName(encoding), "certain")

        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 512
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"

        # Detect encoding iff no explicit "transport level" encoding is supplied
        if self.charEncoding[0] is None:
            self.charEncoding = self.detectEncoding(parseMeta, chardet)

        # Call superclass
        self.reset()

    def reset(self):
        """(Re)build the decoding reader over the raw byte stream using the
        current ``self.charEncoding`` and reset the unicode-level state."""
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        HTMLUnicodeInputStream.reset(self)

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)

        try:
            stream.seek(stream.tell())
        except Exception:
            # Non-seekable stream (e.g. a socket): wrap it so the encoding
            # sniffing below can rewind.  (Was a bare ``except:``.)
            stream = BufferedStream(stream)

        return stream

    def detectEncoding(self, parseMeta=True, chardet=True):
        """Sniff the stream encoding: BOM, then <meta> prescan, then chardet,
        then the windows-1252 default.  Returns (encoding, confidence)."""
        # First look for a BOM
        # This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        # If there is no BOM need to look for meta elements with encoding
        # information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        # Guess with chardet, if available
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                try:
                    from charade.universaldetector import UniversalDetector
                except ImportError:
                    from chardet.universaldetector import UniversalDetector
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                self.rawStream.seek(0)
            except ImportError:
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence = "tentative"
            encoding = self.defaultEncoding

        # Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1": "windows-1252"}
        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]

        return encoding, confidence

    def changeEncoding(self, newEncoding):
        """Switch to *newEncoding* (e.g. after a late <meta> declaration).

        Raises ReparseException when the stream has to be re-parsed from the
        start with the new encoding.
        """
        assert self.charEncoding[1] != "certain"
        newEncoding = codecName(newEncoding)
        # An in-band UTF-16 declaration cannot be truthful (the prescan read
        # the bytes as ASCII-compatible); treat it as UTF-8.
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            # Same encoding; just upgrade the confidence.
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            oldEncoding = self.charEncoding[0]
            self.rawStream.seek(0)
            # BUGFIX: update the encoding *before* reset() -- reset() builds
            # the new dataStream from self.charEncoding, so the previous
            # order decoded with the stale codec, and the exception message
            # reported the new encoding twice.
            self.charEncoding = (newEncoding, "certain")
            self.reset()
            raise ReparseException("Encoding changed from %s to %s" %
                                   (oldEncoding, newEncoding))

    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }

        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)

        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2

        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)

        return encoding

    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()

        # A declared UTF-16 encoding is impossible for content the ASCII
        # prescan could read; fall back to UTF-8 as the spec requires.
        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            encoding = "utf-8"

        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        # NOTE(review): first parameter is conventionally named ``cls``; it
        # is the class here, not an instance.  Data is lower-cased so all
        # matching below is case-insensitive.
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())
    def __init__(self, value):
        # Position starts *before* the first byte; __next__ advances to 0.
        self._position = -1
    def __iter__(self):
        return self
    def __next__(self):
        # Advance the position and return the byte there as a length-1
        # bytes object (plain indexing would yield an int on Python 3).
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]
    def next(self):
        # Py2 compat
        return self.__next__()
    def previous(self):
        # Step the position back by one and return the byte at the new
        # position.  Mirrors __next__'s bounds checking.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]
    def setPosition(self, position):
        # Raising StopIteration here propagates "ran off the end" out of
        # whatever handler is driving the parse.
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            # Before the first byte (iteration not started).
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        # Length-1 bytes at the current position (b"" at/after the end).
        return self[self.position:self.position + 1]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        # Returns the first byte NOT in ``chars`` (position left on it),
        # or None when the end of data is reached.
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def skipUntil(self, chars):
        # Inverse of skip(): advance until a byte IN ``chars`` is found.
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            # Not found: signal end-of-data to the caller.
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""
    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        # Set by handleMeta() when a usable declaration is found.
        self.encoding = None
    def getEncoding(self):
        """Scan the bytes for markup that may declare an encoding.
        Returns the codec name found, or None."""
        # Dispatch table: the first byte-prefix that matches at the current
        # position selects the handler.  Order matters ("<!--" before "<!").
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        # Handlers return False to stop the scan (encoding
                        # found) and raise StopIteration at end of data.
                        keepParsing = method()
                        break
                    except StopIteration:
                        keepParsing = False
                        break
            if not keepParsing:
                break
        return self.encoding
    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")
    def handleMeta(self):
        """Scan a <meta> element's attributes for charset/http-equiv/content
        declarations.  Returns False once an encoding has been committed."""
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    # A pragma seen *after* the content attribute commits
                    # the encoding parsed earlier.
                    if hasPragma and pendingEncoding is not None:
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = codecName(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Hold until an http-equiv pragma confirms it.
                                pendingEncoding = codec
    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)
    def handlePossibleEndTag(self):
        # Consume the byte after "</" before inspecting it.
        next(self.data)
        return self.handlePossibleTag(True)
    def handlePossibleTag(self, endTag):
        """Skip over a (non-meta) tag, consuming its attributes."""
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True
        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True
    def handleOther(self):
        # Comments/doctypes/PIs: skip to the closing ">".
        return self.data.jumpTo(b">")
    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                # Attribute names are matched case-insensitively.
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11 (unquoted value: read until whitespace or angle bracket)
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extracts the charset token from an http-equiv ``content`` attribute
    value, e.g. b"text/html; charset=utf-8" -> b"utf-8".

    ``data`` is expected to be an EncodingBytes instance (see the caller in
    EncodingParser.handleMeta); plain bytes lack the position machinery
    used below.
    """
    def __init__(self, data):
        assert isinstance(data, bytes)
        self.data = data
    def parse(self):
        """Return the charset value as bytes, or None when it is missing or
        malformed.  StopIteration from running off the end of the data is
        treated as "not found"."""
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            return None
def codecName(encoding):
    """Map an encoding label to the corresponding Python codec name.

    Returns None when the label is empty/None, cannot be decoded as ASCII,
    or does not name a known encoding.
    """
    if isinstance(encoding, bytes):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None
    if not encoding:
        return None
    # Strip ASCII punctuation/whitespace and lower-case before the lookup.
    canonical = ascii_punctuation_re.sub("", encoding).lower()
    return encodings.get(canonical, None)
| mpl-2.0 |
un33k/CouchPotatoServer | couchpotato/core/downloaders/nzbvortex.py | 44 | 8228 | from base64 import b64encode
import os
from uuid import uuid4
import hashlib
import traceback
from requests import HTTPError
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, sp
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
# Module-level logger and the plugin name CouchPotato's loader registers.
log = CPLog(__name__)
autoload = 'NZBVortex'
class NZBVortex(DownloaderBase):
    """Downloader plugin that talks to a local NZBVortex instance over its
    HTTP API (nonce-based login, session id on every call)."""
    # Download protocols this plugin can handle.
    protocol = ['nzb']
    # Cached API level reported by NZBVortex (float), fetched lazily.
    api_level = None
    # Session id obtained from auth/login; added to every API request.
    session_id = None
    def download(self, data = None, media = None, filedata = None):
        """ Send a torrent/nzb file to the downloader
        :param data: dict returned from provider
            Contains the release information
        :param media: media dict with information
            Used for creating the filename when possible
        :param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and send to this function
            This is done to have failed checking before using the downloader, so the downloader
            doesn't need to worry about that
        :return: boolean
            One faile returns false, but the downloaded should log his own errors
        """
        if not media: media = {}
        if not data: data = {}
        # Send the nzb
        try:
            nzb_filename = self.createFileName(data, filedata, media, unique_tag = True)
            response = self.call('nzb/add', files = {'file': (nzb_filename, filedata, 'application/octet-stream')}, parameters = {
                'name': nzb_filename,
                'groupname': self.conf('group')
            })
            if response and response.get('result', '').lower() == 'ok':
                return self.downloadReturnId(nzb_filename)
            log.error('Something went wrong sending the NZB file. Response: %s', response)
            return False
        # NOTE(review): bare except is deliberate best-effort here (a failing
        # downloader must not crash the app), but consider narrowing it.
        except:
            log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
            return False
    def test(self):
        """ Check if connection works
        :return: bool
        """
        try:
            login_result = self.login()
        except:
            return False
        return login_result
    def getAllDownloadStatus(self, ids):
        """ Get status of all active downloads
        :param ids: list of (mixed) downloader ids
            Used to match the releases for this downloader as there could be
            other downloaders active that it should ignore
        :return: list of releases
        """
        raw_statuses = self.call('nzb')
        release_downloads = ReleaseDownloadList(self)
        for nzb in raw_statuses.get('nzbs', []):
            # Our download id is the basename of the nzb file we uploaded.
            nzb_id = os.path.basename(nzb['nzbFileName'])
            if nzb_id in ids:
                # Check status
                # NOTE(review): NZBVortex state codes -- 20 appears to mean
                # completed, 21/22/24 failure variants; anything else busy.
                status = 'busy'
                if nzb['state'] == 20:
                    status = 'completed'
                elif nzb['state'] in [21, 22, 24]:
                    status = 'failed'
                release_downloads.append({
                    'temp_id': nzb['id'],
                    'id': nzb_id,
                    'name': nzb['uiTitle'],
                    'status': status,
                    'original_status': nzb['state'],
                    'timeleft': -1,
                    'folder': sp(nzb['destinationPath']),
                })
        return release_downloads
    def removeFailed(self, release_download):
        """Cancel (delete) a failed download in NZBVortex.
        :return: bool - True when the cancel call did not raise."""
        log.info('%s failed downloading, deleting...', release_download['name'])
        try:
            self.call('nzb/%s/cancel' % release_download['temp_id'])
        except:
            log.error('Failed deleting: %s', traceback.format_exc(0))
            return False
        return True
    def login(self):
        """Perform the nonce/cnonce login handshake and store the session id.
        :return: bool - True on success."""
        nonce = self.call('auth/nonce', auth = False).get('authNonce')
        cnonce = uuid4().hex
        # NOTE(review): sha256 of a str is Python-2-only; under Python 3 this
        # would need .encode() first.
        hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest())
        params = {
            'nonce': nonce,
            'cnonce': cnonce,
            'hash': hashed
        }
        login_data = self.call('auth/login', parameters = params, auth = False)
        # Save for later
        if login_data.get('loginResult') == 'successful':
            self.session_id = login_data.get('sessionID')
            return True
        log.error('Login failed, please check you api-key')
        return False
    def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs):
        """Perform a GET against the NZBVortex API, logging in first (and
        retrying once on HTTP 403).  Returns the decoded JSON dict, or {}."""
        # Login first
        if not parameters: parameters = {}
        if not self.session_id and auth:
            self.login()
        # Always add session id to request
        if self.session_id:
            parameters['sessionid'] = self.session_id
        params = tryUrlencode(parameters)
        url = cleanHost(self.conf('host')) + 'api/' + call
        try:
            data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs)
            if data:
                return data
        except HTTPError as e:
            sc = e.response.status_code
            if sc == 403:
                # Try login and do again
                if not is_repeat:
                    self.login()
                    return self.call(call, parameters = parameters, is_repeat = True, **kwargs)
            log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
        except:
            log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
        return {}
    def getApiLevel(self):
        """Fetch and cache the NZBVortex API level (no auth needed).
        :return: float or None when the server is unreachable/unsupported."""
        if not self.api_level:
            try:
                data = self.call('app/apilevel', auth = False)
                self.api_level = float(data.get('apilevel'))
            except HTTPError as e:
                sc = e.response.status_code
                if sc == 403:
                    log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher')
                else:
                    log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1))
        return self.api_level
    def isEnabled(self, manual = False, data = None):
        """Enabled only when the base checks pass AND the server reports an
        API level (i.e. it is reachable and supported)."""
        if not data: data = {}
        return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()
# Settings schema consumed by CouchPotato's config system: one entry in the
# "downloaders" tab describing the options shown in the NZBVortex UI panel.
config = [{
    'name': 'nzbvortex',
    'groups': [
        {
            'tab': 'downloaders',
            'list': 'download_providers',
            'name': 'nzbvortex',
            'label': 'NZBVortex',
            'description': 'Use <a href="http://www.nzbvortex.com/landing/" target="_blank">NZBVortex</a> to download NZBs.',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                    'radio_group': 'nzb',
                },
                {
                    'name': 'host',
                    'default': 'https://localhost:4321',
                    'description': 'Hostname with port. Usually <strong>https://localhost:4321</strong>',
                },
                {
                    'name': 'api_key',
                    'label': 'Api Key',
                },
                {
                    'name': 'group',
                    'label': 'Group',
                    'description': 'The group CP places the nzb in. Make sure to create it in NZBVortex.',
                },
                {
                    'name': 'manual',
                    'default': False,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
                },
                {
                    'name': 'delete_failed',
                    'default': True,
                    'advanced': True,
                    'type': 'bool',
                    'description': 'Delete a release after the download has failed.',
                },
            ],
        }
    ],
}]
| gpl-3.0 |
TFResources/TFLearning | tf_v1.0/test_mnist_softmax.py | 1 | 1696 | import local_mnist as input_data
import tensorflow as tf
from tensorflow.python import debug as tf_debug
# Load the MNIST dataset (downloads to ./MNIST_data on first run).
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, shape=[None, 784])   # flattened 28x28 images
    y_ = tf.placeholder(tf.float32, shape=[None, 10])   # one-hot labels
    # Single-layer softmax regression: logits = x W + b.
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b
    with tf.name_scope('loss'):
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
        tf.summary.scalar('loss', cross_entropy)
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy, name="minimize")
    # Build the summary Tensor based on the TF collection of Summaries.
    summary = tf.summary.merge_all()
    sess = tf.InteractiveSession()
    summary_writer = tf.summary.FileWriter("MNIST_train", graph=sess.graph)
    sess.run(tf.global_variables_initializer())
    # Wrap the session in the tfdbg CLI debugger; every sess.run below drops
    # into the interactive debug prompt.
    sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    for i in range(1000):
        batch = mnist.train.next_batch(100)
        feed_dict = {x: batch[0], y_: batch[1]}
        sess.run(train_step, feed_dict=feed_dict)
        # Update the events file.
        if i % 500 == 0:
            summary_str = sess.run(summary, feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, i)
            summary_writer.flush()
    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y_: mnist.test.labels}))
| apache-2.0 |
martinky82/abrt | tests/runtests/ureport/fakefaf.py | 3 | 2083 | #!/usr/bin/python3
# Single purpose HTTP server
# - accepts POST of ureport JSON and dumps it to a file
import sys
import json
import http.server as BaseHTTPServer
import cgi
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Accepts a multipart POST of a uReport JSON file, dumps it to the path
    in ``Handler.save_ureport`` and replies with a canned FAF-style JSON
    response (always HTTP 202)."""

    def do_POST(self):
        # parse form data
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': self.headers['Content-Type'],
            }
        )

        self.send_response(202)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Connection', 'close')
        self.end_headers()

        if self.path != '/faf/reports/new/':
            # Record the unexpected path so the test harness can detect it.
            # BUGFIX: the file is opened in binary mode, so the payload must
            # be encoded to bytes (writing a str here raised TypeError).
            with open(self.save_ureport, 'wb') as fh:
                fh.write(('{"invalid_request_path": "%s"}' % self.path).encode())
            return

        # Dump the uploaded uReport, re-serialized with stable indentation.
        ureport = json.load(form['file'].file)
        with open(self.save_ureport, 'wb') as fh:
            ureport_dump = json.dumps(ureport, indent=2).encode()
            fh.write(ureport_dump)

        # Canned response mimicking a real FAF server's answer.
        response = {
            'bthash': '691cf824e3e07457156125636e86c50279e29496',
            'message': 'https://retrace.fedoraproject.org/faf/reports/6437/\nhttps://bugzilla.redhat.com/show_bug.cgi?id=851210',
            'reported_to': [
                {
                    'type': 'url',
                    'value': 'https://retrace.fedoraproject.org/faf/reports/6437/',
                    'reporter': 'ABRT Server'
                },
                {
                    'type': 'url',
                    'value': 'https://bugzilla.redhat.com/show_bug.cgi?id=851210',
                    'reporter': 'Bugzilla'
                }
            ],
            'result': True
        }
        response_dump = json.dumps(response, indent=2).encode()
        self.wfile.write(response_dump)
PORT = 12345
print("Serving at port", PORT)
# First CLI argument overrides where the received uReport is dumped.
Handler.save_ureport = sys.argv[1] if len(sys.argv) > 1 else 'ureport.json'
httpd = BaseHTTPServer.HTTPServer(("", PORT), Handler)
# Runs until the process is killed.
httpd.serve_forever()
| gpl-2.0 |
vlachoudis/sl4a | python/src/Lib/test/test_getargs2.py | 58 | 11595 | import unittest
from test import test_support
from _testcapi import getargs_keywords
import warnings
# The getargs converters accept floats with a DeprecationWarning; silence it
# both for this module and for code executed via unittest helpers.
warnings.filterwarnings("ignore",
                        category=DeprecationWarning,
                        message=".*integer argument expected, got float",
                        module=__name__)
warnings.filterwarnings("ignore",
                        category=DeprecationWarning,
                        message=".*integer argument expected, got float",
                        module="unittest")
"""
> How about the following counterproposal. This also changes some of
> the other format codes to be a little more regular.
>
> Code C type Range check
>
> b unsigned char 0..UCHAR_MAX
> h signed short SHRT_MIN..SHRT_MAX
> B unsigned char none **
> H unsigned short none **
> k * unsigned long none
> I * unsigned int 0..UINT_MAX
> i int INT_MIN..INT_MAX
> l long LONG_MIN..LONG_MAX
> K * unsigned long long none
> L long long LLONG_MIN..LLONG_MAX

> Notes:
>
> * New format codes.
>
> ** Changed from previous "range-and-a-half" to "none"; the
> range-and-a-half checking wasn't particularly useful.

Plus a C API or two, e.g. PyInt_AsLongMask() ->
unsigned long and PyInt_AsLongLongMask() -> unsigned
long long (if that exists).
"""
# Boundary values used across all test cases below.  (Python 2 source:
# note the trailing ``L`` long-literal suffix.)
LARGE = 0x7FFFFFFF
VERY_LARGE = 0xFF0000121212121212121242L
from _testcapi import UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, INT_MAX, \
     INT_MIN, LONG_MIN, LONG_MAX, PY_SSIZE_T_MIN, PY_SSIZE_T_MAX
# fake, they are not defined in Python's header files
LLONG_MAX = 2**63-1
LLONG_MIN = -2**63
ULLONG_MAX = 2**64-1
class Long:
    """Helper whose __int__ returns a Python 2 ``long`` (99L); used to check
    that the converters accept arbitrary objects implementing __int__."""
    def __int__(self):
        return 99L
class Int:
    """Helper whose __int__ returns a plain int; counterpart of Long."""
    def __int__(self):
        return 99
class Unsigned_TestCase(unittest.TestCase):
    """Range/overflow behaviour of the unsigned format codes:
    'b' (checked), 'B', 'H', 'I' (unchecked, wrap), 'k' (strict types)."""
    def test_b(self):
        from _testcapi import getargs_b
        # b returns 'unsigned char', and does range checking (0 ... UCHAR_MAX)
        self.failUnlessEqual(3, getargs_b(3.14))
        self.failUnlessEqual(99, getargs_b(Long()))
        self.failUnlessEqual(99, getargs_b(Int()))
        self.assertRaises(OverflowError, getargs_b, -1)
        self.failUnlessEqual(0, getargs_b(0))
        self.failUnlessEqual(UCHAR_MAX, getargs_b(UCHAR_MAX))
        self.assertRaises(OverflowError, getargs_b, UCHAR_MAX + 1)
        self.failUnlessEqual(42, getargs_b(42))
        self.failUnlessEqual(42, getargs_b(42L))
        self.assertRaises(OverflowError, getargs_b, VERY_LARGE)
    def test_B(self):
        from _testcapi import getargs_B
        # B returns 'unsigned char', no range checking
        self.failUnlessEqual(3, getargs_B(3.14))
        self.failUnlessEqual(99, getargs_B(Long()))
        self.failUnlessEqual(99, getargs_B(Int()))
        self.failUnlessEqual(UCHAR_MAX, getargs_B(-1))
        self.failUnlessEqual(UCHAR_MAX, getargs_B(-1L))
        self.failUnlessEqual(0, getargs_B(0))
        self.failUnlessEqual(UCHAR_MAX, getargs_B(UCHAR_MAX))
        self.failUnlessEqual(0, getargs_B(UCHAR_MAX+1))
        self.failUnlessEqual(42, getargs_B(42))
        self.failUnlessEqual(42, getargs_B(42L))
        self.failUnlessEqual(UCHAR_MAX & VERY_LARGE, getargs_B(VERY_LARGE))
    def test_H(self):
        from _testcapi import getargs_H
        # H returns 'unsigned short', no range checking
        self.failUnlessEqual(3, getargs_H(3.14))
        self.failUnlessEqual(99, getargs_H(Long()))
        self.failUnlessEqual(99, getargs_H(Int()))
        self.failUnlessEqual(USHRT_MAX, getargs_H(-1))
        self.failUnlessEqual(0, getargs_H(0))
        self.failUnlessEqual(USHRT_MAX, getargs_H(USHRT_MAX))
        self.failUnlessEqual(0, getargs_H(USHRT_MAX+1))
        self.failUnlessEqual(42, getargs_H(42))
        self.failUnlessEqual(42, getargs_H(42L))
        self.failUnlessEqual(VERY_LARGE & USHRT_MAX, getargs_H(VERY_LARGE))
    def test_I(self):
        from _testcapi import getargs_I
        # I returns 'unsigned int', no range checking
        self.failUnlessEqual(3, getargs_I(3.14))
        self.failUnlessEqual(99, getargs_I(Long()))
        self.failUnlessEqual(99, getargs_I(Int()))
        self.failUnlessEqual(UINT_MAX, getargs_I(-1))
        self.failUnlessEqual(0, getargs_I(0))
        self.failUnlessEqual(UINT_MAX, getargs_I(UINT_MAX))
        self.failUnlessEqual(0, getargs_I(UINT_MAX+1))
        self.failUnlessEqual(42, getargs_I(42))
        self.failUnlessEqual(42, getargs_I(42L))
        self.failUnlessEqual(VERY_LARGE & UINT_MAX, getargs_I(VERY_LARGE))
    def test_k(self):
        from _testcapi import getargs_k
        # k returns 'unsigned long', no range checking
        # it does not accept float, or instances with __int__
        self.assertRaises(TypeError, getargs_k, 3.14)
        self.assertRaises(TypeError, getargs_k, Long())
        self.assertRaises(TypeError, getargs_k, Int())
        self.failUnlessEqual(ULONG_MAX, getargs_k(-1))
        self.failUnlessEqual(0, getargs_k(0))
        self.failUnlessEqual(ULONG_MAX, getargs_k(ULONG_MAX))
        self.failUnlessEqual(0, getargs_k(ULONG_MAX+1))
        self.failUnlessEqual(42, getargs_k(42))
        self.failUnlessEqual(42, getargs_k(42L))
        self.failUnlessEqual(VERY_LARGE & ULONG_MAX, getargs_k(VERY_LARGE))
class Signed_TestCase(unittest.TestCase):
    """Range-checked signed format codes: 'i' (int), 'l' (long) and
    'n' (Py_ssize_t) -- all raise OverflowError outside their C range."""
    def test_i(self):
        from _testcapi import getargs_i
        # i returns 'int', and does range checking (INT_MIN ... INT_MAX)
        self.failUnlessEqual(3, getargs_i(3.14))
        self.failUnlessEqual(99, getargs_i(Long()))
        self.failUnlessEqual(99, getargs_i(Int()))
        self.assertRaises(OverflowError, getargs_i, INT_MIN-1)
        self.failUnlessEqual(INT_MIN, getargs_i(INT_MIN))
        self.failUnlessEqual(INT_MAX, getargs_i(INT_MAX))
        self.assertRaises(OverflowError, getargs_i, INT_MAX+1)
        self.failUnlessEqual(42, getargs_i(42))
        self.failUnlessEqual(42, getargs_i(42L))
        self.assertRaises(OverflowError, getargs_i, VERY_LARGE)
    def test_l(self):
        from _testcapi import getargs_l
        # l returns 'long', and does range checking (LONG_MIN ... LONG_MAX)
        self.failUnlessEqual(3, getargs_l(3.14))
        self.failUnlessEqual(99, getargs_l(Long()))
        self.failUnlessEqual(99, getargs_l(Int()))
        self.assertRaises(OverflowError, getargs_l, LONG_MIN-1)
        self.failUnlessEqual(LONG_MIN, getargs_l(LONG_MIN))
        self.failUnlessEqual(LONG_MAX, getargs_l(LONG_MAX))
        self.assertRaises(OverflowError, getargs_l, LONG_MAX+1)
        self.failUnlessEqual(42, getargs_l(42))
        self.failUnlessEqual(42, getargs_l(42L))
        self.assertRaises(OverflowError, getargs_l, VERY_LARGE)
    def test_n(self):
        from _testcapi import getargs_n
        # n returns 'Py_ssize_t', and does range checking
        # (PY_SSIZE_T_MIN ... PY_SSIZE_T_MAX)
        self.failUnlessEqual(3, getargs_n(3.14))
        self.failUnlessEqual(99, getargs_n(Long()))
        self.failUnlessEqual(99, getargs_n(Int()))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MIN-1)
        self.failUnlessEqual(PY_SSIZE_T_MIN, getargs_n(PY_SSIZE_T_MIN))
        self.failUnlessEqual(PY_SSIZE_T_MAX, getargs_n(PY_SSIZE_T_MAX))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MAX+1)
        self.failUnlessEqual(42, getargs_n(42))
        self.failUnlessEqual(42, getargs_n(42L))
        self.assertRaises(OverflowError, getargs_n, VERY_LARGE)
class LongLong_TestCase(unittest.TestCase):
    """64-bit format codes: 'L' (range-checked long long) and
    'K' (unsigned long long, wraps, strict about input types)."""
    def test_L(self):
        from _testcapi import getargs_L
        # L returns 'long long', and does range checking (LLONG_MIN ... LLONG_MAX)
        self.failUnlessRaises(TypeError, getargs_L, "Hello")
        self.failUnlessEqual(3, getargs_L(3.14))
        self.failUnlessEqual(99, getargs_L(Long()))
        self.failUnlessEqual(99, getargs_L(Int()))
        self.assertRaises(OverflowError, getargs_L, LLONG_MIN-1)
        self.failUnlessEqual(LLONG_MIN, getargs_L(LLONG_MIN))
        self.failUnlessEqual(LLONG_MAX, getargs_L(LLONG_MAX))
        self.assertRaises(OverflowError, getargs_L, LLONG_MAX+1)
        self.failUnlessEqual(42, getargs_L(42))
        self.failUnlessEqual(42, getargs_L(42L))
        self.assertRaises(OverflowError, getargs_L, VERY_LARGE)
    def test_K(self):
        from _testcapi import getargs_K
        # K return 'unsigned long long', no range checking
        self.assertRaises(TypeError, getargs_K, 3.14)
        self.assertRaises(TypeError, getargs_K, Long())
        self.assertRaises(TypeError, getargs_K, Int())
        self.failUnlessEqual(ULLONG_MAX, getargs_K(ULLONG_MAX))
        self.failUnlessEqual(0, getargs_K(0))
        self.failUnlessEqual(0, getargs_K(ULLONG_MAX+1))
        self.failUnlessEqual(42, getargs_K(42))
        self.failUnlessEqual(42, getargs_K(42L))
        self.failUnlessEqual(VERY_LARGE & ULLONG_MAX, getargs_K(VERY_LARGE))
class Tuple_TestCase(unittest.TestCase):
    """Nested-tuple argument unpacking, including error handling when a
    sequence raises during item access."""
    def test_tuple(self):
        from _testcapi import getargs_tuple
        ret = getargs_tuple(1, (2, 3))
        self.assertEquals(ret, (1,2,3))
        # make sure invalid tuple arguments are handled correctly
        class seq:
            def __len__(self):
                return 2
            def __getitem__(self, n):
                # Simulate a broken sequence: parsing must surface TypeError.
                raise ValueError
        self.assertRaises(TypeError, getargs_tuple, 1, seq())
class Keywords_TestCase(unittest.TestCase):
    """PyArg_ParseTupleAndKeywords behaviour via _testcapi.getargs_keywords:
    positional/keyword mixing, optional and required args, error messages."""
    def test_positional_args(self):
        # using all positional args
        self.assertEquals(
            getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), 10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_mixed_args(self):
        # positional and keyword args
        self.assertEquals(
            getargs_keywords((1,2), 3, (4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_keyword_args(self):
        # all keywords
        self.assertEquals(
            getargs_keywords(arg1=(1,2), arg2=3, arg3=(4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_optional_args(self):
        # missing optional keyword args, skipping tuples
        self.assertEquals(
            getargs_keywords(arg1=(1,2), arg2=3, arg5=10),
            (1, 2, 3, -1, -1, -1, -1, -1, -1, 10)
            )
    def test_required_args(self):
        # required arg missing
        try:
            getargs_keywords(arg1=(1,2))
        except TypeError, err:
            self.assertEquals(str(err), "Required argument 'arg2' (pos 2) not found")
        else:
            self.fail('TypeError should have been raised')
    def test_too_many_args(self):
        try:
            getargs_keywords((1,2),3,(4,(5,6)),(7,8,9),10,111)
        except TypeError, err:
            self.assertEquals(str(err), "function takes at most 5 arguments (6 given)")
        else:
            self.fail('TypeError should have been raised')
    def test_invalid_keyword(self):
        # extraneous keyword arg
        try:
            getargs_keywords((1,2),3,arg5=10,arg666=666)
        except TypeError, err:
            self.assertEquals(str(err), "'arg666' is an invalid keyword argument for this function")
        else:
            self.fail('TypeError should have been raised')
def test_main():
    """Run all getargs test cases, including LongLong_TestCase only when the
    interpreter build provides PY_LONG_LONG support."""
    tests = [Signed_TestCase, Unsigned_TestCase, Tuple_TestCase, Keywords_TestCase]
    try:
        from _testcapi import getargs_L, getargs_K
    except ImportError:
        pass # PY_LONG_LONG not available
    else:
        tests.append(LongLong_TestCase)
    test_support.run_unittest(*tests)
if __name__ == "__main__":
    test_main()
| apache-2.0 |
lakshayg/tensorflow | tensorflow/contrib/resampler/python/ops/resampler_ops.py | 66 | 2809 | # pylint: disable=g-bad-file-header
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op performing differentiable resampling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.resampler.ops import gen_resampler_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_resampler_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_resampler_ops.so"))
def resampler(data, warp, name="resampler"):
    """Resamples input data at user defined coordinates.

    Only bilinear interpolation of 2D data is currently supported, so the
    last dimension of `warp` must be 2.

    Args:
      data: Tensor of shape `[batch_size, data_height, data_width,
        data_num_channels]` holding the 2D data to resample.
      warp: Tensor of minimum rank 2 with the sampling coordinates; its
        last dimension must be 2.
      name: Optional name of the op.

    Returns:
      Tensor of values sampled from `data`. The output shape follows the
      warp tensor: for `warp` of shape `[batch_size, dim_0, ..., dim_n, 2]`
      the result is `[batch_size, dim_0, ..., dim_n, data_num_channels]`.

    Raises:
      ImportError: if the generated op wrapper is not present when the
        function is called.
    """
    with ops.name_scope(name, "resampler", [data, warp]):
        data_t = ops.convert_to_tensor(data, name="data")
        warp_t = ops.convert_to_tensor(warp, name="warp")
        return gen_resampler_ops.resampler(data_t, warp_t)
@ops.RegisterGradient("Resampler")
def _resampler_grad(op, grad_output):
    # Delegate to the fused C++ gradient kernel, which returns gradients
    # for both the data and the warp inputs.
    data_in, warp_in = op.inputs
    grad_t = ops.convert_to_tensor(grad_output, name="grad_output")
    return gen_resampler_ops.resampler_grad(data_in, warp_in, grad_t)

# The gradient op itself has no registered gradient.
ops.NotDifferentiable("ResamplerGrad")
| apache-2.0 |
ruddra/django-oscar | oscar/apps/shipping/base.py | 4 | 2252 | from decimal import Decimal as D
import warnings
class Base(object):
    """
    Shipping method interface class

    This is the superclass to the classes in methods.py, and a de-facto
    superclass to the classes in models.py. This allows using all
    shipping methods interchangeably (aka polymorphism).

    The interface is all properties.
    """

    # CORE INTERFACE
    # --------------

    #: Used to store this method in the session. Each shipping method should
    #  have a unique code.
    code = '__default__'

    #: The name of the shipping method, shown to the customer during checkout
    name = 'Default shipping'

    #: A more detailed description of the shipping method shown to the customer
    #  during checkout. Can contain HTML.
    description = ''

    # FIX: the comments for the two charge attributes were swapped —
    # charge_excl_tax is the tax-EXCLUSIVE amount, charge_incl_tax the
    # tax-INCLUSIVE one.
    #: Shipping charge excluding taxes
    charge_excl_tax = D('0.00')

    #: Shipping charge including taxes (None until the tax is known)
    charge_incl_tax = None

    #: Whether we know the shipping tax applicable (and hence whether
    #  charge_incl_tax returns a value).
    is_tax_known = False

    # END OF CORE INTERFACE
    # ---------------------

    # These are not intended to be overridden and are used to track shipping
    # discounts.
    is_discounted = False
    discount = D('0.00')

    def _get_tax(self):
        # Only meaningful once the tax has been set (charge_incl_tax is
        # None before that, so this would raise a TypeError).
        return self.charge_incl_tax - self.charge_excl_tax

    def _set_tax(self, value):
        # Setting the tax derives the tax-inclusive charge and flags the
        # tax as known.
        self.charge_incl_tax = self.charge_excl_tax + value
        self.is_tax_known = True

    tax = property(_get_tax, _set_tax)

    def set_basket(self, basket):
        """Attach the basket being shipped so charges can be computed."""
        self.basket = basket

    def basket_charge_excl_tax(self):
        """Deprecated alias for the charge_excl_tax property."""
        warnings.warn((
            "Use the charge_excl_tax property not basket_charge_excl_tax. "
            "Basket.basket_charge_excl_tax will be removed "
            "in v0.7"),
            DeprecationWarning)
        return self.charge_excl_tax

    def basket_charge_incl_tax(self):
        """Deprecated alias for the charge_incl_tax property."""
        warnings.warn((
            "Use the charge_incl_tax property not basket_charge_incl_tax. "
            "Basket.basket_charge_incl_tax will be removed "
            "in v0.7"),
            DeprecationWarning)
        return self.charge_incl_tax

# For backwards compatibility, keep an alias called "ShippingMethod"
ShippingMethod = Base
| bsd-3-clause |
rmboggs/django | django/template/utils.py | 199 | 3733 | import os
from collections import Counter, OrderedDict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
class InvalidTemplateEngineError(ImproperlyConfigured):
    """Raised when a requested engine alias has no entry in settings.TEMPLATES."""
    pass
class EngineHandler(object):
    """Lazily instantiate and cache template engines, keyed by alias."""

    def __init__(self, templates=None):
        """
        templates is an optional list of template engine definitions
        (structured like settings.TEMPLATES).
        """
        self._templates = templates
        # alias -> instantiated backend engine, filled lazily by __getitem__
        self._engines = {}

    @cached_property
    def templates(self):
        # Normalized OrderedDict of engine configs keyed by NAME; computed
        # once per handler (cached_property).
        if self._templates is None:
            self._templates = settings.TEMPLATES

        templates = OrderedDict()
        backend_names = []
        for tpl in self._templates:
            tpl = tpl.copy()
            try:
                # This will raise an exception if 'BACKEND' doesn't exist or
                # isn't a string containing at least one dot.
                default_name = tpl['BACKEND'].rsplit('.', 2)[-2]
            except Exception:
                invalid_backend = tpl.get('BACKEND', '<not defined>')
                raise ImproperlyConfigured(
                    "Invalid BACKEND for a template engine: {}. Check "
                    "your TEMPLATES setting.".format(invalid_backend))

            # Fill in defaults so every config is fully specified downstream.
            tpl.setdefault('NAME', default_name)
            tpl.setdefault('DIRS', [])
            tpl.setdefault('APP_DIRS', False)
            tpl.setdefault('OPTIONS', {})

            templates[tpl['NAME']] = tpl
            backend_names.append(tpl['NAME'])

        # Aliases must be unique, otherwise __getitem__ would be ambiguous.
        counts = Counter(backend_names)
        duplicates = [alias for alias, count in counts.most_common() if count > 1]
        if duplicates:
            raise ImproperlyConfigured(
                "Template engine aliases aren't unique, duplicates: {}. "
                "Set a unique NAME for each engine in settings.TEMPLATES."
                .format(", ".join(duplicates)))

        return templates

    def __getitem__(self, alias):
        """Return the engine for *alias*, importing and caching it on first use.

        Raises InvalidTemplateEngineError if the alias is not configured.
        """
        try:
            return self._engines[alias]
        except KeyError:
            try:
                params = self.templates[alias]
            except KeyError:
                raise InvalidTemplateEngineError(
                    "Could not find config for '{}' "
                    "in settings.TEMPLATES".format(alias))

            # If importing or initializing the backend raises an exception,
            # self._engines[alias] isn't set and this code may get executed
            # again, so we must preserve the original params. See #24265.
            params = params.copy()
            backend = params.pop('BACKEND')
            engine_cls = import_string(backend)
            engine = engine_cls(params)

            self._engines[alias] = engine
            return engine

    def __iter__(self):
        # Iterating a handler yields the configured aliases.
        return iter(self.templates)

    def all(self):
        """Return every configured engine, instantiating them as needed."""
        return [self[alias] for alias in self]
@lru_cache.lru_cache()
def get_app_template_dirs(dirname):
    """
    Return an iterable of paths of directories to load app templates from.

    dirname is the name of the subdirectory containing templates inside
    installed applications.
    """
    # Cached: the set of installed apps does not change at runtime.
    template_dirs = []
    for app_config in apps.get_app_configs():
        # Apps without a filesystem path (e.g. namespace packages) are skipped.
        if not app_config.path:
            continue
        template_dir = os.path.join(app_config.path, dirname)
        if os.path.isdir(template_dir):
            template_dirs.append(upath(template_dir))
    # Immutable return value because it will be cached and shared by callers.
    return tuple(template_dirs)
| bsd-3-clause |
rvalyi/OpenUpgrade | addons/hr_contract/base_action_rule.py | 389 | 2646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_action_rule.base_action_rule import get_datetime
from openerp.osv import fields, osv
class base_action_rule(osv.Model):
    """ Add resource and calendar for time-based conditions """
    _name = 'base.action.rule'
    _inherit = ['base.action.rule']

    _columns = {
        # Field (on the target model) pointing at the user whose working
        # schedule should drive day-based delay computation.
        'trg_date_resource_field_id': fields.many2one(
            'ir.model.fields', 'Use employee work schedule',
            help='Use the user\'s working schedule.',
        ),
    }

    def _check_delay(self, cr, uid, action, record, record_dt, context=None):
        """ Override the check of delay to try to use a user-related calendar.
        If no calendar is found, fallback on the default behavior. """
        # Only applies when the action has a calendar, a day-based range and
        # a resource field configured.
        if action.trg_date_calendar_id and action.trg_date_range_type == 'day' and action.trg_date_resource_field_id:
            user = record[action.trg_date_resource_field_id.name]
            # The user must be linked to an employee whose contract defines
            # working hours; otherwise fall through to the default.
            if user.employee_ids and user.employee_ids[0].contract_id \
                    and user.employee_ids[0].contract_id.working_hours:
                calendar = user.employee_ids[0].contract_id.working_hours
                start_dt = get_datetime(record_dt)
                resource_id = user.employee_ids[0].resource_id.id
                # Count trg_date_range working days on the employee's
                # calendar, skipping leaves.
                action_dt = self.pool['resource.calendar'].schedule_days_get_date(
                    cr, uid, calendar.id, action.trg_date_range,
                    day_date=start_dt, compute_leaves=True, resource_id=resource_id,
                    context=context
                )
                return action_dt
        return super(base_action_rule, self)._check_delay(cr, uid, action, record, record_dt, context=context)
| agpl-3.0 |
Shiroy/servo | tests/wpt/web-platform-tests/tools/pytest/doc/en/genapi.py | 203 | 1131 | import textwrap
import inspect
class Writer:
    """Context manager writing reST API docs for a class to <clsname>.api.

    Python 2 code (uses the ``print`` statement).
    """
    def __init__(self, clsname):
        self.clsname = clsname
    def __enter__(self):
        self.file = open("%s.api" % self.clsname, "w")
        return self
    def __exit__(self, *args):
        self.file.close()
        print "wrote", self.file.name
    def line(self, line):
        # Emit one line followed by a newline.
        self.file.write(line+"\n")
    def docmethod(self, method):
        """Emit a ``.. py:method::`` directive plus the wrapped docstring."""
        # Collapse the docstring into a single space-separated paragraph.
        doc = " ".join(method.__doc__.split())
        indent = " "
        w = textwrap.TextWrapper(initial_indent=indent,
            subsequent_indent=indent)
        spec = inspect.getargspec(method)
        # Drop 'self' from the rendered signature.
        del spec.args[0]
        self.line(".. py:method:: " + method.__name__ +
                  inspect.formatargspec(*spec))
        self.line("")
        self.line(w.fill(doc))
        self.line("")
def pytest_funcarg__a(request):
    # Old-style pytest funcarg: as a side effect, generate request.api
    # documenting a few methods of the ``request`` object.
    with Writer("request") as writer:
        writer.docmethod(request.getfuncargvalue)
        writer.docmethod(request.cached_setup)
        writer.docmethod(request.addfinalizer)
        writer.docmethod(request.applymarker)

def test_hello(a):
    # Requesting the 'a' funcarg triggers the doc generation above.
    pass
| mpl-2.0 |
jeremiahyan/odoo | addons/test_mail_full/models/mailing_mailing.py | 2 | 1050 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import models
_logger = logging.getLogger(__name__)
class Mailing(models.Model):
    _inherit = 'mailing.mailing'

    def _get_opt_out_list_sms(self):
        """Returns a set of emails opted-out in target model"""
        # NOTE(review): despite the docstring, this returns record *ids*
        # (``.ids``), not email addresses — confirm against callers.
        self.ensure_one()
        # Only these SMS test models carry an ``opt_out`` flag; any other
        # mailing model falls back to the standard implementation.
        if self.mailing_model_real in ('mail.test.sms.bl.optout',
                                       'mail.test.sms.partner',
                                       'mail.test.sms.partner.2many'):
            res_ids = self._get_recipients()
            opt_out_contacts = set(self.env[self.mailing_model_real].search([
                ('id', 'in', res_ids),
                ('opt_out', '=', True)
            ]).ids)
            _logger.info(
                "Mass-mailing %s targets %s, optout: %s emails",
                self, self.mailing_model_real, len(opt_out_contacts))
            return opt_out_contacts
        return super(Mailing, self)._get_opt_out_list_sms()
| gpl-3.0 |
vmax-feihu/hue | desktop/core/ext-py/elementtree/elementtree/SimpleXMLWriter.py | 103 | 8616 | #
# SimpleXMLWriter
# $Id: SimpleXMLWriter.py 2312 2005-03-02 18:13:39Z fredrik $
#
# a simple XML writer
#
# history:
# 2001-12-28 fl created
# 2002-11-25 fl fixed attribute encoding
# 2002-12-02 fl minor fixes for 1.5.2
# 2004-06-17 fl added pythondoc markup
# 2004-07-23 fl added flush method (from Jay Graves)
# 2004-10-03 fl added declaration method
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The SimpleXMLWriter module is
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to write XML files, without having to deal with encoding
# issues, well-formedness, etc.
# <p>
# The current version does not provide built-in support for
# namespaces. To create files using namespaces, you have to provide
# "xmlns" attributes and explicitly add prefixes to tags and
# attributes.
#
# <h3>Patterns</h3>
#
# The following example generates a small XHTML document.
# <pre>
#
# from elementtree.SimpleXMLWriter import XMLWriter
# import sys
#
# w = XMLWriter(sys.stdout)
#
# html = w.start("html")
#
# w.start("head")
# w.element("title", "my document")
# w.element("meta", name="generator", value="my application 1.0")
# w.end()
#
# w.start("body")
# w.element("h1", "this is a heading")
# w.element("p", "this is a paragraph")
#
# w.start("p")
# w.data("this is ")
# w.element("b", "bold")
# w.data(" and ")
# w.element("i", "italic")
# w.data(".")
# w.end("p")
#
# w.close(html)
# </pre>
##
import re, sys, string
# Pick an ``encode`` implementation depending on whether this interpreter
# has the ``unicode`` builtin (Python 1.5.2 does not).
try:
    unicode("")
except NameError:
    def encode(s, encoding):
        # 1.5.2: application must use the right encoding
        return s
    _escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
else:
    def encode(s, encoding):
        return s.encode(encoding)
    # Built via eval so this module still parses on interpreters without
    # unicode literals.
    _escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))

def encode_entity(text, pattern=_escape):
    # map reserved and non-ascii characters to numerical entities
    def escape_entities(m):
        out = []
        for char in m.group():
            out.append("&#%d;" % ord(char))
        return string.join(out, "")
    return encode(pattern.sub(escape_entities, text), "ascii")

# The pattern lives on as encode_entity's default argument.
del _escape
#
# the following functions assume an ascii-compatible encoding
# (or "utf-16")
def escape_cdata(s, encoding=None, replace=string.replace):
    """Escape the XML-reserved characters &, <, > in character data.

    If *encoding* is given, also encode the result; unencodable text falls
    back to numeric character references via encode_entity().

    FIX: the entity references had been corrupted into no-op replacements
    (e.g. replacing "&" with "&"); restored to the standard XML entities.
    """
    s = replace(s, "&", "&amp;")
    s = replace(s, "<", "&lt;")
    s = replace(s, ">", "&gt;")
    if encoding:
        try:
            return encode(s, encoding)
        except UnicodeError:
            return encode_entity(s)
    return s
def escape_attrib(s, encoding=None, replace=string.replace):
    """Escape &, ', ", <, > for use inside a quoted attribute value.

    If *encoding* is given, also encode the result; unencodable text falls
    back to numeric character references via encode_entity().

    FIX: the entity references had been corrupted (no-op replacements and a
    broken string literal where &quot; belonged); restored to the standard
    XML entities.
    """
    s = replace(s, "&", "&amp;")
    s = replace(s, "'", "&apos;")
    s = replace(s, "\"", "&quot;")
    s = replace(s, "<", "&lt;")
    s = replace(s, ">", "&gt;")
    if encoding:
        try:
            return encode(s, encoding)
        except UnicodeError:
            return encode_entity(s)
    return s
##
# XML writer class.
#
# @param file A file or file-like object. This object must implement
# a <b>write</b> method that takes an 8-bit string.
# @param encoding Optional encoding.
class XMLWriter:
    # Streaming XML writer: buffers character data and keeps a stack of open
    # tags so elements are closed in the right order. Python 2 code
    # (string.join, apply).

    def __init__(self, file, encoding="us-ascii"):
        if not hasattr(file, "write"):
            file = open(file, "w")
        self.__write = file.write
        if hasattr(file, "flush"):
            self.flush = file.flush
        self.__open = 0 # true if start tag is open
        self.__tags = []       # stack of currently open tag names
        self.__data = []       # pending character data, joined lazily
        self.__encoding = encoding

    def __flush(self):
        # flush internal buffers
        if self.__open:
            # Close the pending start tag before emitting content.
            self.__write(">")
            self.__open = 0
        if self.__data:
            data = string.join(self.__data, "")
            self.__write(escape_cdata(data, self.__encoding))
            self.__data = []

    ##
    # Writes an XML declaration.

    def declaration(self):
        encoding = self.__encoding
        # us-ascii / utf-8 are the XML defaults, so no encoding attribute.
        if encoding == "us-ascii" or encoding == "utf-8":
            self.__write("<?xml version='1.0'?>\n")
        else:
            self.__write("<?xml version='1.0' encoding='%s'?>\n" % encoding)

    ##
    # Opens a new element.  Attributes can be given as keyword
    # arguments, or as a string/string dictionary. You can pass in
    # 8-bit strings or Unicode strings; the former are assumed to use
    # the encoding passed to the constructor.  The method returns an
    # opaque identifier that can be passed to the <b>close</b> method,
    # to close all open elements up to and including this one.
    #
    # @param tag Element tag.
    # @param attrib Attribute dictionary.  Alternatively, attributes
    #    can be given as keyword arguments.
    # @return An element identifier.

    def start(self, tag, attrib={}, **extra):
        self.__flush()
        tag = escape_cdata(tag, self.__encoding)
        self.__data = []
        self.__tags.append(tag)
        self.__write("<%s" % tag)
        if attrib or extra:
            attrib = attrib.copy()
            attrib.update(extra)
            attrib = attrib.items()
            # Sort for deterministic attribute order.
            attrib.sort()
            for k, v in attrib:
                k = escape_cdata(k, self.__encoding)
                v = escape_attrib(v, self.__encoding)
                self.__write(" %s=\"%s\"" % (k, v))
        # Leave the start tag open so it can self-close in end().
        self.__open = 1
        return len(self.__tags)-1

    ##
    # Adds a comment to the output stream.
    #
    # @param comment Comment text, as an 8-bit string or Unicode string.

    def comment(self, comment):
        self.__flush()
        self.__write("<!-- %s -->\n" % escape_cdata(comment, self.__encoding))

    ##
    # Adds character data to the output stream.
    #
    # @param text Character data, as an 8-bit string or Unicode string.

    def data(self, text):
        # Buffered; escaped and written on the next __flush().
        self.__data.append(text)

    ##
    # Closes the current element (opened by the most recent call to
    # <b>start</b>).
    #
    # @param tag Element tag.  If given, the tag must match the start
    #    tag.  If omitted, the current element is closed.

    def end(self, tag=None):
        if tag:
            assert self.__tags, "unbalanced end(%s)" % tag
            assert escape_cdata(tag, self.__encoding) == self.__tags[-1],\
                   "expected end(%s), got %s" % (self.__tags[-1], tag)
        else:
            assert self.__tags, "unbalanced end()"
            tag = self.__tags.pop()
        if self.__data:
            self.__flush()
        elif self.__open:
            # Empty element: emit a self-closing tag instead of a pair.
            self.__open = 0
            self.__write(" />")
            return
        self.__write("</%s>" % tag)

    ##
    # Closes open elements, up to (and including) the element identified
    # by the given identifier.
    #
    # @param id Element identifier, as returned by the <b>start</b> method.

    def close(self, id):
        while len(self.__tags) > id:
            self.end()

    ##
    # Adds an entire element.  This is the same as calling <b>start</b>,
    # <b>data</b>, and <b>end</b> in sequence. The <b>text</b> argument
    # can be omitted.

    def element(self, tag, text=None, attrib={}, **extra):
        apply(self.start, (tag, attrib), extra)
        if text:
            self.data(text)
        self.end()

    ##
    # Flushes the output stream.

    def flush(self):
        pass # replaced by the constructor
| apache-2.0 |
jrd/urwidm | setup.py | 1 | 2924 | #!/bin/env python
# coding: utf-8
# vim:et:sta:sw=2:sts=2:ts=2:tw=0:
from __future__ import division, print_function, absolute_import

import codecs
import io
import os
import re
from glob import glob

from setuptools import setup
MODULE_NAME = 'urwidm'
def read(*paths):
    """Build a file path from *paths* and return its contents as text.

    The file is decoded as UTF-8. Returning text (rather than the bytes
    the previous codecs.EncodedFile-over-'rb' approach produced on
    Python 3) lets find_info() apply its str regular expressions directly.
    """
    with io.open(os.path.join(*paths), encoding='utf-8') as f:
        return f.read()

def find_info(info, *file_paths):
    """Extract the module-level ``__<info>__`` assignment from a package.

    *file_paths* are path components leading to the package directory; the
    value is read from its ``__init__.py``. Handles both plain string
    assignments (``__version__ = '1.2.3'``) and lists of strings
    (``__credits__ = ['A', 'B']``), which are joined with ', '.

    Raises:
        RuntimeError: if no matching assignment is found.
    """
    file_paths = list(file_paths)
    file_paths.append('__init__.py')
    info_file = read(*file_paths)
    # A Python string literal: runs of non-quote/non-backslash characters,
    # optionally interleaved with escaped quotes/backslashes.
    python_simple_string = r"(?:[^'\"\\]*)"
    python_escapes = r"(?:\\['\"\\])"
    python_string = r"{delim}((?:{simple}{esc}?)*){delim}".format(delim=r"['\"]", simple=python_simple_string, esc=python_escapes)
    info_match = re.search(r"^__{0}__ = {1}".format(info, python_string), info_file, re.M)
    if info_match:
        return info_match.group(1)
    # Fall back to a list-of-strings assignment.
    python_arrays = r"\[(?:{ps})?((?:, {ps})*)\]".format(ps=python_string)
    info_match = re.search(r"^__{0}__ = {1}".format(info, python_arrays), info_file, re.M)
    if info_match:
        # group(1): first element (None for an empty list literal);
        # group(2): the remaining ", 'x'" tail.
        first = info_match.group(1)
        matches = [] if first is None else [first]
        # FIX: was ``info_match.groups(2)``, which returns the (always
        # truthy) tuple of all groups; ``group(2)`` was intended.
        if info_match.group(2):
            matches.extend(re.findall(r", {0}".format(python_string), info_match.group(2)))
        return ', '.join(matches)
    raise RuntimeError("Unable to find {0} string.".format(info))
def find_version(*file_paths):
    """Shortcut for find_info('version', ...): the package's __version__."""
    return find_info('version', *file_paths)
# Documentation payload: ship the examples into doc/<module>-<version>/.
doc_dir = os.path.join('doc', '{0}-{1}'.format(MODULE_NAME, find_version(MODULE_NAME)))
doc_files = glob(os.path.join('examples', '*'))

# setuptools configuration; metadata fields are pulled out of the package's
# __init__.py via find_info()/find_version().
config = {
  'name': 'UrwidMore',
  'description': 'More widgets for Urwid',
  'long_description': read('README.rst'),
  'license': find_info('license', MODULE_NAME),
  'author': find_info('credits', MODULE_NAME),
  'author_email': find_info('email', MODULE_NAME),
  'version': find_version(MODULE_NAME),
  'url': 'https://github.com/jrd/urwidmore/',
  'download_url': 'https://github.com/jrd/urwidmore/archive/master.zip',
  'packages': [MODULE_NAME],
  'include_package_data': True,
  'data_files': [(doc_dir, doc_files)],
  'test_suite': 'tests',
  'classifiers': [ # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Environment :: Console :: Curses',
    'Intended Audience :: Developers',
    'Natural Language :: English',
    'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
    'Operating System :: POSIX',
    'Operating System :: Unix',
    'Operating System :: MacOS :: MacOS X',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.3',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Software Development :: Widget Sets',
  ],
}
setup(**config)
| lgpl-2.1 |
polymeris/qgis | python/plugins/mapserver_export/mapserverexportdialog.py | 6 | 3046 | """
/***************************************************************************
MapServerExport - A QGIS plugin to export a saved project file
to a MapServer map file
-------------------
begin : 2008-01-07
copyright : (C) 2007 by Gary E.Sherman
email : sherman at mrcc.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4 import QtCore, QtGui
from ms_export import defaults
from ui_qgsmapserverexportbase import Ui_QgsMapserverExportBase
# create the dialog for mapserver export
# create the dialog for mapserver export
class MapServerExportDialog(QtGui.QDialog):
    """Dialog collecting MapServer export options, prefilled from ms_export defaults."""
    def __init__(self, parent):
        QtGui.QDialog.__init__(self, parent)
        # Set up the user interface from Designer.
        self.ui = Ui_QgsMapserverExportBase()
        self.ui.setupUi(self)
        # MapServer map units offered in the combo box.
        units = ["meters", "dd", "feet", "miles", "inches", "kilometers"]
        # make them able to be translated
        tr_units = [ QtGui.QApplication.translate("QgsMapserverExportBase", "meters", None, QtGui.QApplication.UnicodeUTF8), QtGui.QApplication.translate("QgsMapserverExportBase", "dd", None, QtGui.QApplication.UnicodeUTF8), QtGui.QApplication.translate("QgsMapserverExportBase", "feet", None, QtGui.QApplication.UnicodeUTF8), QtGui.QApplication.translate("QgsMapserverExportBase", "miles", None, QtGui.QApplication.UnicodeUTF8), QtGui.QApplication.translate("QgsMapserverExportBase", "inches", None, QtGui.QApplication.UnicodeUTF8), QtGui.QApplication.translate("QgsMapserverExportBase", "kilometers", None, QtGui.QApplication.UnicodeUTF8) ]
        # Item text is the (translatable) unit name; the untranslated unit is
        # kept as item data so export code stays locale-independent.
        for unit in units:
            self.ui.cmbMapUnits.addItem( QtGui.QApplication.translate("QgsMapserverExportBase", unit, None, QtGui.QApplication.UnicodeUTF8), QtCore.QVariant(unit) )
        # TODO: set default unit. Is now the first value entered in the unit-list above
        # Set defaults from ms_export.py:
        self.ui.txtMapServerUrl.setText(defaults.mapServerUrl)
        self.ui.txtFontsetPath.setText(defaults.fontsPath)
        self.ui.txtSymbolsetPath.setText(defaults.symbolsPath)
        self.ui.checkBoxAntiAlias.setChecked(defaults.antialias)
        self.ui.checkBoxDump.setChecked(defaults.dump)
        self.ui.checkBoxForce.setChecked(defaults.force)
        self.ui.checkBoxPartials.setChecked(defaults.partials)
        self.ui.txtMapWidth.setText(defaults.width)
        self.ui.txtMapHeight.setText(defaults.height)
| gpl-2.0 |
ABaldwinHunter/django-clone | tests/utils_tests/test_numberformat.py | 307 | 4049 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from sys import float_info
from unittest import TestCase
from django.utils.numberformat import format as nformat
class TestNumberFormat(TestCase):
    """Tests for django.utils.numberformat.format (imported as nformat)."""

    def test_format_number(self):
        # int/float inputs: decimal_pos pads, grouping only applies when
        # forced (or required by localization, not exercised here).
        self.assertEqual(nformat(1234, '.'), '1234')
        self.assertEqual(nformat(1234.2, '.'), '1234.2')
        self.assertEqual(nformat(1234, '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=','),
                         '1234')
        self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=',',
                                 force_grouping=True), '12,34')
        self.assertEqual(nformat(-1234.33, '.', decimal_pos=1), '-1234.3')

    def test_format_string(self):
        # str inputs behave like numbers; multi-char thousand separators work.
        self.assertEqual(nformat('1234', '.'), '1234')
        self.assertEqual(nformat('1234.2', '.'), '1234.2')
        self.assertEqual(nformat('1234', '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=','),
                         '1234')
        self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=',',
                                 force_grouping=True), '12,34')
        self.assertEqual(nformat('-1234.33', '.', decimal_pos=1), '-1234.3')
        self.assertEqual(nformat('10000', '.', grouping=3,
                                 thousand_sep='comma', force_grouping=True),
                         '10comma000')

    def test_large_number(self):
        # Numbers around sys.float_info.max must not degrade to scientific
        # notation; the templates below are the full decimal expansions.
        most_max = ('{}179769313486231570814527423731704356798070567525844996'
                    '598917476803157260780028538760589558632766878171540458953'
                    '514382464234321326889464182768467546703537516986049910576'
                    '551282076245490090389328944075868508455133942304583236903'
                    '222948165808559332123348274797826204144723168738177180919'
                    '29988125040402618412485836{}')
        most_max2 = ('{}35953862697246314162905484746340871359614113505168999'
                     '31978349536063145215600570775211791172655337563430809179'
                     '07028764928468642653778928365536935093407075033972099821'
                     '15310256415249098018077865788815173701691026788460916647'
                     '38064458963316171186642466965495956524082894463374763543'
                     '61838599762500808052368249716736')
        int_max = int(float_info.max)
        self.assertEqual(nformat(int_max, '.'), most_max.format('', '8'))
        self.assertEqual(nformat(int_max + 1, '.'), most_max.format('', '9'))
        self.assertEqual(nformat(int_max * 2, '.'), most_max2.format(''))
        self.assertEqual(nformat(0 - int_max, '.'), most_max.format('-', '8'))
        self.assertEqual(nformat(-1 - int_max, '.'), most_max.format('-', '9'))
        self.assertEqual(nformat(-2 * int_max, '.'), most_max2.format('-'))

    def test_decimal_numbers(self):
        # Decimal inputs mirror the int/float behavior, including tiny values
        # that would print in scientific notation via str().
        self.assertEqual(nformat(Decimal('1234'), '.'), '1234')
        self.assertEqual(nformat(Decimal('1234.2'), '.'), '1234.2')
        self.assertEqual(nformat(Decimal('1234'), '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=','), '1234')
        self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
        self.assertEqual(nformat(Decimal('-1234.33'), '.', decimal_pos=1), '-1234.3')
        self.assertEqual(nformat(Decimal('0.00000001'), '.', decimal_pos=8), '0.00000001')

    def test_decimal_subclass(self):
        # nformat must honor a subclass's custom __format__.
        class EuroDecimal(Decimal):
            """
            Wrapper for Decimal which prefixes each amount with the € symbol.
            """
            def __format__(self, specifier, **kwargs):
                amount = super(EuroDecimal, self).__format__(specifier, **kwargs)
                return '€ {}'.format(amount)

        price = EuroDecimal('1.23')
        self.assertEqual(nformat(price, ','), '€ 1,23')
| bsd-3-clause |
biblepay/biblepay | test/functional/test_framework/test_framework.py | 1 | 42692 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2014-2020 The Däsh Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import copy
from collections import deque
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
PortSeed,
MAX_NODES,
assert_equal,
check_json_precision,
connect_nodes_bi,
connect_nodes,
copy_datadir,
disconnect_nodes,
force_finish_mnsync,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
satoshi_round,
sync_blocks,
sync_mempools,
wait_until,
)
class TestStatus(Enum):
    """Overall outcome of a test script run."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3

# Process exit codes reported for each TestStatus outcome.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77  # conventional automake-style "skipped" exit code

# Fixed base timestamp used by the framework; presumably the regtest
# genesis block time — TODO confirm against chainparams.
GENESISTIME = 1417713337
class BitcoinTestFramework():
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
    def __init__(self):
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        # When True, nodes start from fresh datadirs instead of the cached
        # pregenerated chain.
        self.setup_clean_chain = False
        self.nodes = []    # TestNode instances, populated during setup
        self.mocktime = 0  # non-zero => nodes run with a fixed mock clock
        self.supports_cli = False
        self.extra_args_from_options = []
        # Subclass hook: must at least set self.num_nodes.
        self.set_test_params()

        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave biblepayds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop biblepayds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing biblepayd/biblepay-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use biblepay-cli instead of RPC for all commands")
parser.add_option("--biblepayd-arg", dest="biblepayd_extra_args", default=[], type='string', action='append',
help="Pass extra args to all biblepayd instances")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
self.extra_args_from_options = self.options.biblepayd_extra_args
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
try:
if self.nodes:
self.stop_nodes()
except BaseException as e:
success = False
self.log.exception("Unexpected exception caught during shutdown")
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: biblepayds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From", fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
self.set_genesis_mocktime()
else:
self._initialize_chain()
self.set_cache_mocktime()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
stderr = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
if hasattr(self, "stderr"):
stderr = self.stderr
self.add_nodes(self.num_nodes, extra_args, stderr=stderr)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
old_num_nodes = len(self.nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(old_num_nodes + i, self.options.tmpdir, extra_args[i], self.extra_args_from_options, rpchost, timewait=timewait, binary=binary[i], stderr=stderr, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a biblepayd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, stderr=None, *args, **kwargs):
"""Start multiple biblepayds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], stderr, *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, wait=0):
"""Stop a biblepayd test node"""
self.nodes[i].stop_node(wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple biblepayd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'biblepayd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "biblepayd should have exited with an error"
else:
assert_msg = "biblepayd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_blocks(self, nodes=None, **kwargs):
sync_blocks(nodes or self.nodes, **kwargs)
def sync_mempools(self, nodes=None, **kwargs):
if self.mocktime != 0:
if 'wait' not in kwargs:
kwargs['wait'] = 0.1
if 'wait_func' not in kwargs:
kwargs['wait_func'] = lambda: self.bump_mocktime(3, nodes=nodes)
sync_mempools(nodes or self.nodes, **kwargs)
def sync_all(self, nodes=None, **kwargs):
self.sync_blocks(nodes, **kwargs)
self.sync_mempools(nodes, **kwargs)
def disable_mocktime(self):
self.mocktime = 0
for node in self.nodes:
node.mocktime = 0
def bump_mocktime(self, t, update_nodes=True, nodes=None):
self.mocktime += t
if update_nodes:
set_node_times(nodes or self.nodes, self.mocktime)
def set_cache_mocktime(self):
# For backwared compatibility of the python scripts
# with previous versions of the cache, set MOCKTIME
# to regtest genesis time + (201 * 156)
self.mocktime = GENESISTIME + (201 * 156)
for node in self.nodes:
node.mocktime = self.mocktime
def set_genesis_mocktime(self):
self.mocktime = GENESISTIME
for node in self.nodes:
node.mocktime = self.mocktime
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as biblepayd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self, extra_args=None, stderr=None):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run biblepayds:
self.set_genesis_mocktime()
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BIBLEPAYD", "biblepayd"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0", "-mocktime="+str(GENESISTIME)]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
if extra_args is not None:
args.extend(extra_args)
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], extra_args_from_options=self.extra_args_from_options, rpchost=None, timewait=None, binary=None, stderr=stderr, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
block_time = GENESISTIME
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
self.sync_blocks()
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallets', 'chainstate', 'blocks', 'evodb', 'llmq', 'backups']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in biblepay.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
# Amount (in coins) locked as collateral when funding one masternode.
MASTERNODE_COLLATERAL = 1000
class MasternodeInfo:
    """Plain record describing one registered masternode: its ProRegTx data
    and the collateral outpoint that funds it."""
    def __init__(self, proTxHash, ownerAddr, votingAddr, pubKeyOperator, keyOperator, collateral_address, collateral_txid, collateral_vout):
        self.proTxHash = proTxHash                  # hash of the ProRegTx that registered this MN
        self.ownerAddr = ownerAddr                  # owner key address
        self.votingAddr = votingAddr                # voting key address
        self.pubKeyOperator = pubKeyOperator        # BLS operator public key
        self.keyOperator = keyOperator              # BLS operator secret key (passed via -masternodeblsprivkey)
        self.collateral_address = collateral_address
        self.collateral_txid = collateral_txid      # txid of the collateral output
        self.collateral_vout = collateral_vout      # vout index of the collateral output
class BiblePayTestFramework(BitcoinTestFramework):
    """Test framework base class with masternode/LLMQ support on top of
    BitcoinTestFramework."""

    def set_biblepay_test_params(self, num_nodes, masterodes_count, extra_args=None, fast_dip3_enforcement=False):
        # NOTE: the parameter name `masterodes_count` (sic) is kept as-is for
        # backward compatibility with existing tests that pass it by keyword.
        self.mn_count = masterodes_count
        self.num_nodes = num_nodes
        self.mninfo = []
        self.setup_clean_chain = True
        self.is_network_split = False
        # additional args
        if extra_args is None:
            extra_args = [[]] * num_nodes
        assert_equal(len(extra_args), num_nodes)
        self.extra_args = [copy.deepcopy(a) for a in extra_args]
        self.extra_args[0] += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
        self.fast_dip3_enforcement = fast_dip3_enforcement
        if fast_dip3_enforcement:
            for i in range(0, num_nodes):
                self.extra_args[i].append("-dip3params=30:50")
        # LLMQ default test params (no need to pass -llmqtestparams)
        self.llmq_size = 3
        self.llmq_threshold = 2

    def set_biblepay_dip8_activation(self, activate_after_block):
        """Configure DIP8 (chainlocks) BIP9-style activation window around the given height."""
        window = int((activate_after_block + 2) / 3)
        threshold = int((window + 1) / 2)
        for i in range(0, self.num_nodes):
            self.extra_args[i].append("-vbparams=dip0008:0:999999999999:%d:%d" % (window, threshold))

    def set_biblepay_llmq_test_params(self, llmq_size, llmq_threshold):
        self.llmq_size = llmq_size
        self.llmq_threshold = llmq_threshold
        for i in range(0, self.num_nodes):
            self.extra_args[i].append("-llmqtestparams=%d:%d" % (self.llmq_size, self.llmq_threshold))

    def create_simple_node(self):
        """Add and start one plain (non-masternode) node, connected to all existing nodes."""
        idx = len(self.nodes)
        self.add_nodes(1, extra_args=[self.extra_args[idx]])
        self.start_node(idx)
        for i in range(0, idx):
            connect_nodes(self.nodes[i], idx)

    def prepare_masternodes(self):
        self.log.info("Preparing %d masternodes" % self.mn_count)
        for idx in range(0, self.mn_count):
            self.prepare_masternode(idx)

    def prepare_masternode(self, idx):
        """Fund and register one masternode (via protx register_fund or register)."""
        bls = self.nodes[0].bls('generate')
        address = self.nodes[0].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)

        txraw = self.nodes[0].getrawtransaction(txid, True)
        collateral_vout = 0
        for vout_idx in range(0, len(txraw["vout"])):
            vout = txraw["vout"][vout_idx]
            if vout["value"] == MASTERNODE_COLLATERAL:
                collateral_vout = vout_idx
        self.nodes[0].lockunspent(False, [{'txid': txid, 'vout': collateral_vout}])

        # send to same address to reserve some funds for fees
        self.nodes[0].sendtoaddress(address, 0.001)

        ownerAddr = self.nodes[0].getnewaddress()
        votingAddr = self.nodes[0].getnewaddress()
        rewardsAddr = self.nodes[0].getnewaddress()

        port = p2p_port(len(self.nodes) + idx)
        # Alternate between register_fund (funds the collateral in the ProRegTx
        # itself) and register (references the pre-created collateral output).
        if (idx % 2) == 0:
            self.nodes[0].lockunspent(True, [{'txid': txid, 'vout': collateral_vout}])
            proTxHash = self.nodes[0].protx('register_fund', address, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
        else:
            self.nodes[0].generate(1)
            proTxHash = self.nodes[0].protx('register', txid, collateral_vout, '127.0.0.1:%d' % port, ownerAddr, bls['public'], votingAddr, 0, rewardsAddr, address)
        self.nodes[0].generate(1)

        self.mninfo.append(MasternodeInfo(proTxHash, ownerAddr, votingAddr, bls['public'], bls['secret'], address, txid, collateral_vout))
        self.sync_all()

        self.log.info("Prepared masternode %d: collateral_txid=%s, collateral_vout=%d, protxHash=%s" % (idx, txid, collateral_vout, proTxHash))

    def remove_mastermode(self, idx):
        """Spend a masternode's collateral, which removes it from the deterministic MN list."""
        mn = self.mninfo[idx]
        rawtx = self.nodes[0].createrawtransaction([{"txid": mn.collateral_txid, "vout": mn.collateral_vout}], {self.nodes[0].getnewaddress(): 999.9999})
        rawtx = self.nodes[0].signrawtransaction(rawtx)
        self.nodes[0].sendrawtransaction(rawtx["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        self.mninfo.remove(mn)
        self.log.info("Removed masternode %d", idx)

    def prepare_datadirs(self):
        # stop faucet node so that we can copy the datadir
        self.stop_node(0)

        start_idx = len(self.nodes)
        for idx in range(0, self.mn_count):
            copy_datadir(0, idx + start_idx, self.options.tmpdir)

        # restart faucet node
        self.start_node(0)

    def start_masternodes(self):
        self.log.info("Starting %d masternodes", self.mn_count)

        start_idx = len(self.nodes)

        self.add_nodes(self.mn_count)
        executor = ThreadPoolExecutor(max_workers=20)

        def do_connect(idx):
            # Connect to the control node only, masternodes should take care of intra-quorum connections themselves
            connect_nodes(self.mninfo[idx].node, 0)

        jobs = []

        # start up nodes in parallel
        for idx in range(0, self.mn_count):
            self.mninfo[idx].nodeIdx = idx + start_idx
            jobs.append(executor.submit(self.start_masternode, self.mninfo[idx]))

        # wait for all nodes to start up
        for job in jobs:
            job.result()
        jobs.clear()

        # connect nodes in parallel
        for idx in range(0, self.mn_count):
            jobs.append(executor.submit(do_connect, idx))

        # wait for all nodes to connect
        for job in jobs:
            job.result()
        jobs.clear()

        executor.shutdown()

    def start_masternode(self, mninfo, extra_args=None):
        args = ['-masternodeblsprivkey=%s' % mninfo.keyOperator] + self.extra_args[mninfo.nodeIdx]
        if extra_args is not None:
            args += extra_args
        self.start_node(mninfo.nodeIdx, extra_args=args)
        mninfo.node = self.nodes[mninfo.nodeIdx]
        force_finish_mnsync(mninfo.node)

    def setup_network(self):
        self.log.info("Creating and starting controller node")
        self.add_nodes(1, extra_args=[self.extra_args[0]])
        self.start_node(0)
        required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
        self.log.info("Generating %d coins" % required_balance)
        while self.nodes[0].getbalance() < required_balance:
            self.bump_mocktime(1)
            self.nodes[0].generate(10)
        num_simple_nodes = self.num_nodes - self.mn_count - 1
        self.log.info("Creating and starting %s simple nodes", num_simple_nodes)
        for i in range(0, num_simple_nodes):
            self.create_simple_node()

        self.log.info("Activating DIP3")
        if not self.fast_dip3_enforcement:
            while self.nodes[0].getblockcount() < 500:
                self.nodes[0].generate(10)
        self.sync_all()

        # create masternodes
        self.prepare_masternodes()
        self.prepare_datadirs()
        self.start_masternodes()

        # non-masternodes where disconnected from the control node during prepare_datadirs,
        # let's reconnect them back to make sure they receive updates
        for i in range(0, num_simple_nodes):
            connect_nodes(self.nodes[i+1], 0)

        self.bump_mocktime(1)
        self.nodes[0].generate(1)
        # sync nodes
        self.sync_all()

        self.bump_mocktime(1)

        mn_info = self.nodes[0].masternodelist("status")
        assert (len(mn_info) == self.mn_count)
        for status in mn_info.values():
            assert (status == 'ENABLED')

    def create_raw_tx(self, node_from, node_to, amount, min_inputs, max_inputs):
        """Build and sign a raw tx from node_from to node_to with between
        min_inputs and max_inputs inputs; returns the decoded+signed result."""
        assert (min_inputs <= max_inputs)
        # fill inputs
        inputs = []
        balances = node_from.listunspent()
        in_amount = 0.0
        last_amount = 0.0
        for tx in balances:
            if len(inputs) < min_inputs:
                txin = {}
                txin["txid"] = tx['txid']
                txin['vout'] = tx['vout']
                in_amount += float(tx['amount'])
                inputs.append(txin)
            elif in_amount > amount:
                break
            elif len(inputs) < max_inputs:
                txin = {}
                txin["txid"] = tx['txid']
                txin['vout'] = tx['vout']
                in_amount += float(tx['amount'])
                inputs.append(txin)
            else:
                # already at max_inputs but not enough value yet: swap the last
                # input for the current (larger accumulated) one
                txin = {}
                txin["txid"] = tx['txid']
                txin['vout'] = tx['vout']
                in_amount -= last_amount
                in_amount += float(tx['amount'])
                inputs[-1] = txin
            last_amount = float(tx['amount'])

        assert (len(inputs) >= min_inputs)
        assert (len(inputs) <= max_inputs)
        assert (in_amount >= amount)
        # fill outputs
        receiver_address = node_to.getnewaddress()
        change_address = node_from.getnewaddress()
        fee = 0.001
        outputs = {}
        outputs[receiver_address] = satoshi_round(amount)
        outputs[change_address] = satoshi_round(in_amount - amount - fee)
        rawtx = node_from.createrawtransaction(inputs, outputs)
        ret = node_from.signrawtransaction(rawtx)
        decoded = node_from.decoderawtransaction(ret['hex'])
        ret = {**decoded, **ret}
        return ret

    def wait_for_tx(self, txid, node, expected=True, timeout=15):
        def check_tx():
            try:
                return node.getrawtransaction(txid)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit inside the wait loop.
                return False
        if wait_until(check_tx, timeout=timeout, sleep=0.5, do_assert=expected) and not expected:
            raise AssertionError("waiting unexpectedly succeeded")

    def wait_for_instantlock(self, txid, node, expected=True, timeout=15):
        def check_instantlock():
            try:
                return node.getrawtransaction(txid, True)["instantlock"]
            except Exception:
                # BUGFIX: narrowed from a bare `except:` (see wait_for_tx).
                return False
        if wait_until(check_instantlock, timeout=timeout, sleep=0.5, do_assert=expected) and not expected:
            raise AssertionError("waiting unexpectedly succeeded")

    def wait_for_chainlocked_block(self, node, block_hash, expected=True, timeout=15):
        def check_chainlocked_block():
            try:
                block = node.getblock(block_hash)
                return block["confirmations"] > 0 and block["chainlock"]
            except Exception:
                # BUGFIX: narrowed from a bare `except:` (see wait_for_tx).
                return False
        if wait_until(check_chainlocked_block, timeout=timeout, sleep=0.1, do_assert=expected) and not expected:
            raise AssertionError("waiting unexpectedly succeeded")

    def wait_for_chainlocked_block_all_nodes(self, block_hash, timeout=15):
        for node in self.nodes:
            self.wait_for_chainlocked_block(node, block_hash, timeout=timeout)

    def wait_for_best_chainlock(self, node, block_hash, timeout=15):
        wait_until(lambda: node.getbestchainlock()["blockhash"] == block_hash, timeout=timeout, sleep=0.1)

    def wait_for_sporks_same(self, timeout=30):
        def check_sporks_same():
            sporks = self.nodes[0].spork('show')
            return all(node.spork('show') == sporks for node in self.nodes[1:])
        wait_until(check_sporks_same, timeout=timeout, sleep=0.5)

    def wait_for_quorum_connections(self, expected_connections, nodes, timeout=60, wait_proc=None):
        """Wait until every node reports at least expected_connections connected
        llmq_test quorum members (or has no active DKG session)."""
        def check_quorum_connections():
            all_ok = True
            for node in nodes:
                s = node.quorum("dkgstatus")
                if s["session"] == {}:
                    continue
                if "quorumConnections" not in s:
                    all_ok = False
                    break
                s = s["quorumConnections"]
                if "llmq_test" not in s:
                    all_ok = False
                    break
                cnt = 0
                for c in s["llmq_test"]:
                    if c["connected"]:
                        cnt += 1
                if cnt < expected_connections:
                    all_ok = False
                    break
            if not all_ok and wait_proc is not None:
                wait_proc()
            return all_ok
        wait_until(check_quorum_connections, timeout=timeout, sleep=1)

    def wait_for_masternode_probes(self, mninfos, timeout=30, wait_proc=None):
        """Wait until quorum probe timestamps between the given masternodes are recent enough for DKG."""
        def check_probes():
            def ret():
                if wait_proc is not None:
                    wait_proc()
                return False

            for mn in mninfos:
                s = mn.node.quorum('dkgstatus')
                if s["session"] == {}:
                    continue
                if "quorumConnections" not in s:
                    return ret()
                s = s["quorumConnections"]
                if "llmq_test" not in s:
                    return ret()

                for c in s["llmq_test"]:
                    if c["proTxHash"] == mn.proTxHash:
                        continue
                    if not c["outbound"]:
                        mn2 = mn.node.protx('info', c["proTxHash"])
                        if [m for m in mninfos if c["proTxHash"] == m.proTxHash]:
                            # MN is expected to be online and functioning, so let's verify that the last successful
                            # probe is not too old. Probes are retried after 50 minutes, while DKGs consider a probe
                            # as failed after 60 minutes
                            if mn2['metaInfo']['lastOutboundSuccessElapsed'] > 55 * 60:
                                return ret()
                        else:
                            # MN is expected to be offline, so let's only check that the last probe is not too long ago
                            if mn2['metaInfo']['lastOutboundAttemptElapsed'] > 55 * 60 and mn2['metaInfo']['lastOutboundSuccessElapsed'] > 55 * 60:
                                return ret()
            return True
        wait_until(check_probes, timeout=timeout, sleep=1)

    def wait_for_quorum_phase(self, quorum_hash, phase, expected_member_count, check_received_messages, check_received_messages_count, mninfos, timeout=30, sleep=0.1):
        """Wait until all DKG members report the given phase (and optionally a
        minimum count of received DKG messages) for quorum_hash."""
        def check_dkg_session():
            all_ok = True
            member_count = 0
            for mn in mninfos:
                s = mn.node.quorum("dkgstatus")["session"]
                if "llmq_test" not in s:
                    continue
                member_count += 1
                s = s["llmq_test"]
                if s["quorumHash"] != quorum_hash:
                    all_ok = False
                    break
                if "phase" not in s:
                    all_ok = False
                    break
                if s["phase"] != phase:
                    all_ok = False
                    break
                if check_received_messages is not None:
                    if s[check_received_messages] < check_received_messages_count:
                        all_ok = False
                        break
            if all_ok and member_count != expected_member_count:
                return False
            return all_ok
        wait_until(check_dkg_session, timeout=timeout, sleep=sleep)

    def wait_for_quorum_commitment(self, quorum_hash, nodes, timeout=15):
        """Wait until all nodes have a minable final commitment for quorum_hash."""
        def check_dkg_comitments():
            all_ok = True
            for node in nodes:
                s = node.quorum("dkgstatus")
                if "minableCommitments" not in s:
                    all_ok = False
                    break
                s = s["minableCommitments"]
                if "llmq_test" not in s:
                    all_ok = False
                    break
                s = s["llmq_test"]
                if s["quorumHash"] != quorum_hash:
                    all_ok = False
                    break
            return all_ok
        wait_until(check_dkg_comitments, timeout=timeout, sleep=0.1)

    def mine_quorum(self, expected_members=None, expected_connections=2, expected_contributions=None, expected_complaints=0, expected_justifications=0, expected_commitments=None, mninfos=None):
        """Drive a full llmq_test DKG through all six phases and mine the final
        commitment; returns the new quorum hash."""
        if expected_members is None:
            expected_members = self.llmq_size
        if expected_contributions is None:
            expected_contributions = self.llmq_size
        if expected_commitments is None:
            expected_commitments = self.llmq_size
        if mninfos is None:
            mninfos = self.mninfo

        self.log.info("Mining quorum: expected_members=%d, expected_connections=%d, expected_contributions=%d, expected_complaints=%d, expected_justifications=%d, "
                      "expected_commitments=%d" % (expected_members, expected_connections, expected_contributions, expected_complaints,
                                                   expected_justifications, expected_commitments))

        nodes = [self.nodes[0]] + [mn.node for mn in mninfos]

        quorums = self.nodes[0].quorum("list")

        # move forward to next DKG
        skip_count = 24 - (self.nodes[0].getblockcount() % 24)
        if skip_count != 0:
            self.bump_mocktime(1, nodes=nodes)
            self.nodes[0].generate(skip_count)
        sync_blocks(nodes)

        q = self.nodes[0].getbestblockhash()

        self.log.info("Waiting for phase 1 (init)")
        self.wait_for_quorum_phase(q, 1, expected_members, None, 0, mninfos)
        self.wait_for_quorum_connections(expected_connections, nodes, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
        if self.nodes[0].spork('show')['SPORK_21_QUORUM_ALL_CONNECTED'] == 0:
            self.wait_for_masternode_probes(mninfos, wait_proc=lambda: self.bump_mocktime(1, nodes=nodes))
        self.bump_mocktime(1, nodes=nodes)
        self.nodes[0].generate(2)
        sync_blocks(nodes)

        self.log.info("Waiting for phase 2 (contribute)")
        self.wait_for_quorum_phase(q, 2, expected_members, "receivedContributions", expected_contributions, mninfos)
        self.bump_mocktime(1, nodes=nodes)
        self.nodes[0].generate(2)
        sync_blocks(nodes)

        self.log.info("Waiting for phase 3 (complain)")
        self.wait_for_quorum_phase(q, 3, expected_members, "receivedComplaints", expected_complaints, mninfos)
        self.bump_mocktime(1, nodes=nodes)
        self.nodes[0].generate(2)
        sync_blocks(nodes)

        self.log.info("Waiting for phase 4 (justify)")
        self.wait_for_quorum_phase(q, 4, expected_members, "receivedJustifications", expected_justifications, mninfos)
        self.bump_mocktime(1, nodes=nodes)
        self.nodes[0].generate(2)
        sync_blocks(nodes)

        self.log.info("Waiting for phase 5 (commit)")
        self.wait_for_quorum_phase(q, 5, expected_members, "receivedPrematureCommitments", expected_commitments, mninfos)
        self.bump_mocktime(1, nodes=nodes)
        self.nodes[0].generate(2)
        sync_blocks(nodes)

        self.log.info("Waiting for phase 6 (mining)")
        self.wait_for_quorum_phase(q, 6, expected_members, None, 0, mninfos)

        self.log.info("Waiting final commitment")
        self.wait_for_quorum_commitment(q, nodes)

        self.log.info("Mining final commitment")
        self.bump_mocktime(1, nodes=nodes)
        self.nodes[0].generate(1)
        while quorums == self.nodes[0].quorum("list"):
            time.sleep(2)
            self.bump_mocktime(1, nodes=nodes)
            self.nodes[0].generate(1)
            sync_blocks(nodes)
        new_quorum = self.nodes[0].quorum("list", 1)["llmq_test"][0]
        quorum_info = self.nodes[0].quorum("info", 100, new_quorum)

        # Mine 8 (SIGN_HEIGHT_OFFSET) more blocks to make sure that the new quorum gets eligible for signing sessions
        self.nodes[0].generate(8)

        sync_blocks(nodes)

        self.log.info("New quorum: height=%d, quorumHash=%s, minedBlock=%s" % (quorum_info["height"], new_quorum, quorum_info["minedBlock"]))

        return new_quorum

    def get_quorum_masternodes(self, q):
        qi = self.nodes[0].quorum('info', 100, q)
        result = []
        for m in qi['members']:
            result.append(self.get_mninfo(m['proTxHash']))
        return result

    def get_mninfo(self, proTxHash):
        for mn in self.mninfo:
            if mn.proTxHash == proTxHash:
                return mn
        return None

    def wait_for_mnauth(self, node, count, timeout=10):
        """Wait until node has at least `count` MNAUTH-verified peers."""
        def test():
            pi = node.getpeerinfo()
            c = 0
            for p in pi:
                if "verified_proregtx_hash" in p and p["verified_proregtx_hash"] != "":
                    c += 1
            return c >= count
        wait_until(test, timeout=timeout)
class SkipTest(Exception):
    """This exception is raised to skip a test"""
    def __init__(self, message):
        # `message` is read by BitcoinTestFramework.main() when logging the skip reason.
        self.message = message
| mit |
elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 1B/instances/7_1_workflow_full_10files_secondary_w1_1sh_3rs_noannot_with_proj_3s/statscpumemory_0/StatsCPUMemory.py | 50 | 3364 | #!/usr/bin/env python
"""
This activity will calculate the average_cpu of CPU request in whole data.
These fields are optional and could be null.
"""
# It will connect to DataStoreClient
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_StatsCPUMemory_0
import math
# connector and config
client = DataStoreClient("mongodb", ConfigDB_StatsCPUMemory_0)
# according to config
data = client.getData() # return an array of docs (like a csv reader)
output = []
sum_cpu = 0
sum_memory = 0
sum_ratio = 0
total_valid_tasks = 0
total_tasks = 0
total_variance_cpu = 0
total_variance_memory = 0
total_variance_ratio = 0
if(data):
# processing
while True:
doc = data.next()
if doc is None:
break;
total_tasks += 1
if doc['CPU request'] and doc['memory request']:
sum_cpu = sum_cpu + float(doc['CPU request'])
sum_memory = sum_memory + float(doc['memory request'])
ratio = float(doc['CPU request'])/float(doc['memory request']) if float(doc['memory request']) > 0 else 0
sum_ratio = sum_ratio + ratio
total_valid_tasks += 1
# average
average_cpu = sum_cpu / total_valid_tasks if total_valid_tasks > 0 else None
average_memory = sum_memory / total_valid_tasks if total_valid_tasks > 0 else None
average_ratio = sum_ratio / total_valid_tasks if total_valid_tasks > 0 else None
# variance
if average_cpu and average_memory and average_ratio:
data = client.getData() # return an array of docs (like a csv reader)
# processing
while True:
doc = data.next()
if doc is None:
break;
if doc['CPU request'] and doc['memory request']:
total_variance_cpu = total_variance_cpu + (float(doc['CPU request']) - average_cpu) ** 2
total_variance_memory = total_variance_memory + (float(doc['memory request']) - average_memory) ** 2
ratio = float(doc['CPU request'])/float(doc['memory request']) if float(doc['memory request']) > 0 else 0
total_variance_ratio = total_variance_ratio + (ratio - average_ratio) ** 2
newline = {}
newline['sum cpu'] = sum_cpu
newline['sum variance cpu'] = total_variance_cpu
newline['average cpu'] = average_cpu if average_cpu > 0 else None
newline['standard deviation cpu'] = math.sqrt(total_variance_cpu/total_valid_tasks)
newline['variance cpu'] = total_variance_cpu/total_valid_tasks
newline['sum memory'] = sum_memory
newline['sum variance memory'] = total_variance_memory
newline['average memory'] = average_memory if average_memory > 0 else None
newline['standard deviation memory'] = math.sqrt(total_variance_memory/total_valid_tasks)
newline['variance memory'] = total_variance_memory/total_valid_tasks
newline['sum ratio'] = sum_ratio
newline['sum variance ratio'] = total_variance_ratio
newline['average ratio'] = average_ratio if average_ratio > 0 else None
newline['standard deviation ratio'] = math.sqrt(total_variance_ratio/total_valid_tasks)
newline['variance ratio'] = total_variance_ratio/total_valid_tasks
newline['total valid tasks'] = total_valid_tasks
newline['total tasks'] = total_tasks
output.append(newline)
# save
client.saveData(output)
| gpl-3.0 |
proxysh/Safejumper-for-Mac | buildlinux/env64/lib/python2.7/site-packages/pycparser/ply/ctokens.py | 197 | 3177 | # ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Token names and regular expressions for ANSI C / C++ symbols.
# PLY convention: every name in `tokens` must have a matching `t_NAME`
# rule; string rules are matched longest-regex-first, so e.g. LOR (||)
# wins over OR (|) and ELLIPSIS (...) over PERIOD (.).
tokens = [
    # Literals (identifier, integer constant, float constant, string constant, char const)
    'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',

    # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
    'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
    'LOR', 'LAND', 'LNOT',
    'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',

    # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
    'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
    'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',

    # Increment/decrement (++,--)
    'INCREMENT', 'DECREMENT',

    # Structure dereference (->)
    'ARROW',

    # Ternary operator (?)
    'TERNARY',

    # Delimiters ( ) [ ] { } , . ; :
    'LPAREN', 'RPAREN',
    'LBRACKET', 'RBRACKET',
    'LBRACE', 'RBRACE',
    'COMMA', 'PERIOD', 'SEMI', 'COLON',

    # Ellipsis (...)
    'ELLIPSIS',
]

# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='

# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='

# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'

# ->
t_ARROW = r'->'

# ?
t_TERNARY = r'\?'

# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'

# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'

# Integer literal (optional unsigned/long suffixes in either order)
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'

# Floating literal (decimal or exponent form, optional l/L/f/F suffix)
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'

# String literal (non-greedy; allows escaped characters, forbids raw newlines)
t_STRING = r'\"([^\\\n]|(\\.))*?\"'

# Character constant 'c' or L'c' (L prefix = wide character)
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    # NOTE: in PLY the docstring above IS this token's regex -- do not edit it.
    # Count embedded newlines so line numbers stay accurate across the comment.
    t.lexer.lineno += t.value.count('\n')
    return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
    r'//.*\n'
    # NOTE: the docstring above is the PLY token regex -- do not edit it.
    # A // comment always consumes exactly one newline.
    t.lexer.lineno += 1
    return t
| gpl-2.0 |
daajoe/trellis | trellis/extractor/parambfs.py | 1 | 10608 | # noinspection PyRedundantParentheses
import copy, os
from itertools import permutations
import random
import networkx as nx
# from networkx.drawing.nx_agraph import graphviz_layout, write_dot
# import matplotlib.pyplot as plt
# #import matplotlib
from trellis.extractor.extractor import Extractor
from trellis.td import TreeDecomposition
class ParamExtractor(Extractor):
    """Extracts a bounded local subtree of a tree decomposition via BFS.

    The BFS starts from a large bag and grows a "local" region whose bags
    are later re-solved; the remainder of the decomposition is kept and
    reconnected afterwards with connect_decomp().

    NOTE(review): Python 2 only -- relies on `dict.keys()[...]` indexing,
    `dict.iteritems()`, list-returning `Graph.neighbors()` and the
    `print` statement.
    """

    @staticmethod
    def bfs(decomp, max_bag_size=None, budget=50, rand=False, c1=1.0, c2=0.5, beta=5, gamma=10, delta=2):
        # get the bags from the tree decomposition
        """Grow a local region of `decomp` by BFS from its largest bag.

        :param budget: the number of vertices in the local decomp
        :param max_bag_size: the bagsize from where we want to start bfs
        :type decomp: decomposition
        :param rand: shuffle the BFS queue for randomized exploration
        :param c1, c2: weights trading off bag size vs. overlap with parent
        :param beta: maximum BFS depth absorbed unconditionally
        :param gamma: maximum size of a whole hanging subtree to absorb
        :param delta: slack multiplier on max_bag_size for the stop budget
        :return: (internal_nodes, sub_vertices, rest_decomp)
        """
        rest_decomp = copy.deepcopy(decomp)
        bag_lengths = dict(zip(decomp.bags.keys(), map(len, decomp.bags.values())))
        bags = decomp.bags
        # root of the BFS is the bag with max elements
        root_id = decomp.get_first_node(max_bag_size)
        # NOTE(review): Python 2 idiom -- keys() must be a list for indexing.
        root = bag_lengths.keys()[root_id]
        bfs_queue = [root]
        bfs_depth = dict()
        bfs_common_nodes = {}
        parent = {}
        # initialization for BFS: -1 marks "not visited yet"
        for i in decomp.tree.nodes():
            bfs_depth[i] = -1
            parent[i] = -1
        bfs_depth[root] = 0
        parent[root] = root
        internal_nodes = []
        bfs_common_nodes[root] = decomp.bags[root]
        sub_vertices = set(decomp.bags[root])
        # root is the internal node should not be deleted from the local tree
        internal_nodes.append(root)
        # maybe change this part Not sure how to avoid this.
        while bfs_queue:
            # show_graph(decomp.tree, 1)
            # print "BFS:", bfs_queue
            if rand:
                random.shuffle(bfs_queue)
            v2 = bfs_queue.pop(0)
            # print v2,bfs_queue
            # print v2,decomp.tree[v2]
            # if any of the neighbours have a bag of size > current bag do not continue on this bag
            # changing the checking to the intersection of two bags i.e. check how many vertices are common.
            for w in decomp.tree[v2]:
                # flag == 1 means: w was rejected and the tree must be cut here.
                flag = 0
                if bfs_depth[w] == -1:
                    parent[w] = v2
                    bfs_common_nodes[w] = bags[w].intersection(bags[v2])
                    bfs_depth[w] = bfs_depth[v2] + 1
                    # Absorb w when its bag is almost fully shared with v2.
                    if c1 * len(bags[w]) - c2 * len(bfs_common_nodes[w]) <= 1:
                        if w not in bfs_queue and w not in internal_nodes:
                            bfs_queue.append(w)
                        if w not in internal_nodes:
                            internal_nodes.append(w)
                            sub_vertices |= decomp.bags[w]
                        continue
                    # Absorb w unconditionally while still shallow enough.
                    if bfs_depth[w] <= beta:
                        if w not in bfs_queue and w not in internal_nodes:
                            bfs_queue.append(w)
                        if w not in internal_nodes:
                            internal_nodes.append(w)
                            sub_vertices |= decomp.bags[w]
                        continue
                    # Otherwise absorb the whole hanging subtree if it is small.
                    sub_tree = ParamExtractor.subtree(decomp, w, v2)
                    if len(sub_tree) <= gamma:
                        for w1 in sub_tree:
                            if w1 not in bfs_queue and w1 not in internal_nodes:
                                bfs_queue.append(w1)
                                bfs_depth[w1] = bfs_depth[w] + 1
                                parent[w1] = w
                            if w1 not in internal_nodes:
                                internal_nodes.append(w1)
                                sub_vertices |= decomp.bags[w1]
                        continue
                    else:
                        flag = 1
                if flag == 1:
                    # Cut the decomposition between w and its parent by
                    # inserting a fresh separator bag carrying their overlap.
                    new_node = max(rest_decomp.tree.nodes()) + 1
                    rest_decomp.tree.add_node(new_node)
                    rest_decomp.tree.add_edge(new_node, w)
                    rest_decomp.tree.add_edge(new_node, parent[w])
                    rest_decomp.tree.remove_edge(w, parent[w])
                    rest_decomp.bags[new_node] = set(bfs_common_nodes[w])
                    if w in internal_nodes:
                        internal_nodes.remove(w)
                    if new_node not in internal_nodes:
                        internal_nodes.append(new_node)
            # Stop once the local region is large enough.
            if len(sub_vertices) >= budget + delta * max_bag_size:
                break
        print len(internal_nodes), len(sub_vertices)
        # rest_decomp.show(layout=1)
        return internal_nodes, sub_vertices, rest_decomp

    @staticmethod
    def subtree(decomp, w, v):
        """Return all decomposition nodes in the subtree hanging off `w`,
        when the edge towards `v` is removed (DFS from w away from v)."""
        neigh = decomp.tree.neighbors(w)
        neigh.remove(v)
        dfs_visited = [w]
        while neigh:
            try:
                n = neigh.pop()
                dfs_visited.append(n)
                for i in decomp.tree.neighbors(n):
                    if i in dfs_visited:
                        continue
                    neigh.append(i)
            # NOTE(review): list.pop() raises IndexError, not StopIteration,
            # and the while-guard prevents popping an empty list anyway --
            # this handler looks unreachable; confirm before removing.
            except StopIteration:
                break
        return dfs_visited

    @staticmethod
    def extract_graph(internal_nodes, decomp, g):
        """
        generates graph for the local tree decomposition
        ASSUMPTION: vertices have not been relabelled
        :param g: input graph type: Networkx Graph
        :param internal_nodes: nodes of tree decomposition which are picked by BFS type: list
        :param decomp: Tree decomposition type: Networkx Graph
        :return: sub_graph: graph generated by the local tree decomposition by adding a clique over every connecting leaf bag type: networkx graph
        :return: rest_decomp: Sub Tree Decomposition after removing the local tree decomposition type: networkx Graph
        :return: connecting_nodes: the leaves where the local tree decomposition connects with rest_decomp type: dict
        """
        y = decomp.tree.subgraph(internal_nodes)
        # show_graph(y,layout=1)
        sub_nodes = set()
        for n in y.nodes():
            sub_nodes |= set(decomp.bags[n])
        connecting_nodes = {}
        sub_graph = g.subgraph(sub_nodes)
        for leaf, degree in y.degree().iteritems():
            if degree != 1:
                continue
            # A leaf of the local tree that still has neighbours in the full
            # tree is a boundary bag: keep it in rest_decomp and turn its bag
            # into a clique so the local graph preserves its separator.
            if decomp.tree.degree(leaf) > y.degree(leaf):
                internal_nodes.remove(leaf)
                connecting_nodes[leaf] = decomp.bags[leaf]
                for i, j in permutations(decomp.bags[leaf], r=2):
                    sub_graph.add_edge(i, j)
        rest_decomp = TreeDecomposition(tree=decomp.tree.subgraph(set(decomp.tree.nodes()) - set(internal_nodes)))
        #TODO:
        #,
        # temp_path=self.temp_path,
        # delete_temp=self.delete_temp, plot_if_td_invalid=self.plot_if_td_invalid
        for i in internal_nodes:
            del decomp.bags[i]
        rest_decomp.bags = decomp.bags
        return sub_graph, rest_decomp, connecting_nodes

    @staticmethod
    def extract_decomposition(decomp, g, max_bag_size=None, budget=50,
                              extractor_args={'extractor_c1': 1.0, 'extractor_c2': 0.5, 'extractor_beta': 3,
                                              'extractor_gamma': 5, 'extractor_random': False, 'extractor_delta': 2}):
        """Run bfs() then extract_graph(); returns
        (rest_decomp, sub_graph, connecting_leaves).

        NOTE(review): the mutable dict default is only read, never mutated,
        so it is harmless here -- but confirm callers never modify it.
        """
        internal_nodes, _, rest_decomp = ParamExtractor.bfs(decomp, max_bag_size=max_bag_size, budget=budget,
                                                            c1=extractor_args['extractor_c1'],
                                                            c2=extractor_args['extractor_c2'],
                                                            beta=extractor_args['extractor_beta'],
                                                            gamma=extractor_args['extractor_gamma'],
                                                            rand=extractor_args['extractor_random'],
                                                            delta=extractor_args['extractor_delta'])
        sub_graph, rest_decomp, connecting_leaves = ParamExtractor.extract_graph(internal_nodes,
                                                                                 copy.deepcopy(rest_decomp), g)
        # exit(0)
        return rest_decomp, sub_graph, connecting_leaves

    @staticmethod
    def connect_decomp(rest_decomp, sub_decomp, connecting_nodes, graph, td_name, always_validate=True):
        """Re-attach a re-solved local decomposition to the remainder.

        Each connecting leaf is linked to the first bag of sub_decomp that
        contains its whole bag, preserving the tree-decomposition property.
        """
        if rest_decomp.tree.number_of_nodes() == 0:
            # Nothing was left outside the local region.
            return TreeDecomposition(tree=sub_decomp.tree, bags=sub_decomp.bags, graph=graph, td_name=td_name)
        new_decomp = nx.union(rest_decomp.tree, sub_decomp.tree)
        for node, bag in connecting_nodes.iteritems():
            connect = True
            for key, value in sub_decomp.bags.iteritems():
                rest_decomp.bags[key] = value
                # Connect to only the first superset bag found.
                if bag.issubset(value) and connect:
                    new_decomp.add_edge(node, key)
                    connect = False
        td = TreeDecomposition(tree=new_decomp, bags=rest_decomp.bags, graph=graph, td_name=td_name)
        if always_validate:
            td.validate2()
        return td
#
# def show_graph(graph, layout, nolabel=0, write=0, file_name=None, dnd=0, labels=None):
# """ show graph
# layout 1:graphviz,
# 2:circular,
# 3:spring,
# 4:spectral,
# 5: random,
# 6: shell
# """
# if dnd == 0:
# m = graph.copy()
# pos = graphviz_layout(m)
# if layout == 1:
# pos = graphviz_layout(m)
# elif layout == 2:
# pos = nx.circular_layout(m)
# elif layout == 3:
# pos = nx.spring_layout(m)
# elif layout == 4:
# pos = nx.spectral_layout(m)
# elif layout == 5:
# pos = nx.random_layout(m)
# elif layout == 6:
# pos = nx.shell_layout(m)
# if not nolabel:
# nx.draw_networkx_edge_labels(m, pos)
# nx.draw_networkx_nodes(m, pos)
# if labels:
# labels = {k: '%s:%s'%(k,str(sorted(list(v)))) for k,v in labels.iteritems()}
# nx.draw_networkx_labels(m, pos, labels)
# else:
# nx.draw_networkx_labels(m, pos)
# if write != 0:
# write_dot(m, file_name + ".dot")
# os.system("dot -Tps " + file_name + ".dot -o " + file_name + '.ps')
# else:
# # plt.ion()
# # nx.draw(m, pos)
# # plt.plot(m,pos)
# nx.draw(m, pos)
# # plt.show(block=False)
# plt.show()
| gpl-3.0 |
ASP1234/voc | tests/builtins/test_vars.py | 4 | 1709 | from .. utils import TranspileTestCase, BuiltinFunctionTestCase
from unittest import expectedFailure
class VarsTests(TranspileTestCase):
    # Transpile-and-run test: the embedded source is executed by both
    # CPython and the VOC-compiled version, and their outputs compared.
    @expectedFailure
    def test_simple(self):
        # Marked expectedFailure: vars() support is incomplete in VOC.
        self.assertCodeExecution("""
            print("There are %s vars" % len(vars()))
            x = 1
            y = 'z'
            print("There are %s vars" % len(vars()))

            def method():
                print("In method: there are %s vars" % len(vars()))
                print("vars()['x'] =", vars()['x'])
                print("vars()['y'] =", vars()['y'])
                try:
                    print("vars()['z'] =", vars()['z'])
                except KeyError:
                    print("Variable z not defined")
                vars()[y] = 2
                print("In method: there are %s vars" % len(vars()))

            method()
            print("There are %s vars" % len(vars()))
            print("vars()['x'] =", vars()['x'])
            print("vars()['y'] =", vars()['y'])
            print("vars()['z'] =", vars()['z'])
            print('x', x)
            print('y', y)
            print('z', z)
            print('Done')
            """, run_in_function=False)
class BuiltinVarsFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
    # Auto-generates one test per sample type for each builtin listed here.
    functions = ["vars"]

    # Generated tests that are known to fail under VOC and are skipped.
    not_implemented = [
        'test_bool',
        'test_bytearray',
        'test_bytes',
        'test_class',
        'test_complex',
        'test_dict',
        'test_float',
        'test_frozenset',
        'test_int',
        'test_list',
        'test_None',
        'test_NotImplemented',
        'test_set',
        'test_str',
        'test_tuple',
        'test_range',
        'test_slice',
    ]
| bsd-3-clause |
UManPychron/pychron | pychron/canvas/canvas2D/detector_block_canvas.py | 2 | 1654 | # ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.canvas.canvas2D.scene.detector_block_scene import \
DetectorBlockScene
from pychron.canvas.canvas2D.scene.scene_canvas import SceneCanvas
class DetectorBlockCanvas(SceneCanvas):
    """Canvas for the detector-block display; all drawing state lives in
    the associated DetectorBlockScene, this class only delegates."""
    scene_klass = DetectorBlockScene
    # Minimal chrome: no axes/grids and no interactive zoom/pan.
    show_axes = False
    show_grids = False
    use_zoom = False
    use_pan = False

    def load_canvas_file(self, path):
        # Delegate parsing of the canvas layout file to the scene.
        self.scene.load(path)

    def set_detector_offset(self, det, v):
        self.scene.set_detector_offset(det, v)
        # Force a repaint so the new offset is visible immediately.
        self.invalidate_and_redraw()

    def set_detector_deflection(self, det, v):
        # NOTE(review): unlike set_detector_offset this does not trigger a
        # redraw -- confirm whether that is intentional.
        self.scene.set_detector_deflection(det, v)
# ============= EOF =============================================
| apache-2.0 |
alihalabyah/ansible | v1/ansible/cache/memory.py | 133 | 1252 | # (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.cache.base import BaseCacheModule
class CacheModule(BaseCacheModule):
    """Fact cache backed by a process-local dict.

    Nothing is persisted: the cache lives and dies with the process.
    """

    def __init__(self, *args, **kwargs):
        self._cache = dict()

    def get(self, key):
        # dict.get already yields None for missing keys.
        return self._cache.get(key)

    def set(self, key, value):
        self._cache[key] = value

    def keys(self):
        return self._cache.keys()

    def contains(self, key):
        return key in self._cache

    def delete(self, key):
        # pop() raises KeyError for unknown keys, matching `del`.
        self._cache.pop(key)

    def flush(self):
        # Rebind (rather than clear()) so previously handed-out references
        # keep their old contents, exactly as before.
        self._cache = dict()

    def copy(self):
        return dict(self._cache)
| gpl-3.0 |
tpodowd/boto | boto/manage/__init__.py | 271 | 1108 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit |
bvamanan/ns3 | waf-tools/pkgconfig.py | 52 | 2015 | # -*- mode: python; encoding: utf-8 -*-
# Gustavo Carneiro (gjamc) 2008
import Options
import Configure
import subprocess
import config_c
import sys
def configure(conf):
    """Waf configure hook: locate pkg-config once and cache it in the env.

    Bails out quietly when pkg-config is absent so the optional checks
    defined below simply never run.
    """
    pkg_config = conf.find_program('pkg-config', var='PKG_CONFIG')
    if not pkg_config: return
@Configure.conf
def pkg_check_modules(conf, uselib_name, expression, mandatory=True):
    """Query pkg-config for cflags/libs matching `expression` and store
    them in the build environment under `uselib_name`.

    Returns True on success, False on failure when not mandatory; raises
    Configure.ConfigurationError (or conf.fatal) when mandatory.
    """
    pkg_config = conf.env['PKG_CONFIG']
    if not pkg_config:
        if mandatory:
            conf.fatal("pkg-config is not available")
        else:
            return False

    if Options.options.verbose:
        extra_msg = ' (%s)' % expression
    else:
        extra_msg = ''

    conf.start_msg('Checking for pkg-config flags for %s%s' % (uselib_name, extra_msg))

    argv = [pkg_config, '--cflags', '--libs', expression]
    cmd = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = cmd.communicate()
    retval = cmd.wait()

    conf.to_log('%r: %r (exit code %i)\n%s' % (argv, out, retval, err))

    if retval != 0:
        conf.end_msg(False)
        # Surface pkg-config's own diagnostics to the user.
        sys.stderr.write(err)
    else:
        if Options.options.verbose:
            conf.end_msg(out)
        else:
            conf.end_msg(True)

    if retval == 0:
        # Parse the flags into CFLAGS/LIBS-style env vars for uselib_name.
        conf.parse_flags(out, uselib_name, conf.env)
        conf.env[uselib_name] = True
        return True

    else:
        conf.env[uselib_name] = False
        if mandatory:
            raise Configure.ConfigurationError('pkg-config check failed')
        else:
            return False
@Configure.conf
def pkg_check_module_variable(conf, module, variable):
    """Query pkg-config for the value of `variable` exported by `module`.

    Returns the variable's value (trailing newline stripped); raises
    Configure.ConfigurationError when pkg-config exits non-zero.
    """
    pkg_config = conf.env['PKG_CONFIG']
    if not pkg_config:
        conf.fatal("pkg-config is not available")

    argv = [pkg_config, '--variable', variable, module]
    proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
    out, dummy = proc.communicate()
    retval = proc.wait()
    out = out.rstrip() # strip the trailing newline

    msg_checking = ("Checking for pkg-config variable %r in %s" % (variable, module,))
    conf.check_message_custom(msg_checking, '', out)
    conf.log.write('%r: %r (exit code %i)\n' % (argv, out, retval))

    # Fail fast on error; fall through to return the value otherwise.
    if retval != 0:
        raise Configure.ConfigurationError('pkg-config check failed')
    return out
| gpl-2.0 |
kenshay/ImageScript | Script_Runner/PYTHON/Lib/unittest/test/test_runner.py | 11 | 12019 | import io
import os
import sys
import pickle
import subprocess
import unittest
from unittest.case import _Outcome
from unittest.test.support import (LoggingResult,
ResultWithNoStartTestRunStopTestRun)
class TestCleanUp(unittest.TestCase):
    """Tests for TestCase.addCleanup / doCleanups ordering and error capture."""

    def testCleanUp(self):
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass

        test = TestableTest('testNothing')
        self.assertEqual(test._cleanups, [])

        cleanups = []

        def cleanup1(*args, **kwargs):
            cleanups.append((1, args, kwargs))

        def cleanup2(*args, **kwargs):
            cleanups.append((2, args, kwargs))

        test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
        test.addCleanup(cleanup2)

        # Cleanups are stored as (func, args, kwargs) in registration order...
        self.assertEqual(test._cleanups,
                         [(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')),
                          (cleanup2, (), {})])

        self.assertTrue(test.doCleanups())
        # ...but executed in LIFO order.
        self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])

    def testCleanUpWithErrors(self):
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass

        test = TestableTest('testNothing')
        outcome = test._outcome = _Outcome()

        exc1 = Exception('foo')
        exc2 = Exception('bar')
        def cleanup1():
            raise exc1

        def cleanup2():
            raise exc2

        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)

        # Both cleanup errors must be recorded; doCleanups reports failure.
        self.assertFalse(test.doCleanups())
        self.assertFalse(outcome.success)

        ((_, (Type1, instance1, _)),
         (_, (Type2, instance2, _))) = reversed(outcome.errors)
        self.assertEqual((Type1, instance1), (Exception, exc1))
        self.assertEqual((Type2, instance2), (Exception, exc2))

    def testCleanupInRun(self):
        blowUp = False
        ordering = []

        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                if blowUp:
                    raise Exception('foo')

            def testNothing(self):
                ordering.append('test')

            def tearDown(self):
                ordering.append('tearDown')

        test = TestableTest('testNothing')

        def cleanup1():
            ordering.append('cleanup1')
        def cleanup2():
            ordering.append('cleanup2')
        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)

        def success(some_test):
            self.assertEqual(some_test, test)
            ordering.append('success')

        result = unittest.TestResult()
        result.addSuccess = success

        test.run(result)
        # Full lifecycle: setUp, test, tearDown, then cleanups (LIFO).
        self.assertEqual(ordering, ['setUp', 'test', 'tearDown',
                                    'cleanup2', 'cleanup1', 'success'])

        # When setUp raises, cleanups still run but test/tearDown do not.
        blowUp = True
        ordering = []
        test = TestableTest('testNothing')
        test.addCleanup(cleanup1)
        test.run(result)
        self.assertEqual(ordering, ['setUp', 'cleanup1'])

    def testTestCaseDebugExecutesCleanups(self):
        ordering = []

        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                self.addCleanup(cleanup1)

            def testNothing(self):
                ordering.append('test')

            def tearDown(self):
                ordering.append('tearDown')

        test = TestableTest('testNothing')

        def cleanup1():
            ordering.append('cleanup1')
            # A cleanup may register further cleanups; they must also run.
            test.addCleanup(cleanup2)
        def cleanup2():
            ordering.append('cleanup2')

        test.debug()
        self.assertEqual(ordering, ['setUp', 'test', 'tearDown', 'cleanup1', 'cleanup2'])
class Test_TextTestRunner(unittest.TestCase):
    """Tests for TextTestRunner."""

    def setUp(self):
        # clean the environment from pre-existing PYTHONWARNINGS to make
        # test_warnings results consistent
        self.pythonwarnings = os.environ.get('PYTHONWARNINGS')
        if self.pythonwarnings:
            del os.environ['PYTHONWARNINGS']

    def tearDown(self):
        # bring back pre-existing PYTHONWARNINGS if present
        if self.pythonwarnings:
            os.environ['PYTHONWARNINGS'] = self.pythonwarnings

    def test_init(self):
        # Verify all documented constructor defaults.
        runner = unittest.TextTestRunner()
        self.assertFalse(runner.failfast)
        self.assertFalse(runner.buffer)
        self.assertEqual(runner.verbosity, 1)
        self.assertEqual(runner.warnings, None)
        self.assertTrue(runner.descriptions)
        self.assertEqual(runner.resultclass, unittest.TextTestResult)
        self.assertFalse(runner.tb_locals)

    def test_multiple_inheritance(self):
        class AResult(unittest.TestResult):
            def __init__(self, stream, descriptions, verbosity):
                super(AResult, self).__init__(stream, descriptions, verbosity)

        class ATextResult(unittest.TextTestResult, AResult):
            pass

        # This used to raise an exception due to TextTestResult not passing
        # on arguments in its __init__ super call
        ATextResult(None, None, 1)

    def testBufferAndFailfast(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True,
                                         buffer=True)
        # Use our result object
        runner._makeResult = lambda: result
        runner.run(Test('testFoo'))

        # The runner must forward its failfast/buffer flags to the result.
        self.assertTrue(result.failfast)
        self.assertTrue(result.buffer)

    def test_locals(self):
        runner = unittest.TextTestRunner(stream=io.StringIO(), tb_locals=True)
        result = runner.run(unittest.TestSuite())
        self.assertEqual(True, result.tb_locals)

    def testRunnerRegistersResult(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        originalRegisterResult = unittest.runner.registerResult
        def cleanup():
            unittest.runner.registerResult = originalRegisterResult
        self.addCleanup(cleanup)

        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=io.StringIO())
        # Use our result object
        runner._makeResult = lambda: result

        self.wasRegistered = 0
        def fakeRegisterResult(thisResult):
            self.wasRegistered += 1
            self.assertEqual(thisResult, result)
        unittest.runner.registerResult = fakeRegisterResult

        runner.run(unittest.TestSuite())
        # registerResult must be called exactly once, with our result.
        self.assertEqual(self.wasRegistered, 1)

    def test_works_with_result_without_startTestRun_stopTestRun(self):
        # Legacy result classes lacking startTestRun/stopTestRun must still work.
        class OldTextResult(ResultWithNoStartTestRunStopTestRun):
            separator2 = ''
            def printErrors(self):
                pass

        class Runner(unittest.TextTestRunner):
            def __init__(self):
                super(Runner, self).__init__(io.StringIO())

            def _makeResult(self):
                return OldTextResult()

        runner = Runner()
        runner.run(unittest.TestSuite())

    def test_startTestRun_stopTestRun_called(self):
        class LoggingTextResult(LoggingResult):
            separator2 = ''
            def printErrors(self):
                pass

        class LoggingRunner(unittest.TextTestRunner):
            def __init__(self, events):
                super(LoggingRunner, self).__init__(io.StringIO())
                self._events = events

            def _makeResult(self):
                return LoggingTextResult(self._events)

        events = []
        runner = LoggingRunner(events)
        runner.run(unittest.TestSuite())
        expected = ['startTestRun', 'stopTestRun']
        self.assertEqual(events, expected)

    def test_pickle_unpickle(self):
        # Issue #7197: a TextTestRunner should be (un)pickleable. This is
        # required by test_multiprocessing under Windows (in verbose mode).
        stream = io.StringIO("foo")
        runner = unittest.TextTestRunner(stream)
        for protocol in range(2, pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(runner, protocol)
            obj = pickle.loads(s)
            # StringIO objects never compare equal, a cheap test instead.
            self.assertEqual(obj.stream.getvalue(), stream.getvalue())

    def test_resultclass(self):
        def MockResultClass(*args):
            return args
        STREAM = object()
        DESCRIPTIONS = object()
        VERBOSITY = object()
        runner = unittest.TextTestRunner(STREAM, DESCRIPTIONS, VERBOSITY,
                                         resultclass=MockResultClass)
        self.assertEqual(runner.resultclass, MockResultClass)

        # _makeResult must construct the resultclass with (stream,
        # descriptions, verbosity) -- note runner.stream wraps STREAM.
        expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
        self.assertEqual(runner._makeResult(), expectedresult)

    def test_warnings(self):
        """
        Check that warnings argument of TextTestRunner correctly affects the
        behavior of the warnings.
        """
        # see #10535 and the _test_warnings file for more information

        def get_parse_out_err(p):
            return [b.splitlines() for b in p.communicate()]
        opts = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    cwd=os.path.dirname(__file__))
        ae_msg = b'Please use assertEqual instead.'
        at_msg = b'Please use assertTrue instead.'

        # no args -> all the warnings are printed, unittest warnings only once
        p = subprocess.Popen([sys.executable, '-E', '_test_warnings.py'], **opts)
        with p:
            out, err = get_parse_out_err(p)
        self.assertIn(b'OK', err)
        # check that the total number of warnings in the output is correct
        self.assertEqual(len(out), 12)
        # check that the numbers of the different kind of warnings is correct
        for msg in [b'dw', b'iw', b'uw']:
            self.assertEqual(out.count(msg), 3)
        for msg in [ae_msg, at_msg, b'rw']:
            self.assertEqual(out.count(msg), 1)

        args_list = (
            # passing 'ignore' as warnings arg -> no warnings
            [sys.executable, '_test_warnings.py', 'ignore'],
            # -W doesn't affect the result if the arg is passed
            [sys.executable, '-Wa', '_test_warnings.py', 'ignore'],
            # -W affects the result if the arg is not passed
            [sys.executable, '-Wi', '_test_warnings.py']
        )
        # in all these cases no warnings are printed
        for args in args_list:
            p = subprocess.Popen(args, **opts)
            with p:
                out, err = get_parse_out_err(p)
            self.assertIn(b'OK', err)
            self.assertEqual(len(out), 0)

        # passing 'always' as warnings arg -> all the warnings printed,
        # unittest warnings only once
        p = subprocess.Popen([sys.executable, '_test_warnings.py', 'always'],
                             **opts)
        with p:
            out, err = get_parse_out_err(p)
        self.assertIn(b'OK', err)
        self.assertEqual(len(out), 14)
        for msg in [b'dw', b'iw', b'uw', b'rw']:
            self.assertEqual(out.count(msg), 3)
        for msg in [ae_msg, at_msg]:
            self.assertEqual(out.count(msg), 1)

    def testStdErrLookedUpAtInstantiationTime(self):
        # see issue 10786
        old_stderr = sys.stderr
        f = io.StringIO()
        sys.stderr = f
        try:
            runner = unittest.TextTestRunner()
            self.assertTrue(runner.stream.stream is f)
        finally:
            sys.stderr = old_stderr

    def testSpecifiedStreamUsed(self):
        # see issue 10786
        f = io.StringIO()
        runner = unittest.TextTestRunner(f)
        self.assertTrue(runner.stream.stream is f)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
smilusingjavascript/blink | Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py | 638 | 9836 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""PythonHeaderParserHandler for mod_pywebsocket.
Apache HTTP Server and mod_python must be configured such that this
function is called to handle WebSocket request.
"""
import logging
from mod_python import apache
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import util
# PythonOption to specify the handler root directory.
_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
# PythonOption to specify the handler scan directory.
# This must be a directory under the root directory.
# The default is the root directory.
_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
# PythonOption to allow handlers whose canonical path is
# not under the root directory. It's disallowed by default.
# Set this option with value of 'yes' to allow.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
    'mod_pywebsocket.allow_handlers_outside_root_dir')
# Map from option values to their boolean meanings, used by _parse_option.
# 'Yes' and 'No' are allowed just for compatibility.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
    'off': False, 'no': False, 'on': True, 'yes': True}
# (Obsolete option. Ignored.)
# PythonOption to specify to allow handshake defined in Hixie 75 version
# protocol. The default is None (Off)
_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
# Map from values to their meanings.
_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
class ApacheLogHandler(logging.Handler):
    """Wrapper logging.Handler to emit log message to apache's error.log."""
    # Translation table from Python logging levels to Apache log levels.
    _LEVELS = {
        logging.DEBUG: apache.APLOG_DEBUG,
        logging.INFO: apache.APLOG_INFO,
        logging.WARNING: apache.APLOG_WARNING,
        logging.ERROR: apache.APLOG_ERR,
        logging.CRITICAL: apache.APLOG_CRIT,
    }
    def __init__(self, request=None):
        """Initialize the handler.

        Args:
            request: optional mod_python request; when given, log lines go to
                that request's log (request.log_error) instead of the global
                apache.log_error.
        """
        logging.Handler.__init__(self)
        self._log_error = apache.log_error
        if request is not None:
            self._log_error = request.log_error
        # Time and level will be printed by Apache.
        self._formatter = logging.Formatter('%(name)s: %(message)s')
    def emit(self, record):
        """Format the record and forward it to Apache's error log."""
        # Default to DEBUG for any level not in the translation table.
        apache_level = apache.APLOG_DEBUG
        if record.levelno in ApacheLogHandler._LEVELS:
            apache_level = ApacheLogHandler._LEVELS[record.levelno]
        msg = self._formatter.format(record)
        # "server" parameter must be passed to have "level" parameter work.
        # If only "level" parameter is passed, nothing shows up on Apache's
        # log. However, at this point, we cannot get the server object of the
        # virtual host which will process WebSocket requests. The only server
        # object we can get here is apache.main_server. But Wherever (server
        # configuration context or virtual host context) we put
        # PythonHeaderParserHandler directive, apache.main_server just points
        # the main server instance (not any of virtual server instance). Then,
        # Apache follows LogLevel directive in the server configuration context
        # to filter logs. So, we need to specify LogLevel in the server
        # configuration context. Even if we specify "LogLevel debug" in the
        # virtual host context which actually handles WebSocket connections,
        # DEBUG level logs never show up unless "LogLevel debug" is specified
        # in the server configuration context.
        #
        # TODO(tyoshino): Provide logging methods on request object. When
        # request is mp_request object (when used together with Apache), the
        # methods call request.log_error indirectly. When request is
        # _StandaloneRequest, the methods call Python's logging facility which
        # we create in standalone.py.
        self._log_error(msg, apache_level, apache.main_server)
def _configure_logging():
    """Route all Python logging records to Apache's error log."""
    root_logger = logging.getLogger()
    # Logs are filtered by Apache based on LogLevel directive in Apache
    # configuration file, so pass records of every level through to the
    # ApacheLogHandler instead of filtering here.
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(ApacheLogHandler())
_configure_logging()
_LOGGER = logging.getLogger(__name__)
def _parse_option(name, value, definition):
if value is None:
return False
meaning = definition.get(value.lower())
if meaning is None:
raise Exception('Invalid value for PythonOption %s: %r' %
(name, value))
return meaning
def _create_dispatcher():
    """Build the module-wide dispatch.Dispatcher from Apache PythonOptions.

    Reads the handler root directory, the optional scan directory (defaults
    to the root) and the outside-root policy from apache.main_server's
    options, then logs any handler-source loading warnings.
    """
    _LOGGER.info('Initializing Dispatcher')
    options = apache.main_server.get_options()
    handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
    if not handler_root:
        # NOTE(review): the second argument makes APLOG_ERR part of the
        # Exception's args tuple, not a log level -- looks unintended but is
        # preserved as-is here.
        raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
                        apache.APLOG_ERR)
    handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
    allow_handlers_outside_root = _parse_option(
        _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
        options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
        _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
    dispatcher = dispatch.Dispatcher(
        handler_root, handler_scan, allow_handlers_outside_root)
    # Surface (but do not fail on) problems found while loading handlers.
    for warning in dispatcher.source_warnings():
        apache.log_error(
            'mod_pywebsocket: Warning in source loading: %s' % warning,
            apache.APLOG_WARNING)
    return dispatcher
# Initialize
_dispatcher = _create_dispatcher()
def headerparserhandler(request):
    """Handle request.
    Args:
        request: mod_python request.
    This function is named headerparserhandler because it is the default
    name for a PythonHeaderParserHandler.

    Returns apache.DECLINED to hand the request back to Apache, an HTTP
    status code on handshake failure, or apache.DONE after serving a
    WebSocket connection.

    NOTE: this file uses Python 2 "except X, e" syntax throughout.
    """
    # Tracks whether the WebSocket handshake completed; errors before the
    # handshake can still be delegated back to Apache.
    handshake_is_done = False
    try:
        # Fallback to default http handler for request paths for which
        # we don't have request handlers.
        if not _dispatcher.get_handler_suite(request.uri):
            request.log_error(
                'mod_pywebsocket: No handler for resource: %r' % request.uri,
                apache.APLOG_INFO)
            request.log_error(
                'mod_pywebsocket: Fallback to Apache', apache.APLOG_INFO)
            return apache.DECLINED
    except dispatch.DispatchException, e:
        request.log_error(
            'mod_pywebsocket: Dispatch failed for error: %s' % e,
            apache.APLOG_INFO)
        # handshake_is_done is always False at this point (it is only set
        # further below), so a dispatch failure always returns e.status.
        if not handshake_is_done:
            return e.status
    try:
        # Obsolete Hixie-75 option; parsed for backward compatibility.
        allow_draft75 = _parse_option(
            _PYOPT_ALLOW_DRAFT75,
            apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
            _PYOPT_ALLOW_DRAFT75_DEFINITION)
        try:
            handshake.do_handshake(
                request, _dispatcher, allowDraft75=allow_draft75)
        except handshake.VersionException, e:
            # Client requested an unsupported protocol version; advertise
            # the versions we do support, as required by the spec.
            request.log_error(
                'mod_pywebsocket: Handshake failed for version error: %s' % e,
                apache.APLOG_INFO)
            request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
                                        e.supported_versions)
            return apache.HTTP_BAD_REQUEST
        except handshake.HandshakeException, e:
            # Handshake for ws/wss failed.
            # Send http response with error status.
            request.log_error(
                'mod_pywebsocket: Handshake failed for error: %s' % e,
                apache.APLOG_INFO)
            return e.status
        handshake_is_done = True
        request._dispatcher = _dispatcher
        # Blocks for the lifetime of the WebSocket connection.
        _dispatcher.transfer_data(request)
    except handshake.AbortedByUserException, e:
        request.log_error('mod_pywebsocket: Aborted: %s' % e, apache.APLOG_INFO)
    except Exception, e:
        # DispatchException can also be thrown if something is wrong in
        # pywebsocket code. It's caught here, then.
        request.log_error('mod_pywebsocket: Exception occurred: %s\n%s' %
                          (e, util.get_stack_trace()),
                          apache.APLOG_ERR)
        # Unknown exceptions before handshake mean Apache must handle its
        # request with another handler.
        if not handshake_is_done:
            return apache.DECLINED
    # Set assbackwards to suppress response header generation by Apache.
    request.assbackwards = 1
    return apache.DONE # Return DONE such that no other handlers are invoked.
# vi:sts=4 sw=4 et
| bsd-3-clause |
mcsosa121/cafa | cafaenv/lib/python2.7/site-packages/django/conf/locale/nb/formats.py | 504 | 1766 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# Locale formats for Norwegian Bokmal (nb).
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y',  # '2006-10-25', '25.10.2006', '25.10.06'
    # '%d. %b %Y', '%d %b %Y',            # '25. okt 2006', '25 okt 2006'
    # '%d. %b. %Y', '%d %b. %Y',          # '25. okt. 2006', '25 okt. 2006'
    # '%d. %B %Y', '%d %B %Y',            # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
    '%d.%m.%y',              # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| mit |
piman31415/kudzucoin | contrib/testgen/base58.py | 2139 | 2818 | '''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Thin alias so code written against the old PyCrypto-style
    # SHA256.new(...) API can use hashlib.
    new = hashlib.sha256
if str != bytes:
    # Python 3.x: deliberately shadow the ord/chr builtins so the byte-wise
    # encode/decode loops below work on both str (Python 2) and bytes
    # (Python 3, where iterating bytes already yields ints).
    def ord(c):
        return c
    def chr(n):
        return bytes( (n,) )
# Base58 alphabet: no 0, O, I or l, to avoid visual ambiguity.
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
    """Encode v, a string of bytes, as a base58 string."""
    # Interpret v as one big-endian integer.
    acc = 0
    for (idx, byte) in enumerate(v[::-1]):
        acc += (256 ** idx) * ord(byte)
    # Peel off base58 digits, least significant first.
    encoded = ''
    while acc >= __b58base:
        acc, rem = divmod(acc, __b58base)
        encoded = __b58chars[rem] + encoded
    encoded = __b58chars[acc] + encoded
    # Bitcoin's leading-zero compression: each leading 0-byte of the input
    # becomes a leading '1' in the output.
    pad = 0
    for byte in v:
        if byte == '\0':
            pad += 1
        else:
            break
    return (__b58chars[0] * pad) + encoded
def b58decode(v, length = None):
    """Decode base58 string v into a string of bytes.

    When length is given, return None unless the decoded result is exactly
    that many bytes long.
    """
    # Fold the base58 digits into a single big integer.
    acc = 0
    for (idx, ch) in enumerate(v[::-1]):
        acc += __b58chars.find(ch) * (__b58base ** idx)
    # Peel off bytes, least significant first. Relies on the module-level
    # chr() shim to produce bytes on Python 3.
    decoded = bytes()
    while acc >= 256:
        acc, rem = divmod(acc, 256)
        decoded = chr(rem) + decoded
    decoded = chr(acc) + decoded
    # Each leading '1' character maps back to a leading zero byte.
    pad = 0
    for ch in v:
        if ch == __b58chars[0]:
            pad += 1
        else:
            break
    decoded = chr(0) * pad + decoded
    if length is not None and len(decoded) != length:
        return None
    return decoded
def checksum(v):
    """Return the 4-byte (32-bit) double-SHA256 checksum of v."""
    inner = SHA256.new(v).digest()
    return SHA256.new(inner).digest()[0:4]
def b58encode_chk(v):
    """Base58-encode v with its 32-bit checksum appended."""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """Decode a base58check string; verify and strip the 4-byte checksum.

    Returns the payload bytes, or None when decoding fails or the checksum
    does not match.
    """
    result = b58decode(v)
    if result is None:
        return None
    # Fix: the original computed checksum(result[:-4]) into an unused local
    # (h3) and then recomputed it; compute it once and use it.
    payload, check = result[:-4], result[-4:]
    if check == checksum(payload):
        return payload
    else:
        return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    decoded = b58decode_chk(strAddress)
    # A valid payload is exactly 21 bytes: 1 version byte + 20-byte hash.
    if decoded is None:
        return None
    if len(decoded) != 21:
        return None
    return ord(decoded[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # Fix: use == instead of "is" -- identity comparison with an int literal
    # relies on CPython small-int caching and is a SyntaxWarning on 3.8+.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| mit |
NixaSoftware/CVis | venv/bin/tools/build/v2/test/prebuilt.py | 44 | 1309 | #!/usr/bin/python
# Copyright 2002, 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that we can use already built sources
import BoostBuild
# Build in both variants so variant-specific prebuilt selection is exercised.
t = BoostBuild.Tester(["debug", "release"], use_test_config=False)
t.set_tree('prebuilt')
t.expand_toolset("ext/jamroot.jam")
t.expand_toolset("jamroot.jam")
# First, build the external project.
t.run_build_system(subdir="ext")
# Then pretend that we do not have the sources for the external project, and
# can only use compiled binaries.
t.copy("ext/jamfile2.jam", "ext/jamfile.jam")
t.expand_toolset("ext/jamfile.jam")
# Now check that we can build the main project, and that correct prebuilt file
# is picked, depending of variant. This also checks that correct includes for
# prebuilt libraries are used.
t.run_build_system()
t.expect_addition("bin/$toolset/debug/hello.exe")
t.expect_addition("bin/$toolset/release/hello.exe")
# Clean build products before the second scenario.
t.rm("bin")
# Now test that prebuilt file specified by absolute name works too.
t.copy("ext/jamfile3.jam", "ext/jamfile.jam")
t.expand_toolset("ext/jamfile.jam")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/hello.exe")
t.expect_addition("bin/$toolset/release/hello.exe")
t.cleanup()
| apache-2.0 |
zguangyu/tornado | tornado/test/gen_test.py | 1 | 45446 | from __future__ import absolute_import, division, print_function, with_statement
import gc
import contextlib
import datetime
import functools
import sys
import textwrap
import time
import weakref
from tornado.concurrent import return_future, Future
from tornado.escape import url_escape
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis, skipBefore33, skipBefore35, skipNotCPython, exec_test
from tornado.web import Application, RequestHandler, asynchronous, HTTPError
from tornado import gen
try:
from concurrent import futures
except ImportError:
futures = None
class GenEngineTest(AsyncTestCase):
    """Tests for the callback-style ``gen.engine`` decorator and the
    YieldPoint classes (``gen.Callback``/``gen.Wait``/``gen.Task``),
    including their interaction with Futures and stack_context."""
    def setUp(self):
        super(GenEngineTest, self).setUp()
        self.named_contexts = []
    def named_context(self, name):
        # Returns a context-manager factory that records enter/exit order in
        # self.named_contexts so tests can assert stack_context behavior.
        @contextlib.contextmanager
        def context():
            self.named_contexts.append(name)
            try:
                yield
            finally:
                self.assertEqual(self.named_contexts.pop(), name)
        return context
    def run_gen(self, f):
        # Start the engine-decorated function and block until self.stop().
        f()
        return self.wait()
    def delay_callback(self, iterations, callback, arg):
        """Runs callback(arg) after a number of IOLoop iterations."""
        if iterations == 0:
            callback(arg)
        else:
            self.io_loop.add_callback(functools.partial(
                self.delay_callback, iterations - 1, callback, arg))
    @return_future
    def async_future(self, result, callback):
        # Resolves the returned Future with ``result`` on the next IOLoop
        # iteration.
        self.io_loop.add_callback(callback, result)
    @gen.coroutine
    def async_exception(self, e):
        # Coroutine that raises ``e`` after yielding once.
        yield gen.moment
        raise e
    def test_no_yield(self):
        @gen.engine
        def f():
            self.stop()
        self.run_gen(f)
    def test_inline_cb(self):
        @gen.engine
        def f():
            (yield gen.Callback("k1"))()
            res = yield gen.Wait("k1")
            self.assertTrue(res is None)
            self.stop()
        self.run_gen(f)
    def test_ioloop_cb(self):
        @gen.engine
        def f():
            self.io_loop.add_callback((yield gen.Callback("k1")))
            yield gen.Wait("k1")
            self.stop()
        self.run_gen(f)
    def test_exception_phase1(self):
        @gen.engine
        def f():
            1 / 0
        self.assertRaises(ZeroDivisionError, self.run_gen, f)
    def test_exception_phase2(self):
        @gen.engine
        def f():
            self.io_loop.add_callback((yield gen.Callback("k1")))
            yield gen.Wait("k1")
            1 / 0
        self.assertRaises(ZeroDivisionError, self.run_gen, f)
    def test_exception_in_task_phase1(self):
        def fail_task(callback):
            1 / 0
        @gen.engine
        def f():
            try:
                yield gen.Task(fail_task)
                raise Exception("did not get expected exception")
            except ZeroDivisionError:
                self.stop()
        self.run_gen(f)
    def test_exception_in_task_phase2(self):
        # This is the case that requires the use of stack_context in gen.engine
        def fail_task(callback):
            self.io_loop.add_callback(lambda: 1 / 0)
        @gen.engine
        def f():
            try:
                yield gen.Task(fail_task)
                raise Exception("did not get expected exception")
            except ZeroDivisionError:
                self.stop()
        self.run_gen(f)
    def test_with_arg(self):
        @gen.engine
        def f():
            (yield gen.Callback("k1"))(42)
            res = yield gen.Wait("k1")
            self.assertEqual(42, res)
            self.stop()
        self.run_gen(f)
    def test_with_arg_tuple(self):
        @gen.engine
        def f():
            (yield gen.Callback((1, 2)))((3, 4))
            res = yield gen.Wait((1, 2))
            self.assertEqual((3, 4), res)
            self.stop()
        self.run_gen(f)
    def test_key_reuse(self):
        @gen.engine
        def f():
            yield gen.Callback("k1")
            yield gen.Callback("k1")
            self.stop()
        self.assertRaises(gen.KeyReuseError, self.run_gen, f)
    def test_key_reuse_tuple(self):
        @gen.engine
        def f():
            yield gen.Callback((1, 2))
            yield gen.Callback((1, 2))
            self.stop()
        self.assertRaises(gen.KeyReuseError, self.run_gen, f)
    def test_key_mismatch(self):
        @gen.engine
        def f():
            yield gen.Callback("k1")
            yield gen.Wait("k2")
            self.stop()
        self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
    def test_key_mismatch_tuple(self):
        @gen.engine
        def f():
            yield gen.Callback((1, 2))
            yield gen.Wait((2, 3))
            self.stop()
        self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
    def test_leaked_callback(self):
        @gen.engine
        def f():
            yield gen.Callback("k1")
            self.stop()
        self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
    def test_leaked_callback_tuple(self):
        @gen.engine
        def f():
            yield gen.Callback((1, 2))
            self.stop()
        self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
    def test_parallel_callback(self):
        @gen.engine
        def f():
            for k in range(3):
                self.io_loop.add_callback((yield gen.Callback(k)))
            yield gen.Wait(1)
            self.io_loop.add_callback((yield gen.Callback(3)))
            yield gen.Wait(0)
            yield gen.Wait(3)
            yield gen.Wait(2)
            self.stop()
        self.run_gen(f)
    def test_bogus_yield(self):
        @gen.engine
        def f():
            yield 42
        self.assertRaises(gen.BadYieldError, self.run_gen, f)
    def test_bogus_yield_tuple(self):
        @gen.engine
        def f():
            yield (1, 2)
        self.assertRaises(gen.BadYieldError, self.run_gen, f)
    def test_reuse(self):
        @gen.engine
        def f():
            self.io_loop.add_callback((yield gen.Callback(0)))
            yield gen.Wait(0)
            self.stop()
        self.run_gen(f)
        self.run_gen(f)
    def test_task(self):
        @gen.engine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            self.stop()
        self.run_gen(f)
    def test_wait_all(self):
        @gen.engine
        def f():
            (yield gen.Callback("k1"))("v1")
            (yield gen.Callback("k2"))("v2")
            results = yield gen.WaitAll(["k1", "k2"])
            self.assertEqual(results, ["v1", "v2"])
            self.stop()
        self.run_gen(f)
    def test_exception_in_yield(self):
        @gen.engine
        def f():
            try:
                yield gen.Wait("k1")
                raise Exception("did not get expected exception")
            except gen.UnknownKeyError:
                pass
            self.stop()
        self.run_gen(f)
    def test_resume_after_exception_in_yield(self):
        @gen.engine
        def f():
            try:
                yield gen.Wait("k1")
                raise Exception("did not get expected exception")
            except gen.UnknownKeyError:
                pass
            (yield gen.Callback("k2"))("v2")
            self.assertEqual((yield gen.Wait("k2")), "v2")
            self.stop()
        self.run_gen(f)
    def test_orphaned_callback(self):
        @gen.engine
        def f():
            self.orphaned_callback = yield gen.Callback(1)
        try:
            self.run_gen(f)
            raise Exception("did not get expected exception")
        except gen.LeakedCallbackError:
            pass
        self.orphaned_callback()
    def test_multi(self):
        @gen.engine
        def f():
            (yield gen.Callback("k1"))("v1")
            (yield gen.Callback("k2"))("v2")
            results = yield [gen.Wait("k1"), gen.Wait("k2")]
            self.assertEqual(results, ["v1", "v2"])
            self.stop()
        self.run_gen(f)
    def test_multi_dict(self):
        @gen.engine
        def f():
            (yield gen.Callback("k1"))("v1")
            (yield gen.Callback("k2"))("v2")
            results = yield dict(foo=gen.Wait("k1"), bar=gen.Wait("k2"))
            self.assertEqual(results, dict(foo="v1", bar="v2"))
            self.stop()
        self.run_gen(f)
    # The following tests explicitly run with both gen.Multi
    # and gen.multi_future (Task returns a Future, so it can be used
    # with either).
    def test_multi_yieldpoint_delayed(self):
        @gen.engine
        def f():
            # callbacks run at different times
            responses = yield gen.Multi([
                gen.Task(self.delay_callback, 3, arg="v1"),
                gen.Task(self.delay_callback, 1, arg="v2"),
            ])
            self.assertEqual(responses, ["v1", "v2"])
            self.stop()
        self.run_gen(f)
    def test_multi_yieldpoint_dict_delayed(self):
        @gen.engine
        def f():
            # callbacks run at different times
            responses = yield gen.Multi(dict(
                foo=gen.Task(self.delay_callback, 3, arg="v1"),
                bar=gen.Task(self.delay_callback, 1, arg="v2"),
            ))
            self.assertEqual(responses, dict(foo="v1", bar="v2"))
            self.stop()
        self.run_gen(f)
    def test_multi_future_delayed(self):
        @gen.engine
        def f():
            # callbacks run at different times
            responses = yield gen.multi_future([
                gen.Task(self.delay_callback, 3, arg="v1"),
                gen.Task(self.delay_callback, 1, arg="v2"),
            ])
            self.assertEqual(responses, ["v1", "v2"])
            self.stop()
        self.run_gen(f)
    def test_multi_future_dict_delayed(self):
        @gen.engine
        def f():
            # callbacks run at different times
            responses = yield gen.multi_future(dict(
                foo=gen.Task(self.delay_callback, 3, arg="v1"),
                bar=gen.Task(self.delay_callback, 1, arg="v2"),
            ))
            self.assertEqual(responses, dict(foo="v1", bar="v2"))
            self.stop()
        self.run_gen(f)
    @skipOnTravis
    @gen_test
    def test_multi_performance(self):
        # Yielding a list used to have quadratic performance; make
        # sure a large list stays reasonable. On my laptop a list of
        # 2000 used to take 1.8s, now it takes 0.12.
        start = time.time()
        yield [gen.Task(self.io_loop.add_callback) for i in range(2000)]
        end = time.time()
        self.assertLess(end - start, 1.0)
    @gen_test
    def test_multi_empty(self):
        # Empty lists or dicts should return the same type.
        x = yield []
        self.assertTrue(isinstance(x, list))
        y = yield {}
        self.assertTrue(isinstance(y, dict))
    @gen_test
    def test_multi_mixed_types(self):
        # A YieldPoint (Wait) and Future (Task) can be combined
        # (and use the YieldPoint codepath)
        (yield gen.Callback("k1"))("v1")
        responses = yield [gen.Wait("k1"),
                           gen.Task(self.delay_callback, 3, arg="v2")]
        self.assertEqual(responses, ["v1", "v2"])
    @gen_test
    def test_future(self):
        result = yield self.async_future(1)
        self.assertEqual(result, 1)
    @gen_test
    def test_multi_future(self):
        results = yield [self.async_future(1), self.async_future(2)]
        self.assertEqual(results, [1, 2])
    @gen_test
    def test_multi_future_duplicate(self):
        f = self.async_future(2)
        results = yield [self.async_future(1), f, self.async_future(3), f]
        self.assertEqual(results, [1, 2, 3, 2])
    @gen_test
    def test_multi_dict_future(self):
        results = yield dict(foo=self.async_future(1), bar=self.async_future(2))
        self.assertEqual(results, dict(foo=1, bar=2))
    @gen_test
    def test_multi_exceptions(self):
        with ExpectLog(app_log, "Multiple exceptions in yield list"):
            with self.assertRaises(RuntimeError) as cm:
                yield gen.Multi([self.async_exception(RuntimeError("error 1")),
                                 self.async_exception(RuntimeError("error 2"))])
        self.assertEqual(str(cm.exception), "error 1")
        # With only one exception, no error is logged.
        with self.assertRaises(RuntimeError):
            yield gen.Multi([self.async_exception(RuntimeError("error 1")),
                             self.async_future(2)])
        # Exception logging may be explicitly quieted.
        with self.assertRaises(RuntimeError):
            yield gen.Multi([self.async_exception(RuntimeError("error 1")),
                             self.async_exception(RuntimeError("error 2"))],
                            quiet_exceptions=RuntimeError)
    @gen_test
    def test_multi_future_exceptions(self):
        with ExpectLog(app_log, "Multiple exceptions in yield list"):
            with self.assertRaises(RuntimeError) as cm:
                yield [self.async_exception(RuntimeError("error 1")),
                       self.async_exception(RuntimeError("error 2"))]
        self.assertEqual(str(cm.exception), "error 1")
        # With only one exception, no error is logged.
        with self.assertRaises(RuntimeError):
            yield [self.async_exception(RuntimeError("error 1")),
                   self.async_future(2)]
        # Exception logging may be explicitly quieted.
        with self.assertRaises(RuntimeError):
            yield gen.multi_future(
                [self.async_exception(RuntimeError("error 1")),
                 self.async_exception(RuntimeError("error 2"))],
                quiet_exceptions=RuntimeError)
    def test_arguments(self):
        @gen.engine
        def f():
            (yield gen.Callback("noargs"))()
            self.assertEqual((yield gen.Wait("noargs")), None)
            (yield gen.Callback("1arg"))(42)
            self.assertEqual((yield gen.Wait("1arg")), 42)
            (yield gen.Callback("kwargs"))(value=42)
            result = yield gen.Wait("kwargs")
            self.assertTrue(isinstance(result, gen.Arguments))
            self.assertEqual(((), dict(value=42)), result)
            self.assertEqual(dict(value=42), result.kwargs)
            (yield gen.Callback("2args"))(42, 43)
            result = yield gen.Wait("2args")
            self.assertTrue(isinstance(result, gen.Arguments))
            self.assertEqual(((42, 43), {}), result)
            self.assertEqual((42, 43), result.args)
            def task_func(callback):
                callback(None, error="foo")
            result = yield gen.Task(task_func)
            self.assertTrue(isinstance(result, gen.Arguments))
            self.assertEqual(((None,), dict(error="foo")), result)
            self.stop()
        self.run_gen(f)
    def test_stack_context_leak(self):
        # regression test: repeated invocations of a gen-based
        # function should not result in accumulated stack_contexts
        def _stack_depth():
            head = stack_context._state.contexts[1]
            length = 0
            while head is not None:
                length += 1
                head = head.old_contexts[1]
            return length
        @gen.engine
        def inner(callback):
            yield gen.Task(self.io_loop.add_callback)
            callback()
        @gen.engine
        def outer():
            for i in range(10):
                yield gen.Task(inner)
            stack_increase = _stack_depth() - initial_stack_depth
            self.assertTrue(stack_increase <= 2)
            self.stop()
        initial_stack_depth = _stack_depth()
        self.run_gen(outer)
    def test_stack_context_leak_exception(self):
        # same as previous, but with a function that exits with an exception
        @gen.engine
        def inner(callback):
            yield gen.Task(self.io_loop.add_callback)
            1 / 0
        @gen.engine
        def outer():
            for i in range(10):
                try:
                    yield gen.Task(inner)
                except ZeroDivisionError:
                    pass
            stack_increase = len(stack_context._state.contexts) - initial_stack_depth
            self.assertTrue(stack_increase <= 2)
            self.stop()
        initial_stack_depth = len(stack_context._state.contexts)
        self.run_gen(outer)
    def function_with_stack_context(self, callback):
        # Technically this function should stack_context.wrap its callback
        # upon entry. However, it is very common for this step to be
        # omitted.
        def step2():
            self.assertEqual(self.named_contexts, ['a'])
            self.io_loop.add_callback(callback)
        with stack_context.StackContext(self.named_context('a')):
            self.io_loop.add_callback(step2)
    @gen_test
    def test_wait_transfer_stack_context(self):
        # Wait should not pick up contexts from where callback was invoked,
        # even if that function improperly fails to wrap its callback.
        cb = yield gen.Callback('k1')
        self.function_with_stack_context(cb)
        self.assertEqual(self.named_contexts, [])
        yield gen.Wait('k1')
        self.assertEqual(self.named_contexts, [])
    @gen_test
    def test_task_transfer_stack_context(self):
        yield gen.Task(self.function_with_stack_context)
        self.assertEqual(self.named_contexts, [])
    def test_raise_after_stop(self):
        # This pattern will be used in the following tests so make sure
        # the exception propagates as expected.
        @gen.engine
        def f():
            self.stop()
            1 / 0
        with self.assertRaises(ZeroDivisionError):
            self.run_gen(f)
    def test_sync_raise_return(self):
        # gen.Return is allowed in @gen.engine, but it may not be used
        # to return a value.
        @gen.engine
        def f():
            self.stop(42)
            raise gen.Return()
        result = self.run_gen(f)
        self.assertEqual(result, 42)
    def test_async_raise_return(self):
        @gen.engine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            self.stop(42)
            raise gen.Return()
        result = self.run_gen(f)
        self.assertEqual(result, 42)
    def test_sync_raise_return_value(self):
        @gen.engine
        def f():
            raise gen.Return(42)
        with self.assertRaises(gen.ReturnValueIgnoredError):
            self.run_gen(f)
    def test_sync_raise_return_value_tuple(self):
        @gen.engine
        def f():
            raise gen.Return((1, 2))
        with self.assertRaises(gen.ReturnValueIgnoredError):
            self.run_gen(f)
    def test_async_raise_return_value(self):
        @gen.engine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            raise gen.Return(42)
        with self.assertRaises(gen.ReturnValueIgnoredError):
            self.run_gen(f)
    def test_async_raise_return_value_tuple(self):
        @gen.engine
        def f():
            yield gen.Task(self.io_loop.add_callback)
            raise gen.Return((1, 2))
        with self.assertRaises(gen.ReturnValueIgnoredError):
            self.run_gen(f)
    def test_return_value(self):
        # It is an error to apply @gen.engine to a function that returns
        # a value.
        @gen.engine
        def f():
            return 42
        with self.assertRaises(gen.ReturnValueIgnoredError):
            self.run_gen(f)
    def test_return_value_tuple(self):
        # It is an error to apply @gen.engine to a function that returns
        # a value.
        @gen.engine
        def f():
            return (1, 2)
        with self.assertRaises(gen.ReturnValueIgnoredError):
            self.run_gen(f)
    @skipNotCPython
    def test_task_refcounting(self):
        # On CPython, tasks and their arguments should be released immediately
        # without waiting for garbage collection.
        @gen.engine
        def f():
            class Foo(object):
                pass
            arg = Foo()
            self.arg_ref = weakref.ref(arg)
            task = gen.Task(self.io_loop.add_callback, arg=arg)
            self.task_ref = weakref.ref(task)
            yield task
            self.stop()
        self.run_gen(f)
        self.assertIs(self.arg_ref(), None)
        self.assertIs(self.task_ref(), None)
class GenCoroutineTest(AsyncTestCase):
def setUp(self):
# Stray StopIteration exceptions can lead to tests exiting prematurely,
# so we need explicit checks here to make sure the tests run all
# the way through.
self.finished = False
super(GenCoroutineTest, self).setUp()
def tearDown(self):
super(GenCoroutineTest, self).tearDown()
assert self.finished
@gen_test
def test_sync_gen_return(self):
@gen.coroutine
def f():
raise gen.Return(42)
result = yield f()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_async_gen_return(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(42)
result = yield f()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_sync_return(self):
@gen.coroutine
def f():
return 42
result = yield f()
self.assertEqual(result, 42)
self.finished = True
@skipBefore33
@gen_test
def test_async_return(self):
namespace = exec_test(globals(), locals(), """
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
return 42
""")
result = yield namespace['f']()
self.assertEqual(result, 42)
self.finished = True
@skipBefore33
@gen_test
def test_async_early_return(self):
# A yield statement exists but is not executed, which means
# this function "returns" via an exception. This exception
# doesn't happen before the exception handling is set up.
namespace = exec_test(globals(), locals(), """
@gen.coroutine
def f():
if True:
return 42
yield gen.Task(self.io_loop.add_callback)
""")
result = yield namespace['f']()
self.assertEqual(result, 42)
self.finished = True
@skipBefore35
@gen_test
def test_async_await(self):
# This test verifies that an async function can await a
# yield-based gen.coroutine, and that a gen.coroutine
# (the test method itself) can yield an async function.
namespace = exec_test(globals(), locals(), """
async def f():
await gen.Task(self.io_loop.add_callback)
return 42
""")
result = yield namespace['f']()
self.assertEqual(result, 42)
self.finished = True
@skipBefore35
@gen_test
def test_async_await_mixed_multi_native_future(self):
namespace = exec_test(globals(), locals(), """
async def f1():
await gen.Task(self.io_loop.add_callback)
return 42
""")
@gen.coroutine
def f2():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(43)
results = yield [namespace['f1'](), f2()]
self.assertEqual(results, [42, 43])
self.finished = True
@skipBefore35
@gen_test
def test_async_await_mixed_multi_native_yieldpoint(self):
namespace = exec_test(globals(), locals(), """
async def f1():
await gen.Task(self.io_loop.add_callback)
return 42
""")
@gen.coroutine
def f2():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(43)
f2(callback=(yield gen.Callback('cb')))
results = yield [namespace['f1'](), gen.Wait('cb')]
self.assertEqual(results, [42, 43])
self.finished = True
@skipBefore35
@gen_test
def test_async_with_timeout(self):
namespace = exec_test(globals(), locals(), """
async def f1():
return 42
""")
result = yield gen.with_timeout(datetime.timedelta(hours=1),
namespace['f1']())
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_sync_return_no_value(self):
@gen.coroutine
def f():
return
result = yield f()
self.assertEqual(result, None)
self.finished = True
@gen_test
def test_async_return_no_value(self):
# Without a return value we don't need python 3.3.
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
return
result = yield f()
self.assertEqual(result, None)
self.finished = True
@gen_test
def test_sync_raise(self):
@gen.coroutine
def f():
1 / 0
# The exception is raised when the future is yielded
# (or equivalently when its result method is called),
# not when the function itself is called).
future = f()
with self.assertRaises(ZeroDivisionError):
yield future
self.finished = True
@gen_test
def test_async_raise(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
1 / 0
future = f()
with self.assertRaises(ZeroDivisionError):
yield future
self.finished = True
@gen_test
def test_pass_callback(self):
@gen.coroutine
def f():
raise gen.Return(42)
result = yield gen.Task(f)
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_replace_yieldpoint_exception(self):
# Test exception handling: a coroutine can catch one exception
# raised by a yield point and raise a different one.
@gen.coroutine
def f1():
1 / 0
@gen.coroutine
def f2():
try:
yield f1()
except ZeroDivisionError:
raise KeyError()
future = f2()
with self.assertRaises(KeyError):
yield future
self.finished = True
@gen_test
def test_swallow_yieldpoint_exception(self):
# Test exception handling: a coroutine can catch an exception
# raised by a yield point and not raise a different one.
@gen.coroutine
def f1():
1 / 0
@gen.coroutine
def f2():
try:
yield f1()
except ZeroDivisionError:
raise gen.Return(42)
result = yield f2()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_replace_context_exception(self):
# Test exception handling: exceptions thrown into the stack context
# can be caught and replaced.
# Note that this test and the following are for behavior that is
# not really supported any more: coroutines no longer create a
# stack context automatically; but one is created after the first
# YieldPoint (i.e. not a Future).
@gen.coroutine
def f2():
(yield gen.Callback(1))()
yield gen.Wait(1)
self.io_loop.add_callback(lambda: 1 / 0)
try:
yield gen.Task(self.io_loop.add_timeout,
self.io_loop.time() + 10)
except ZeroDivisionError:
raise KeyError()
future = f2()
with self.assertRaises(KeyError):
yield future
self.finished = True
@gen_test
def test_swallow_context_exception(self):
# Test exception handling: exceptions thrown into the stack context
# can be caught and ignored.
@gen.coroutine
def f2():
(yield gen.Callback(1))()
yield gen.Wait(1)
self.io_loop.add_callback(lambda: 1 / 0)
try:
yield gen.Task(self.io_loop.add_timeout,
self.io_loop.time() + 10)
except ZeroDivisionError:
raise gen.Return(42)
result = yield f2()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_moment(self):
calls = []
@gen.coroutine
def f(name, yieldable):
for i in range(5):
calls.append(name)
yield yieldable
# First, confirm the behavior without moment: each coroutine
# monopolizes the event loop until it finishes.
immediate = Future()
immediate.set_result(None)
yield [f('a', immediate), f('b', immediate)]
self.assertEqual(''.join(calls), 'aaaaabbbbb')
# With moment, they take turns.
calls = []
yield [f('a', gen.moment), f('b', gen.moment)]
self.assertEqual(''.join(calls), 'ababababab')
self.finished = True
calls = []
yield [f('a', gen.moment), f('b', immediate)]
self.assertEqual(''.join(calls), 'abbbbbaaaa')
@gen_test
def test_sleep(self):
yield gen.sleep(0.01)
self.finished = True
@skipBefore33
@gen_test
def test_py3_leak_exception_context(self):
class LeakedException(Exception):
pass
@gen.coroutine
def inner(iteration):
raise LeakedException(iteration)
try:
yield inner(1)
except LeakedException as e:
self.assertEqual(str(e), "1")
self.assertIsNone(e.__context__)
try:
yield inner(2)
except LeakedException as e:
self.assertEqual(str(e), "2")
self.assertIsNone(e.__context__)
self.finished = True
class GenSequenceHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
self.io_loop = self.request.connection.stream.io_loop
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.write("1")
self.io_loop.add_callback((yield gen.Callback("k2")))
yield gen.Wait("k2")
self.write("2")
# reuse an old key
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.finish("3")
class GenCoroutineSequenceHandler(RequestHandler):
@gen.coroutine
def get(self):
self.io_loop = self.request.connection.stream.io_loop
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.write("1")
self.io_loop.add_callback((yield gen.Callback("k2")))
yield gen.Wait("k2")
self.write("2")
# reuse an old key
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.finish("3")
class GenCoroutineUnfinishedSequenceHandler(RequestHandler):
@asynchronous
@gen.coroutine
def get(self):
self.io_loop = self.request.connection.stream.io_loop
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.write("1")
self.io_loop.add_callback((yield gen.Callback("k2")))
yield gen.Wait("k2")
self.write("2")
# reuse an old key
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
# just write, don't finish
self.write("3")
class GenTaskHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
io_loop = self.request.connection.stream.io_loop
client = AsyncHTTPClient(io_loop=io_loop)
response = yield gen.Task(client.fetch, self.get_argument('url'))
response.rethrow()
self.finish(b"got response: " + response.body)
class GenExceptionHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
# This test depends on the order of the two decorators.
io_loop = self.request.connection.stream.io_loop
yield gen.Task(io_loop.add_callback)
raise Exception("oops")
class GenCoroutineExceptionHandler(RequestHandler):
@gen.coroutine
def get(self):
# This test depends on the order of the two decorators.
io_loop = self.request.connection.stream.io_loop
yield gen.Task(io_loop.add_callback)
raise Exception("oops")
class GenYieldExceptionHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
io_loop = self.request.connection.stream.io_loop
# Test the interaction of the two stack_contexts.
def fail_task(callback):
io_loop.add_callback(lambda: 1 / 0)
try:
yield gen.Task(fail_task)
raise Exception("did not get expected exception")
except ZeroDivisionError:
self.finish('ok')
# "Undecorated" here refers to the absence of @asynchronous.
class UndecoratedCoroutinesHandler(RequestHandler):
@gen.coroutine
def prepare(self):
self.chunks = []
yield gen.Task(IOLoop.current().add_callback)
self.chunks.append('1')
@gen.coroutine
def get(self):
self.chunks.append('2')
yield gen.Task(IOLoop.current().add_callback)
self.chunks.append('3')
yield gen.Task(IOLoop.current().add_callback)
self.write(''.join(self.chunks))
class AsyncPrepareErrorHandler(RequestHandler):
@gen.coroutine
def prepare(self):
yield gen.Task(IOLoop.current().add_callback)
raise HTTPError(403)
def get(self):
self.finish('ok')
class NativeCoroutineHandler(RequestHandler):
if sys.version_info > (3, 5):
exec(textwrap.dedent("""
async def get(self):
await gen.Task(IOLoop.current().add_callback)
self.write("ok")
"""))
class GenWebTest(AsyncHTTPTestCase):
def get_app(self):
return Application([
('/sequence', GenSequenceHandler),
('/coroutine_sequence', GenCoroutineSequenceHandler),
('/coroutine_unfinished_sequence',
GenCoroutineUnfinishedSequenceHandler),
('/task', GenTaskHandler),
('/exception', GenExceptionHandler),
('/coroutine_exception', GenCoroutineExceptionHandler),
('/yield_exception', GenYieldExceptionHandler),
('/undecorated_coroutine', UndecoratedCoroutinesHandler),
('/async_prepare_error', AsyncPrepareErrorHandler),
('/native_coroutine', NativeCoroutineHandler),
])
def test_sequence_handler(self):
response = self.fetch('/sequence')
self.assertEqual(response.body, b"123")
def test_coroutine_sequence_handler(self):
response = self.fetch('/coroutine_sequence')
self.assertEqual(response.body, b"123")
def test_coroutine_unfinished_sequence_handler(self):
response = self.fetch('/coroutine_unfinished_sequence')
self.assertEqual(response.body, b"123")
def test_task_handler(self):
response = self.fetch('/task?url=%s' % url_escape(self.get_url('/sequence')))
self.assertEqual(response.body, b"got response: 123")
def test_exception_handler(self):
# Make sure we get an error and not a timeout
with ExpectLog(app_log, "Uncaught exception GET /exception"):
response = self.fetch('/exception')
self.assertEqual(500, response.code)
def test_coroutine_exception_handler(self):
# Make sure we get an error and not a timeout
with ExpectLog(app_log, "Uncaught exception GET /coroutine_exception"):
response = self.fetch('/coroutine_exception')
self.assertEqual(500, response.code)
def test_yield_exception_handler(self):
response = self.fetch('/yield_exception')
self.assertEqual(response.body, b'ok')
def test_undecorated_coroutines(self):
response = self.fetch('/undecorated_coroutine')
self.assertEqual(response.body, b'123')
def test_async_prepare_error_handler(self):
response = self.fetch('/async_prepare_error')
self.assertEqual(response.code, 403)
@skipBefore35
def test_native_coroutine_handler(self):
response = self.fetch('/native_coroutine')
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b'ok')
class WithTimeoutTest(AsyncTestCase):
@gen_test
def test_timeout(self):
with self.assertRaises(gen.TimeoutError):
yield gen.with_timeout(datetime.timedelta(seconds=0.1),
Future())
@gen_test
def test_completes_before_timeout(self):
future = Future()
self.io_loop.add_timeout(datetime.timedelta(seconds=0.1),
lambda: future.set_result('asdf'))
result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
future, io_loop=self.io_loop)
self.assertEqual(result, 'asdf')
@gen_test
def test_fails_before_timeout(self):
future = Future()
self.io_loop.add_timeout(
datetime.timedelta(seconds=0.1),
lambda: future.set_exception(ZeroDivisionError()))
with self.assertRaises(ZeroDivisionError):
yield gen.with_timeout(datetime.timedelta(seconds=3600),
future, io_loop=self.io_loop)
@gen_test
def test_already_resolved(self):
future = Future()
future.set_result('asdf')
result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
future, io_loop=self.io_loop)
self.assertEqual(result, 'asdf')
@unittest.skipIf(futures is None, 'futures module not present')
@gen_test
def test_timeout_concurrent_future(self):
with futures.ThreadPoolExecutor(1) as executor:
with self.assertRaises(gen.TimeoutError):
yield gen.with_timeout(self.io_loop.time(),
executor.submit(time.sleep, 0.1))
@unittest.skipIf(futures is None, 'futures module not present')
@gen_test
def test_completed_concurrent_future(self):
with futures.ThreadPoolExecutor(1) as executor:
yield gen.with_timeout(datetime.timedelta(seconds=3600),
executor.submit(lambda: None))
class WaitIteratorTest(AsyncTestCase):
@gen_test
def test_empty_iterator(self):
g = gen.WaitIterator()
self.assertTrue(g.done(), 'empty generator iterated')
with self.assertRaises(ValueError):
g = gen.WaitIterator(False, bar=False)
self.assertEqual(g.current_index, None, "bad nil current index")
self.assertEqual(g.current_future, None, "bad nil current future")
@gen_test
def test_already_done(self):
f1 = Future()
f2 = Future()
f3 = Future()
f1.set_result(24)
f2.set_result(42)
f3.set_result(84)
g = gen.WaitIterator(f1, f2, f3)
i = 0
while not g.done():
r = yield g.next()
# Order is not guaranteed, but the current implementation
# preserves ordering of already-done Futures.
if i == 0:
self.assertEqual(g.current_index, 0)
self.assertIs(g.current_future, f1)
self.assertEqual(r, 24)
elif i == 1:
self.assertEqual(g.current_index, 1)
self.assertIs(g.current_future, f2)
self.assertEqual(r, 42)
elif i == 2:
self.assertEqual(g.current_index, 2)
self.assertIs(g.current_future, f3)
self.assertEqual(r, 84)
i += 1
self.assertEqual(g.current_index, None, "bad nil current index")
self.assertEqual(g.current_future, None, "bad nil current future")
dg = gen.WaitIterator(f1=f1, f2=f2)
while not dg.done():
dr = yield dg.next()
if dg.current_index == "f1":
self.assertTrue(dg.current_future == f1 and dr == 24,
"WaitIterator dict status incorrect")
elif dg.current_index == "f2":
self.assertTrue(dg.current_future == f2 and dr == 42,
"WaitIterator dict status incorrect")
else:
self.fail("got bad WaitIterator index {}".format(
dg.current_index))
i += 1
self.assertEqual(dg.current_index, None, "bad nil current index")
self.assertEqual(dg.current_future, None, "bad nil current future")
def finish_coroutines(self, iteration, futures):
if iteration == 3:
futures[2].set_result(24)
elif iteration == 5:
futures[0].set_exception(ZeroDivisionError())
elif iteration == 8:
futures[1].set_result(42)
futures[3].set_result(84)
if iteration < 8:
self.io_loop.add_callback(self.finish_coroutines, iteration + 1, futures)
@gen_test
def test_iterator(self):
futures = [Future(), Future(), Future(), Future()]
self.finish_coroutines(0, futures)
g = gen.WaitIterator(*futures)
i = 0
while not g.done():
try:
r = yield g.next()
except ZeroDivisionError:
self.assertIs(g.current_future, futures[0],
'exception future invalid')
else:
if i == 0:
self.assertEqual(r, 24, 'iterator value incorrect')
self.assertEqual(g.current_index, 2, 'wrong index')
elif i == 2:
self.assertEqual(r, 42, 'iterator value incorrect')
self.assertEqual(g.current_index, 1, 'wrong index')
elif i == 3:
self.assertEqual(r, 84, 'iterator value incorrect')
self.assertEqual(g.current_index, 3, 'wrong index')
i += 1
@skipBefore35
@gen_test
def test_iterator_async_await(self):
# Recreate the previous test with py35 syntax. It's a little clunky
# because of the way the previous test handles an exception on
# a single iteration.
futures = [Future(), Future(), Future(), Future()]
self.finish_coroutines(0, futures)
self.finished = False
namespace = exec_test(globals(), locals(), """
async def f():
i = 0
g = gen.WaitIterator(*futures)
try:
async for r in g:
if i == 0:
self.assertEqual(r, 24, 'iterator value incorrect')
self.assertEqual(g.current_index, 2, 'wrong index')
else:
raise Exception("expected exception on iteration 1")
i += 1
except ZeroDivisionError:
i += 1
async for r in g:
if i == 2:
self.assertEqual(r, 42, 'iterator value incorrect')
self.assertEqual(g.current_index, 1, 'wrong index')
elif i == 3:
self.assertEqual(r, 84, 'iterator value incorrect')
self.assertEqual(g.current_index, 3, 'wrong index')
else:
raise Exception("didn't expect iteration %d" % i)
i += 1
self.finished = True
""")
yield namespace['f']()
self.assertTrue(self.finished)
@gen_test
def test_no_ref(self):
# In this usage, there is no direct hard reference to the
# WaitIterator itself, only the Future it returns. Since
# WaitIterator uses weak references internally to improve GC
# performance, this used to cause problems.
yield gen.with_timeout(datetime.timedelta(seconds=0.1),
gen.WaitIterator(gen.sleep(0)).next())
class RunnerGCTest(AsyncTestCase):
"""Github issue 1769: Runner objects can get GCed unexpectedly"""
@gen_test
def test_gc(self):
"""Runners shouldn't GC if future is alive"""
# Create the weakref
weakref_scope = [None]
def callback():
gc.collect(2)
weakref_scope[0]().set_result(123)
@gen.coroutine
def tester():
fut = Future()
weakref_scope[0] = weakref.ref(fut)
self.io_loop.add_callback(callback)
yield fut
yield gen.with_timeout(
datetime.timedelta(seconds=0.2),
tester()
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
jgoppert/pymola | versioneer.py | 386 | 68611 |
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes.
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other langauges) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when then
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* line by line for the ``git_refnames``,
    ``git_full`` and ``git_date`` assignments and returns a dict with the
    quoted values found (keys: "refnames", "full", "date").  Returns an
    empty/partial dict if the file is unreadable or lacks the assignments.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # Use a context manager so the handle is closed even if a read
        # raises (the previous open()/close() pair leaked it on error).
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # missing/unreadable file: fall through with whatever was collected
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    :param keywords: dict with "refnames", "full" and optionally "date"
        entries (as produced by git_get_keywords / get_keywords).
    :param tag_prefix: prefix stripped from tag names to obtain the version.
    :param verbose: print progress information when True.
    :return: version dict with keys version, full-revisionid, dirty, error,
        date.
    :raises NotThisMethod: if keywords are missing or still unexpanded.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line.  Previous lines may contain GPG signature
        # information (fix adopted from upstream versioneer 0.19).
        date = date.splitlines()[-1]
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # keyword substitution never happened (not a git-archive tarball)
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date.  Raises NotThisMethod if *root* is not
    under git control or the git commands fail outright.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # plain "git" may not resolve on Windows; try the wrappers too
        GITS = ["git.cmd", "git.exe"]

    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # strip the suffix before parsing the TAG-NUM-gHEX part
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution, then 'git add'-ing the touched
    files (manifest, version file, optional __init__.py, versioneer.py and,
    when modified, .gitattributes).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # point at the .py source, not the compiled cache
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # frozen/embedded interpreters may not define __file__
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # Use a context manager so the handle is closed even if a read
        # raises (the previous open()/close() pair leaked it on error).
        with open(".gitattributes", "r") as fobj:
            for line in fobj:
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
                        break
    except EnvironmentError:
        # no .gitattributes yet; we will create it below
        pass
    if not present:
        with open(".gitattributes", "a+") as fobj:
            fobj.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to derive the version from the name of a parent directory.

    Source tarballs conventionally unpack into a directory whose name
    contains both the project name and a version string.  Starting at
    *root*, the directory itself and up to two parent levels are checked
    for a basename starting with *parentdir_prefix*; the remainder of that
    basename becomes the version.  Raises NotThisMethod when no candidate
    matches.
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # move up one level and retry

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present.

    Reads *filename*, locates the JSON payload between the version_json
    triple-quote markers (accepting either LF or CRLF line endings) and
    returns it decoded.  Raises NotThisMethod when the file is unreadable
    or contains no payload.
    """
    try:
        with open(filename) as fobj:
            contents = fobj.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    match = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
                      contents, re.M | re.S)
    if match is None:
        # fall back to Windows-style line endings
        match = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
                          contents, re.M | re.S)
    if match is None:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(match.group(1))
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file.

    Replaces *filename* with a freshly rendered SHORT_VERSION_PY template
    embedding *versions* as pretty-printed JSON, then reports the result.
    """
    os.unlink(filename)
    payload = json.dumps(versions, sort_keys=True,
                         indent=1, separators=(",", ": "))
    with open(filename, "w") as fobj:
        fobj.write(SHORT_VERSION_PY % payload)

    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return a "." if the closest tag already contains a "+", else a "+".

    PEP 440 local version segments start with "+" and use "." internally;
    this picks the correct separator to append to the rendered tag.
    """
    # "closest-tag" may be present but None (no tag found); the original
    # pieces.get("closest-tag", "") would then raise TypeError on the
    # "in" test, so coalesce None to "".
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version

    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]

    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if not (pieces["distance"] or pieces["dirty"]):
            # exactly on a clean tag: the tag alone is the version
            return version
        version += ".post%d" % pieces["distance"]
    else:
        # exception #1: no tag anywhere in history
        version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: no tag anywhere in history
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag + "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: no tag anywhere in history
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style.

    An error recorded in *pieces* short-circuits to an "unknown" version
    dict; otherwise the style name selects one of the render_* helpers.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    # dispatch table instead of an if/elif chain
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
class VersioneerBadRootError(Exception):
    """Raised when the project root directory cannot be determined or is
    missing files Versioneer requires."""
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Tries, in order: expanded VCS keywords, the static _version.py file,
    a live VCS query ('git describe'), and finally the parent directory
    name; each strategy signals "not applicable" by raising NotThisMethod.

    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]

    root = get_root()
    cfg = get_config_from_root(root)

    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"

    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.

    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass

    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass

    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass

    # every strategy failed: report an explicit "unknown" version dict
    if verbose:
        print("unable to compute version")

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}
def get_version():
    """Get the short version string for this project."""
    all_versions = get_versions()
    return all_versions["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to it's pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
# Error text printed to stderr by do_setup() when setup.cfg lacks a usable
# [versioneer] section; shows the user a complete sample configuration.
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
# Commented-out [versioneer] template that do_setup() appends to setup.cfg
# when the section is missing entirely.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
# Snippet appended to the package __init__.py so that PKG.__version__ is
# populated from the generated _version.py at import time.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer.

    Writes the generated _version.py, wires __init__.py and MANIFEST.in,
    and delegates VCS-specific work to do_vcs_install(). Returns 0 on
    success, 1 when setup.cfg has no usable [versioneer] configuration.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        # Missing file or missing [versioneer] section: append a sample
        # config so the user only has to uncomment and edit it.
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    # Generate the runtime _version.py from the VCS-specific template.
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })
    # Ensure the package __init__.py exposes __version__.
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Scans setup.py for the three required markers (the versioneer import,
    get_cmdclass() and get_version() calls) and for obsolete module-level
    setters. Returns the number of problems found; 0 means setup.py is OK.
    """
    markers = {
        "import": "import versioneer",
        "cmdclass": "versioneer.get_cmdclass()",
        "get_version": "versioneer.get_version()",
    }
    found = set()
    uses_old_setters = False
    problems = 0
    with open("setup.py", "r") as handle:
        for line in handle:
            for name, needle in markers.items():
                if needle in line:
                    found.add(name)
            # Pre-0.13 configuration style assigned attributes on the module.
            if "versioneer.VCS" in line or "versioneer.versionfile_source" in line:
                uses_old_setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        problems += 1
    if uses_old_setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        problems += 1
    return problems
if __name__ == "__main__":
    # Command-line entry point: 'python versioneer.py setup' installs the
    # version machinery and then sanity-checks setup.py, exiting non-zero
    # if either step reported problems.
    cmd = sys.argv[1]  # NOTE(review): raises IndexError when run with no arguments
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
| bsd-3-clause |
twizoo/semantic | semantic/test/testDates.py | 1 | 6209 | import datetime
import unittest
from freezegun import freeze_time
from semantic.dates import DateService
@freeze_time('2014-01-01 00:00')
class TestDate(unittest.TestCase):
    """Tests for semantic.dates.DateService date and time extraction.

    The clock is frozen at 2014-01-01 00:00 so relative expressions
    ('tomorrow', 'next Friday', ...) resolve deterministically.
    """
    def compareDate(self, input, target):
        """Assert that the date extracted from input equals target."""
        service = DateService()
        result = service.extractDate(input)
        self.assertEqual(target, result)
    def compareTime(self, input, target):
        """Assert that the time extracted from input equals target."""
        service = DateService()
        result = service.extractTime(input)
        self.assertEqual(target, result)
    def compareDates(self, input, targets):
        """Assert that all dates extracted from input match targets pairwise."""
        service = DateService()
        results = service.extractDates(input)
        for (result, target) in zip(results, targets):
            self.assertEqual(target, result)
    def compareTimes(self, input, targets):
        """Assert that the time components of extracted dates match targets."""
        service = DateService()
        results = service.extractDates(input)
        for (result, target) in zip(results, targets):
            self.assertEqual(target.time(), result.time())
    #
    # Date Tests
    #
    def testExactWords(self):
        input = "Remind me on January Twenty Sixth"
        target = "2014-01-26"
        self.compareDate(input, target)
    def testExactWordsDash(self):
        input = "Remind me on January Twenty-Sixth"
        target = "2014-01-26"
        self.compareDate(input, target)
    def testExactNums(self):
        input = "Remind me on January 26"
        target = "2014-01-26"
        self.compareDate(input, target)
    def testOrdinalNums(self):
        input = "Remind me on January 2nd"
        target = "2014-01-02"
        self.compareDate(input, target)
    def testWeekFromExact(self):
        input = "Do x y and z a week from January 26"
        target = "2014-02-02"
        self.compareDate(input, target)
    def testMultipleWeeksFrom(self):
        input = "Do x y and z three weeks from January 26"
        target = "2014-02-16"
        self.compareDate(input, target)
    def testMultiWordDaysFrom(self):
        input = "Do x y and z twenty six days from January 26"
        target = "2014-02-21"
        self.compareDate(input, target)
    def testMultiWordAndDaysFrom(self):
        input = "Do x y and z one hundred and twelve days from January 26"
        target = "2014-05-18"
        self.compareDate(input, target)
    def testNextFriday(self):
        input = "Next Friday, go to the grocery store"
        target = "2014-01-10"
        self.compareDate(input, target)
    def testAmbiguousNext(self):
        input = "The next event will take place on Friday"
        target = "2014-01-03"
        self.compareDate(input, target)
    def testTomorrow(self):
        input = "Tomorrow morning, go to the grocery store"
        target = "2014-01-02"
        self.compareDate(input, target)
    def testToday(self):
        input = "Send me an email some time today if you can"
        target = "2014-01-01"
        self.compareDate(input, target)
    def testThis(self):
        input = "This morning, I went to the gym"
        target = "2014-01-01"
        self.compareDate(input, target)
    def testIllegalDate(self):
        # 2014 is not a leap year, so February 29 must raise.
        input = "I have a meeting on February 29 at 12:15pm"
        self.assertRaises(ValueError, lambda: DateService().extractDate(input))
    def testNoDate(self):
        input = "It's a very nice day."
        target = None
        self.compareDate(input, target)
    def testNoDateButTime(self):
        input = "I have a meeting at 2pm"
        target = None
        self.compareDate(input, target)
    #
    # Time Tests
    #
    def testExactTime(self):
        input = "Let's go to the park at 12:51pm"
        target = "12:51"
        self.compareTime(input, target)
    def testInExactTime(self):
        input = "I want to leave in two hours and twenty minutes"
        target = datetime.datetime.today() + \
            datetime.timedelta(hours=2, minutes=20)
        self.compareTime(input, target)
    def testTimeNoMinutes(self):
        input = "Let's go to the park at 8pm"
        target = "20:00"
        self.compareTime(input, target)
    def testTimeNoMinutesLater(self):
        input = "Let's go to the park at 10pm"
        target = "22:00"
        self.compareTime(input, target)
    def testTimeDotMinutes(self):
        input = "Let's go to the park at 6.20pm"
        target = "18:20"
        self.compareTime(input, target)
    def testTimeDotMinutesZeroMinutes(self):
        input = "Let's go to the park at 6.00am"
        target = "06:00"
        self.compareTime(input, target)
    def testAmbiguousTime(self):
        # Bare hours with no am/pm are interpreted as evening times.
        input = "Let's go to the park at 8"
        target = "20:00"
        self.compareTime(input, target)
    def testAmbiguousDotTime(self):
        input = "Let's go to the park at 8.45"
        target = "20:45"
        self.compareTime(input, target)
    def testMilitaryMorningTime(self):
        input = "Let's go to the park at 08:00"
        target = "08:00"
        self.compareTime(input, target)
    def testMilitaryAfternoonTime(self):
        input = "Let's go to the park at 20:00"
        target = "20:00"
        self.compareTime(input, target)
    def testThisEve(self):
        input = "Let's go to the park this eve."
        target = "20:00"
        self.compareTime(input, target)
    def testTonightTime(self):
        input = "Let's go to the park tonight."
        target = "20:00"
        self.compareTime(input, target)
    def testBeforeTenIsEveningTime(self):
        input = "Let's go to the park at 5."
        target = "17:00"
        self.compareTime(input, target)
    def testInThe(self):
        input = "I went to the park in the afternoon"
        target = "15:00"
        self.compareTime(input, target)
    def testBothDateAndTime(self):
        input = "Let's go to the park at 5 tomorrow."
        target_time = "17:00"
        target_date = "2014-01-02"
        self.compareTime(input, target_time)
        self.compareDate(input, target_date)
    def testNoTime(self):
        input = "It's a very nice day."
        target = None
        self.compareTime(input, target)
if __name__ == "__main__":
    # Allow running this file directly, outside a test runner.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestDate)
    unittest.TextTestRunner(verbosity=2).run(suite)
| mit |
neuroidss/nupic.vision | nupicvision/regions/PictureSensorExplorers/__init__.py | 35 | 1069 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This package contains modules that implement various
# PictureSensor explorer plugins.
| gpl-3.0 |
protomouse/Flexget | flexget/plugins/input/plex.py | 4 | 15236 | """Plugin for plex media server (www.plexapp.com)."""
from xml.dom.minidom import parseString
import re
import logging
import os
from os.path import basename
from socket import gethostbyname
from string import find
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests
log = logging.getLogger('plex')
class InputPlex(object):
"""
Uses a plex media server (www.plexapp.com) tv section as an input.
'section' Required parameter, numerical (/library/sections/<num>) or section name.
'selection' Can be set to different keys:
- all : Default
- unwatched :
- recentlyAdded :
- recentlyViewed :
- recentlyViewedShows : Series only.
'all' and 'recentlyViewedShows' will only produce a list of show names while the other three will produce
filename and download url.
'username' Myplex (http://my.plexapp.com) username, used to connect to shared PMS'.
'password' Myplex (http://my.plexapp.com) password, used to connect to shared PMS'.
'server' Host/IP of PMS to connect to.
'lowercase_title' Convert filename (title) to lower case.
'strip_year' Remove year from title, ex: Show Name (2012) 01x01 => Show Name 01x01.
Movies will have year added to their filename unless this is set.
'original_filename' Use filename stored in PMS instead of transformed name. lowercase_title and strip_year
will be ignored.
'unwatched_only' Request only unwatched media from PMS.
'fetch' What to download, can be set to the following values:
- file The file itself, default.
- art Series or movie art as configured in PMS
- cover Series cover for series, movie cover for movies.
- thumb Episode thumbnail, series only.
- season_cover Season cover, series only. If used in movies, movie cover will be set.
Default paramaters:
server : localhost
port : 32400
selection : all
lowercase_title : no
strip_year : yes
original_filename: no
unwatched_only : no
fetch : file
Example:
plex:
server: 192.168.1.23
section: 3
selection: recentlyAdded
fetch: series_art
"""
    def validator(self):
        """Declare the accepted config schema for this plugin.

        'section' is required and may be given either as a section name
        (text) or as the numerical library key (integer); all other keys
        are optional.
        """
        from flexget import validator
        config = validator.factory('dict')
        config.accept('text', key='server')
        config.accept('text', key='selection')
        config.accept('integer', key='port')
        # 'section' is accepted twice on purpose: either form satisfies the
        # requirement.
        config.accept('text', key='section', required=True)
        config.accept('integer', key='section', required=True)
        config.accept('text', key='username')
        config.accept('text', key='password')
        config.accept('boolean', key='lowercase_title')
        config.accept('boolean', key='strip_year')
        config.accept('boolean', key='original_filename')
        config.accept('boolean', key='unwatched_only')
        config.accept('text', key='fetch')
        return config
def prepare_config(self, config):
config.setdefault('server', '127.0.0.1')
config.setdefault('port', 32400)
config.setdefault('selection', 'all')
config.setdefault('username', '')
config.setdefault('password', '')
config.setdefault('lowercase_title', False)
config.setdefault('strip_year', True)
config.setdefault('original_filename', False)
config.setdefault('unwatched_only', False)
config.setdefault('fetch', 'file')
config['plexserver'] = config['server']
config = self.plex_format_server(config)
return config
    def plex_get_globalaccesstoken(self, config):
        """Sign in to my.plexapp.com and return the account-wide access token.

        Raises plugin.PluginError when the request fails, the credentials
        are rejected, or no authentication-token element is returned.
        """
        header = {'X-Plex-Client-Identifier': 'flexget'}
        try:
            r = requests.post('https://my.plexapp.com/users/sign_in.xml',
                              auth=(config['username'], config['password']), headers=header)
        except requests.RequestException as error:
            raise plugin.PluginError('Could not log in to myplex! Error: %s' % error)
        # NOTE(review): 'Ivalid email' looks like a typo for 'Invalid email';
        # if so, bad credentials are never detected here -- confirm against
        # the actual myplex error response body.
        if 'Ivalid email' in r.text:
            raise plugin.PluginError('Myplex: invalid username and/or password!')
        dom = parseString(r.text)
        globalaccesstoken = dom.getElementsByTagName('authentication-token')[0].firstChild.nodeValue
        if not globalaccesstoken:
            raise plugin.PluginError('Myplex: could not find a server!')
        else:
            log.debug('Myplex: Got global accesstoken: %s' % globalaccesstoken)
            return globalaccesstoken
def plex_get_accesstoken(self, config, globalaccesstoken = ""):
if not globalaccesstoken:
globalaccesstoken = self.plex_get_globalaccesstoken(config)
try:
r = requests.get("https://my.plexapp.com/pms/servers?X-Plex-Token=%s" % globalaccesstoken)
except requests.RequestException as e:
raise plugin.PluginError("Could not get servers from my.plexapp.com using "
"authentication-token: %s. (%s)" % (globalaccesstoken, e))
dom = parseString(r.text)
for node in dom.getElementsByTagName('Server'):
if node.getAttribute('address') == config['server']:
accesstoken = node.getAttribute('accessToken')
log.debug("Got plextoken: %s" % accesstoken)
if not accesstoken:
raise plugin.PluginError('Could not retrieve accesstoken for %s.' % config['server'])
else:
return accesstoken
def plex_format_server(self, config):
if gethostbyname(config['server']) != config['server']:
config['server'] = gethostbyname(config['server'])
return config
    def plex_section_is_int(self, section):
        """Return True when the section is already a numerical library key."""
        return isinstance(section, int)
    def on_task_input(self, task, config):
        """Query the configured PMS section and return a list of entries.

        Resolves a section name to its numerical key if needed, fetches
        /library/sections/<key>/<selection> and builds one Entry per media
        item, pointing either at the media file itself or at artwork,
        depending on the 'fetch' setting.
        """
        config = self.prepare_config(config)
        accesstoken = ""
        urlconfig = {}
        urlappend = "?"
        entries = []
        data = {}  # NOTE(review): never used below
        if config['unwatched_only'] and config['section'] != 'recentlyViewedShows' and config['section'] != 'all':
            urlconfig['unwatched'] = '1'
        # Remote/shared servers require a myplex access token.
        if config['username'] and config['password'] and config['server'] != '127.0.0.1':
            accesstoken = self.plex_get_accesstoken(config)
            log.debug("Got accesstoken: %s" % accesstoken)
            urlconfig['X-Plex-Token'] = accesstoken
        for key in urlconfig:
            urlappend += '%s=%s&' % (key, urlconfig[key])
        # Resolve a section *name* to its numerical key.
        if not self.plex_section_is_int(config['section']):
            try:
                path = "/library/sections/"
                r = requests.get("http://%s:%d%s%s" %(config['plexserver'], config['port'], path, urlappend))
            except requests.RequestException as e:
                raise plugin.PluginError('Error retrieving source: %s' % e)
            dom = parseString(r.text.encode("utf-8"))
            for node in dom.getElementsByTagName('Directory'):
                if node.getAttribute('title') == config['section']:
                    config['section'] = int(node.getAttribute('key'))
        if not self.plex_section_is_int(config['section']):
            raise plugin.PluginError('Could not find section \'%s\'' % config['section'])
        log.debug("Fetching http://%s:%d/library/sections/%s/%s%s" %
                  (config['server'], config['port'], config['section'], config['selection'], urlappend))
        try:
            path = "/library/sections/%s/%s" % (config['section'], config['selection'])
            r = requests.get("http://%s:%d%s%s" %(config['plexserver'], config['port'], path, urlappend))
        except requests.RequestException as e:
            raise plugin.PluginError('There is no section with number %d. (%s)' % (config['section'], e) )
        dom = parseString(r.text.encode("utf-8"))
        plexsectionname = dom.getElementsByTagName('MediaContainer')[0].getAttribute('title1')
        viewgroup = dom.getElementsByTagName('MediaContainer')[0].getAttribute('viewGroup')
        log.debug("Plex section \"%s\" is a \"%s\" section" % (plexsectionname, viewgroup))
        if (viewgroup != "movie" and viewgroup != "show" and viewgroup != "episode"):
            raise plugin.PluginError("Section is neither a movie nor tv show section!")
        # Pick the XML element / attribute names appropriate for the view group.
        domroot = "Directory"
        titletag = "title"
        if viewgroup == "episode":
            domroot = "Video"
            titletag = "grandparentTitle"
            thumbtag = "thumb"
            arttag = "art"
            seasoncovertag = "parentThumb"
            covertag = "grandparentThumb"
        elif viewgroup == "movie":
            domroot = "Video"
            titletag = "title"
            arttag = "art"
            seasoncovertag = "thumb"
            covertag = "thumb"
            if config['fetch'] == "thumb":
                raise plugin.PluginError("Movie sections does not have any thumbnails to download!")
        for node in dom.getElementsByTagName(domroot):
            e = Entry()
            e['plex_server'] = config['plexserver']
            e['plex_port'] = config['port']
            e['plex_section'] = config['section']
            e['plex_section_name'] = plexsectionname
            e['plex_episode_thumb'] = ''
            # Normalise the title: optionally drop "(YYYY)", then remove
            # parens, replace '&' and strip remaining special characters.
            title = node.getAttribute(titletag)
            if config['strip_year']:
                title = re.sub(r'^(.*)\(\d{4}\)(.*)', r'\1\2', title)
            title = re.sub(r'[\(\)]', r'', title)
            title = re.sub(r'&', r'And', title)
            title = re.sub(r'[^A-Za-z0-9- ]', r'', title)
            if config['lowercase_title']:
                title = title.lower()
            if viewgroup == "show":
                e['title'] = title
                e['url'] = 'NULL'
                entries.append(e)
                # show ends here.
                continue
            e['plex_art'] = "http://%s:%d%s%s" % (config['server'], config['port'],
                                                  node.getAttribute(arttag), urlappend)
            e['plex_cover'] = "http://%s:%d%s%s" % (config['server'], config['port'],
                                                    node.getAttribute(covertag), urlappend)
            e['plex_season_cover'] = "http://%s:%d%s%s" % (config['server'], config['port'],
                                                           node.getAttribute(seasoncovertag), urlappend)
            if viewgroup == "episode":
                e['plex_thumb'] = "http://%s:%d%s%s" % (config['server'], config['port'], node.getAttribute('thumb'), urlappend)
                season = int(node.getAttribute('parentIndex'))
                if node.getAttribute('parentIndex') == node.getAttribute('year'):
                    # Date-based show: use the air date instead of SxxEyy.
                    season = node.getAttribute('originallyAvailableAt')
                    filenamemap = "%s_%s%s_%s_%s_%s.%s"
                    episode = ""
                elif node.getAttribute('index'):
                    episode = int(node.getAttribute('index'))
                    filenamemap = "%s_%02dx%02d_%s_%s_%s.%s"
                else:
                    log.debug("Could not get episode number for '%s' (Hint, ratingKey: %s)"
                              % (title, node.getAttribute('ratingKey')))
                    break
            elif viewgroup == "movie":
                filenamemap = "%s_%s_%s_%s.%s"
                e['plex_duration'] = node.getAttribute('duration')
                year = node.getAttribute('year')  # NOTE(review): unused
            e['plex_summary'] = node.getAttribute('summary')
            count = node.getAttribute('viewCount')
            offset = node.getAttribute('viewOffset')
            if count:
                e['plex_status'] = "seen"
            elif offset:
                e['plex_status'] = "inprogress"
            else:
                e['plex_status'] = "unwatched"
            for media in node.getElementsByTagName('Media'):
                vcodec = media.getAttribute('videoCodec')
                acodec = media.getAttribute('audioCodec')
                if config['fetch'] == "file" or not config['fetch']:
                    container = media.getAttribute('container')
                else:
                    container = "jpg"
                resolution = media.getAttribute('videoResolution') + "p"
                for part in media.getElementsByTagName('Part'):
                    if config['fetch'] == "file" or not config['fetch']:
                        key = part.getAttribute('key')
                    elif config['fetch'] == "art":
                        key = node.getAttribute(arttag)
                    elif config['fetch'] == "cover":
                        # NOTE(review): uses arttag exactly like the "art"
                        # branch above; covertag looks intended here -- confirm.
                        key = node.getAttribute(arttag)
                    elif config['fetch'] == "season_cover":
                        key = node.getAttribute(seasoncovertag)
                    elif config['fetch'] == "thumb":
                        key = node.getAttribute(thumbtag)
                    # NOTE(review): any other 'fetch' value leaves 'key'
                    # unbound and would raise NameError below.
                    duration = part.getAttribute('duration')
                    if viewgroup == "show":
                        # NOTE(review): unreachable -- "show" items 'continue'
                        # earlier in the loop; 'episodetitle' is never defined
                        # anywhere in this module.
                        e['plex_title'] = episodetitle
                    elif viewgroup == "movie":
                        e['plex_title'] = title
                    if config['original_filename']:
                        filename, fileext = os.path.splitext(basename(part.getAttribute('file')))
                        if config['fetch'] != 'file':
                            filename += ".jpg"
                        else:
                            filename = "%s.%s" % (filename, fileext)
                    else:
                        if viewgroup == "episode":
                            filename = filenamemap % (title.replace(" ", "."), season, episode, resolution, vcodec,
                                                      acodec, container)
                            title = filename
                        elif viewgroup == "movie":
                            filename = filenamemap % (title.replace(" ", "."), resolution, vcodec,
                                                      acodec, container)
                    e['plex_url'] = "http://%s:%d%s%s" % (config['server'], config['port'], key, urlappend)
                    e['plex_path'] = key
                    e['url'] = "http://%s:%d%s%s" % (config['server'], config['port'], key, urlappend)
                    e['plex_duration'] = duration
                    e['filename'] = filename
                    e['title'] = title
                    if key == "":
                        log.debug("Could not find anything in PMS to download. Next!")
                    else:
                        entries.append(e)
        return entries
@event('plugin.register')
def register_plugin():
    """Register the plex input plugin with flexget (plugin API version 2)."""
    plugin.register(InputPlex, 'plex', api_ver=2)
| mit |
commial/miasm | test/arch/x86/unit/mn_rotsh.py | 5 | 3417 | import sys
from asm_test import Asm_Test_64
class Test_ROR_0(Asm_Test_64):
    # 64-bit rotate by 0 must leave RAX unchanged.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       ROR RAX, 0
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x8877665544332211
class Test_ROR_8(Asm_Test_64):
    # ROR by 8 moves the low byte (0x11) into the top byte.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       ROR RAX, 8
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x1188776655443322
class Test_ROR_X8(Asm_Test_64):
    # Rotate count taken from CL (16): the low word rotates into the top.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       MOV CL, 16
       ROR RAX, CL
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x2211887766554433
class Test_SHR_0(Asm_Test_64):
    # 64-bit logical shift right by 0 must leave RAX unchanged.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       SHR RAX, 0
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x8877665544332211
class Test_SHR_8(Asm_Test_64):
    # SHR by 8 drops the low byte and zero-fills the top byte.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       SHR RAX, 8
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x88776655443322
class Test_SHR_X8(Asm_Test_64):
    # Shift count taken from CL (16): the low word is discarded.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       MOV CL, 16
       SHR RAX, CL
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x887766554433
class Test_ROR_0_64_32(Asm_Test_64):
    # Expects the 32-bit destination write to zero-extend into RAX even
    # though the rotate count is 0 (EAX value 0x44332211 is unchanged).
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       ROR EAX, 0
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x44332211
class Test_ROR_8_64_32(Asm_Test_64):
    # 32-bit ROR by 8 on EAX=0x44332211; upper RAX bits are zeroed by the
    # 32-bit register write.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       ROR EAX, 8
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x11443322
class Test_ROR_X8_64_32(Asm_Test_64):
    # 32-bit ROR with count from CL (16): low/high words of EAX swap.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       MOV CL, 16
       ROR EAX, CL
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x22114433
class Test_SHR_0_64_32(Asm_Test_64):
    # Expects the 32-bit destination write to zero-extend into RAX even
    # though the shift count is 0 (EAX value 0x44332211 is unchanged).
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       SHR EAX, 0
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x44332211
class Test_SHR_8_64_32(Asm_Test_64):
    # 32-bit SHR by 8 on EAX=0x44332211; upper RAX bits zeroed by the write.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       SHR EAX, 8
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x443322
class Test_SHR_X8_64_32(Asm_Test_64):
    # 32-bit SHR with count from CL (16): only the high word remains.
    TXT = '''
    main:
       MOV RAX, 0x8877665544332211
       MOV CL, 16
       SHR EAX, CL
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RAX == 0x4433
class Test_SHLD(Asm_Test_64):
    # 32-bit SHLD: count 0x21 in CL is masked mod 32 to 1, so EDX
    # (0x12345678) shifts left by one, filling from ESI's top bit (0):
    # result 0x2468ACF0, zero-extended into RDX by the 32-bit write.
    TXT = '''
    main:
       MOV RAX, 0x1234FDB512345678
       MOV RDX, RAX
       MOV RAX, 0x21AD96F921AD3D34
       MOV RSI, RAX
       MOV RAX, 0x0000000000000021
       MOV RCX, RAX
       SHLD EDX, ESI, CL
       RET
    '''
    def check(self):
        assert self.myjit.cpu.RDX == 0x000000002468ACF0
if __name__ == "__main__":
    # Instantiate and run every test case, forwarding any extra CLI
    # arguments (e.g. the jitter engine choice) to Asm_Test_64.
    [
        test(*sys.argv[1:])() for test in [
            Test_ROR_0,
            Test_ROR_8,
            Test_ROR_X8,
            Test_SHR_0,
            Test_SHR_8,
            Test_SHR_X8,
            Test_ROR_0_64_32,
            Test_ROR_8_64_32,
            Test_ROR_X8_64_32,
            Test_SHR_0_64_32,
            Test_SHR_8_64_32,
            Test_SHR_X8_64_32,
            Test_SHLD,
        ]
    ]
| gpl-2.0 |
indashnet/InDashNet.Open.UN2000 | android/build/tools/compare_fileslist.py | 27 | 2527 | #!/usr/bin/env python
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cgi, os, string, sys
def IsDifferent(row):
  """Return True when row holds at least two distinct truthy values.

  Falsy entries (None, 0, empty) are ignored, so a row whose truthy
  values are all equal -- or that has at most one truthy value -- is
  considered "not different".
  """
  reference = None
  for item in row:
    if not item:
      continue
    if reference is None:
      reference = item
    elif reference != item:
      return True
  return False
def main(argv):
  # Read each input file of "<size> <filename>" pairs and emit an HTML
  # table comparing, per filename, the sizes across all inputs. Cells are
  # highlighted: missing (.z), differing across builds (.d), equal (.sz).
  # NOTE: this module is Python 2 only (print statements, tuple-parameter
  # lambdas, dict.has_key/iteritems, the file() builtin).
  inputs = argv[1:]
  data = {}   # filename -> {input index -> size}
  index = 0
  for input in inputs:
    f = file(input, "r")
    lines = f.readlines()
    f.close()
    lines = map(string.split, lines)
    lines = map(lambda (x,y): (y,int(x)), lines)  # (size, name) -> (name, size)
    for fn,sz in lines:
      if not data.has_key(fn):
        data[fn] = {}
      data[fn][index] = sz
    index = index + 1
  # Build one row per filename: [name, size_in_input_0, size_in_input_1, ...]
  rows = []
  for fn,sizes in data.iteritems():
    row = [fn]
    for i in range(0,index):
      if sizes.has_key(i):
        row.append(sizes[i])
      else:
        row.append(None)
    rows.append(row)
  rows = sorted(rows, key=lambda x: x[0])
  print """<html>
  <head>
    <style type="text/css">
      .fn, .sz, .z, .d {
        padding-left: 10px;
        padding-right: 10px;
      }
      .sz, .z, .d {
        text-align: right;
      }
      .fn {
        background-color: #ffffdd;
      }
      .sz {
        background-color: #ffffcc;
      }
      .z {
        background-color: #ffcccc;
      }
      .d {
        background-color: #99ccff;
      }
    </style>
  </head>
  <body>
  """
  print "<table>"
  print "<tr>"
  for input in inputs:
    # NOTE(review): raises IndexError when the input path has no separator;
    # presumably inputs are always of the form "<combo>/<file>" -- confirm.
    combo = input.split(os.path.sep)[1]
    print "  <td class='fn'>%s</td>" % cgi.escape(combo)
  print "</tr>"
  for row in rows:
    print "<tr>"
    for sz in row[1:]:
      if not sz:
        print "  <td class='z'>&nbsp;</td>"
      elif IsDifferent(row[1:]):
        print "  <td class='d'>%d</td>" % sz
      else:
        print "  <td class='sz'>%d</td>" % sz
    print "  <td class='fn'>%s</td>" % cgi.escape(row[0])
    print "</tr>"
  print "</table>"
  print "</body></html>"
if __name__ == '__main__':
  main(sys.argv)  # argv[0] (the script name) is skipped inside main()
| apache-2.0 |
mozts2005/OuterSpace | server/lib/ige/ospace/Rules/Techs.py | 2 | 18469 | #
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import sys, copy
from ige import log
from ige.ospace import Const
import sha, os.path
import cPickle as pickle
import types
from ige.ospace import TechHandlers
from xml.sax.handler import ContentHandler
from ige.IDataHolder import IDataHolder
import xml.sax
def noop(*args, **kwargs):
    """Do-nothing placeholder handler: swallows any arguments, reports success (1)."""
    return 1
# Default values for every Technology attribute.  The *type* of each default
# also drives Technology.set(): incoming XML attribute strings are coerced to
# the type of the default found here.  Single-item lists (e.g. ['resource'])
# are type prototypes only -- initTechnologies() empties them after loading.
# Technology.__getattr__ serves these defaults for attributes never set.
attrs = {
    # types of technologies
    'isDiscovery' : 0,
    'isStructure' : 0,
    'isProject': 0,
    'isShipEquip' : 0,
    'isShipHull' : 0,
    'isMine' : 0,
    'isStarting' : 0,
    "subtype": "techSubtype",
    "level": 0,
    "maxImprovement": 5, #Rules.techMaxImprovement,
    "isMilitary": 0,
    # dialog info
    'shortname': '', #for TechInfoDlg tech linkages
    # construction costs & conditions
    'buildProd' : 0,
    'buildTurns' : 1,
    'buildSRes': ['resource'],
    # operation costs
    'operBio' : 0,
    'operEn' : 0,
    "operWorkers": 0,
    # production
    'prodBio' : 0, # bio
    'prodEn' : 0, # energy
    'prodProd' : 0, # production
    'prodSci' : 0, # science
    'prodEnv': 0, # enviromental effect
    'prodPop': 0, # produce population
    'prodBioMod': [0.0, 0.0, 0.0, 0.0], # tuple of (plBio, plMin, plEn, default)
    'prodEnMod': [0.0, 0.0, 0.0, 0.0], # dtto
    'prodProdMod': [0.0, 0.0, 0.0, 0.0], # dtto
    'prodSciMod' : [0.0, 0.0, 0.0, 1.0], # ditto; default is 1.0 for legacy support
    # env
    'solarMod': 0, # - : cloak solar radiation / + : create thermal radiation
    # storage
    'storBio' : 0,
    'storProd' : 0,
    'storEn' : 0,
    'storPop' : 0,
    # morale affecting
    'revoltThr': 0,
    'moraleTrgt': 0,
    'govPwr': 0,
    # military
    'scannerPwr': 0,
    "structWeapons": [0],
    "planetShield": 0, #planetary shield; when structure built, shield = 0; shield will regenerate at 2% per turn until equal to this value. Structures do not add shield strength; strongest shield = planet shield
    "systemAtt": 0,
    "systemDef": 0,
    "refuelMax": 0,
    "refuelInc": 0,
    "repairShip": 0.0,
    "upgradeShip": 0,
    "trainShipInc": 0.0, # how many exp/turn
    "trainShipMax": 0, # exp. cap (in base exps), not affected by techEff
    "fleetSpeedBoost": 0.0, # speed boost for stargates
    # misc
    "unpackPop": 0,
    'envDmg' : 0,
    'maxHP' : 0,
    "fullInfo": 0, # if True, show full tech info even player not own tech
    # ship equipment
    'equipType' : '', # identifier of subtype seq_mod's equipment type; see maxEquipType in Rules/__init__
    "addMP" : 0, #for extra MP to be added to ship equipment
    'combatClass' : 0,
    'combatAtt': 0, #not cumulative for equipment; cumulative for hull, drives, cockpits, etc
    'combatDef': 0, #not cumulative for equipment; cumulative for hull, drives, cockpits, etc
    "missileDef": 0, #not cumulative for equipment; cumulative for hull, drives, cockpits, etc
    "combatAttPerc": 1.0, #multiplier of ATT; min of 100%; not cumulative
    "combatDefPerc": 1.0, #multiplier of DEF; min of 100%; not cumulative
    "missileDefPerc": 1.0, #multiplier of missile DEF; min of 100%; not cumulative
    'unpackStruct': '',
    'deployHandlerID' : '', #technology ID of tech to find deployHandlerFunction & deployHandlerValidator (this can be the deployable device OR a project)
    'deployHandlerFunction' : noop, #function name of TechHandler
    'deployHandlerValidator' : noop, #function name of TechHandler Validator
    'signature': 0, #**** NOT cumulative (change effective 0.5.63)
    'signatureCloak': 1.0, # max of 1.0 is effective; not cumulative
    'signatureDecloak': 1.0, # min of 1.0 is effective; not cumulative
    "minSignature": 0,
    "slots": 0,
    "weight": 0,
    "maxWeight": 0,
    "engPwr": 0,
    "shieldPerc": 0.0, # how many percent of maxHP have shields
    "minHull": 0,
    "maxHull": 10, #just make this higher than the largest hull so we know it doesn't break anything
    "maxInstallations": 0,
    "shieldRechargeFix": 0, # fixed amount of HP/turn to recharge
    "shieldRechargePerc": 0.0, # how many percent of shieldHP/turn is recharged
    "hardShield": 0.0, #shield penetrating weapons will penetrate at 100%; use as 1-hardShield for penentration level (hardShield percent = %damage absorbed by shield)
    "autoRepairFix": 0, # fixed amount of HP/turn to repair
    "autoRepairPerc": 0.0, # how many percent of maxHP/turn is repaired
    "damageAbsorb": 0, #amount of damage absorbed by the hull (not shield!); max sum is 5 damage (set in Rules)
    # weapons
    'weaponDmgMin': 0,
    'weaponDmgMax': 0,
    'weaponAtt': 0,
    'weaponClass': 0,
    "weaponROF": 0.0,
    "weaponIgnoreShield": 0,
    "weaponIsMissile": 0,
    "weaponGoodForFlak": 1,
    #mines
    'mineclass':0, #tech id of the mine; usually level 99 tech - structure in the system with the highest tech id will always deploy; others will be ignored (per player)
    'minenum':0, #number of mines this control structure supports; if another structure built more mines, mines will not self destruct
    'minerate':0, #number of turns between mine deployments; note that system will deploy mines on: turn%minerate==0
    # research
    'researchRequires': ['technology'],
    'researchEnables': ['technology'],
    'researchDisables': ['technology'],
    'researchReqSRes': ['resource'],
    "researchMod": "expr",
    'researchTurns': 1,
    "researchRaces": "BCH",
    # misc
    "data": "none",
    "recheckWhenTechLost": 0,
    "deprecated": 0, # this tech is no longer active
    # before build handler
    'validateConstrHandler' : noop,
    # after build handler
    'finishConstrHandler' : noop,
    # after research handler
    'finishResearchHandler' : noop,
    # names
    'name' : u'Unspecified',
    # textual description
    'textPreRsrch': u'Not specified',
    'textDescr': u'Not specified',
    'textFlavor': u'Not specified',
}
# class representing technologies
class Technology:
def __init__(self, id, symbol, reg):
self.id = id
self.symbol = symbol
if id in reg:
raise KeyError("%s is already registered" % id)
reg[id] = self
def set(self, key, value):
if attrs.has_key(key):
attrType = type(attrs[key])
if attrType == types.IntType:
value = int(value)
elif attrType == types.FloatType:
value = float(value)
elif attrType == types.UnicodeType:
pass
elif attrType == types.StringType:
value = str(value)
elif attrType == types.FunctionType:
value = getattr(TechHandlers, value)
elif attrType == types.ListType:
itemType = type(attrs[key][0])
if itemType == types.IntType:
convertFunc = int
elif itemType == types.StringType:
convertFunc = str
elif itemType == types.FloatType:
convertFunc = float
else:
raise 'Unsupported attribute type %s' % repr(attrType)
result = []
for item in value.split(','):
if item:
result.append(convertFunc(item))
value = result
else:
raise 'Unsupported attribute type %s' % repr(attrType)
setattr(self, key, value)
else:
raise AttributeError('Cannot create %s - unsupported attribute.' % key)
def __getattr__(self, attr):
if attrs.has_key(attr):
# optimalization
setattr(self, attr, attrs[attr])
return attrs[attr]
else:
raise AttributeError('No attribute %s' % attr)
def isDefault(self, attr):
if hasattr(self, attr):
return getattr(self, attr) == attrs[attr]
else:
return 1
def __repr__(self):
result = '(Technology '
for key, value in self.__dict__.items():
result += '%s : %s, ' % (repr(key), repr(value))
result += ')'
return result
# parse TechTree.xml and create all tech objects
class TechTreeContentHandler(ContentHandler):
def setGlobals(self, techs, tech):
self.techs = techs
self.Tech = tech
def startDocument(self):
#@log.message('Parsing tech tree...')
self.state = 1
self.text = ''
def endDocument(self):
#@log.message('Tech tree created')
if self.state != 1:
raise 'Wrong TechTree specification'
def startElement(self, name, attrs):
if self.state == 1 and name == 'techtree':
self.state = 2
elif self.state == 2 and name == 'technology':
log.debug('Tech %s [%s]' % (attrs['name'], attrs['id']))
self.state = 3
self.tech = Technology(int(attrs['id']), attrs['symbol'], self.techs)
setattr(self.Tech, attrs['symbol'], int(attrs['id']))
self.tech.set('name', attrs['name'])
elif self.state == 3 and name == 'structure':
self.tech.set('isStructure', 1)
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'discovery':
self.tech.set('isDiscovery', 1)
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'notdiscovery':
self.tech.set('isDiscovery', 0)
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'starting':
self.tech.set('isStarting', 1)
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'notstarting':
self.tech.set('isStarting', 0)
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'shipequip':
self.tech.set('isShipEquip', 1)
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'project':
self.tech.set('isProject', 1)
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'shiphull':
self.tech.set('isShipHull', 1)
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'mine':
self.tech.set('isMine', 1)
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'data':
for key in attrs.keys():
self.tech.set(key, attrs[key])
elif self.state == 3 and name == 'preresearch':
self.state = 4
self.text = ''
elif self.state == 3 and name == 'description':
self.state = 4
self.text = ''
elif self.state == 3 and name == 'flavor':
self.state = 4
self.text = ''
else:
raise 'Unsupported tag %s' % str(name)
def endElement(self, name):
if self.state == 2 and name == 'techtree':
self.state = 1
elif self.state == 3 and name == 'technology':
self.state = 2
#log.debug(repr(self.tech))
elif self.state == 4 and name == 'preresearch':
self.tech.textPreRsrch = self.text
self.state = 3
elif self.state == 4 and name == 'description':
self.tech.textDescr = self.text
self.state = 3
elif self.state == 4 and name == 'flavor':
self.tech.textFlavor = self.text
self.state = 3
def characters(self, text):
self.text += text
## check, if anything has been changed
def chsumDir(chsum, dirname, names):
names.sort()
for filename in names:
if os.path.splitext(filename)[1] in ('.xml',):
log.debug('Checking file', filename)
# read file
fh = open(os.path.join(dirname, filename), 'rb')
chsum.update(fh.read())
fh.close()
def initTechnologies(path):
    """Init technologies from XML files in the path.

    Returns (techs, Tech): a dict mapping techID -> Technology and a holder
    whose attributes map symbolic tech names -> techIDs.  A pickled cache
    (techs.spf / Tech.spf) is used when the XML sources are unchanged (as
    determined by a SHA checksum) or when running from a packed build.
    """
    # holder for all technologies
    techs = {}
    # holder for tech IDs
    Tech = IDataHolder()
    # compute checksum of all XML sources so the cache can be invalidated
    file = sys.modules[__name__].__file__
    forceLoad = 0
    if os.path.exists(file):
        # regular module
        chsum = sha.new()
        os.path.walk(path, chsumDir, chsum)
    else:
        # packed, cannot access xml specifications -- must use the cache
        path = os.path.join('res', 'techspec')
        forceLoad = 1
    # read old checksum
    try:
        fh = open(os.path.join(path, 'checksum'), 'rb')
        oldChsum = fh.read()
        fh.close()
    except IOError:
        oldChsum = ''
    # compare; note chsum is only defined when forceLoad is 0 (short-circuit)
    if forceLoad or chsum.hexdigest() == oldChsum:
        # load old definitions
        log.message('Loading stored specifications from', path)
        techs = pickle.load(open(os.path.join(path, 'techs.spf'), 'rb'))
        Tech = pickle.load(open(os.path.join(path, 'Tech.spf'), 'rb'))
        log.message("There is %d technologies" % len(techs))
        # clean up 'type' in lists: single-item list defaults in the
        # module-level `attrs` table are type prototypes only -- empty them
        # so they do not leak into technologies as bogus defaults
        for key in attrs.keys():
            if type(attrs[key]) == types.ListType and len(attrs[key]) == 1:
                log.debug("Cleaning up", key)
                attrs[key] = []
        return techs, Tech
    # create new ones
    ## load technologies definitions
    def processDir(arg, dirname, names):
        # os.path.walk visitor: parse every XML file, skipping VCS metadata
        if dirname.find(".svn") >= 0:
            log.message("Skipping directory", dirname)
            return
        log.message('Loading XML files from', dirname)
        names.sort()
        for filename in names:
            if os.path.splitext(filename)[1] == '.xml':
                log.message('Parsing XML file', filename)
                contentHandler = TechTreeContentHandler()
                contentHandler.setGlobals(techs, Tech)
                xml.sax.parse(os.path.join(dirname, filename), contentHandler)
    # collect xml files
    os.path.walk(path, processDir, None)
    # clean up 'type' in lists (same prototype cleanup as in the cache path)
    for key in attrs.keys():
        if type(attrs[key]) == types.ListType and len(attrs[key]) == 1:
            log.debug("Cleaning up", key)
            attrs[key] = []
    # link tech tree using researchRequires fields
    # construct researchEnables fields
    log.message('Converting symbolic fields...')
    for techID in techs.keys():
        tech = techs[techID]
        # convert symbolic names to numbers
        # researchRequires entries are "SYMBOL-improvement" strings
        techIDs = []
        for techSymName in tech.researchRequires:
            #@log.debug('Converting REQ', techSymName)
            symName, improvement = techSymName.split('-')
            techIDs.append((getattr(Tech, symName), int(improvement)))
        tech.researchRequires = techIDs
        # researchEnables entries are "improvement-SYMBOL" strings, bucketed
        # by improvement level 1..6
        techIDs = {1: [], 2:[], 3:[], 4:[], 5:[], 6:[]}
        for techSymName in tech.researchEnables:
            #@log.debug('Converting EN', techSymName)
            improvement, symName = techSymName.split('-')
            techIDs[int(improvement)].append(getattr(Tech, symName))
        tech.researchEnables = techIDs
        techIDs = []
        for techSymName in tech.researchDisables:
            techIDs.append(getattr(Tech, techSymName))
        tech.researchDisables = techIDs
        techIDs = []
        if tech.unpackStruct:
            tech.unpackStruct = getattr(Tech, tech.unpackStruct)
        else:
            tech.unpackStruct = 0
        # strat. resources: symbolic Const names -> numeric constants
        stratRes = []
        for sr in tech.researchReqSRes:
            stratRes.append(getattr(Const, sr))
        tech.researchReqSRes = stratRes
        stratRes = []
        for sr in tech.buildSRes:
            stratRes.append(getattr(Const, sr))
        tech.buildSRes = stratRes
        # evaluate researchMod ("expr" is the untouched default -> 1.0)
        # NOTE(review): eval of XML-supplied expressions -- trusted data only
        if tech.researchMod == "expr":
            tech.researchMod = 1.0
        else:
            tech.researchMod = eval(tech.researchMod)
        #~ # convert weapons
        #~ techIDs = []
        #~ for weaponName in tech.weapons:
        #~     techIDs.append(getattr(Tech, weaponName))
        #~ tech.weapons = techIDs
    # link: make researchRequires/researchEnables mutually consistent
    log.message('Linking tech tree...')
    for techID in techs.keys():
        tech = techs[techID]
        #@log.debug(techID, 'Req', tech.researchRequires)
        #@log.debug(techID, 'En', tech.researchEnables)
        for tmpTechID, improvement in tech.researchRequires:
            if techID not in techs[tmpTechID].researchEnables[improvement]:
                #@log.debug('Adding', tmpTechID, improvement, 'ENABLES', techID)
                techs[tmpTechID].researchEnables[improvement].append(techID)
        for improvement in tech.researchEnables.keys():
            for tmpTechID in tech.researchEnables[improvement]:
                if (techID, improvement) not in techs[tmpTechID].researchRequires:
                    #@log.debug('Adding', tmpTechID, 'REQUIRES', techID, improvement)
                    techs[tmpTechID].researchRequires.append((techID, improvement))
    # propagate researchDisables until a fixed point is reached, making the
    # disable relation symmetric and transitive
    changed = 1
    while changed:
        changed = 0
        log.debug("Tech disable iteration")
        for techID in techs:
            tech = techs[techID]
            for tech2ID in tech.researchDisables:
                tech2 = techs[tech2ID]
                if techID not in tech2.researchDisables and techID != tech2ID:
                    tech2.researchDisables.append(techID)
                    changed = 1
                    log.debug("Adding", tech2ID, "DISABLES", techID, ", NOW", tech2.researchDisables)
                for tech3ID in tech2.researchDisables:
                    tech3 = techs[tech3ID]
                    if tech3ID not in tech.researchDisables and tech3ID != techID:
                        tech.researchDisables.append(tech3ID)
                        changed = 1
                        log.debug("Adding", techID, "DISABLES", tech3ID, "NOW", tech.researchDisables)
    # just for debug
    #for techID in techs.keys():
    #    tech = techs[techID]
    #    log.debug('Link:', techID, tech.isStarting, tech.researchRequires, tech.researchEnables)
    # save new specification (pickle protocol 1, binary)
    log.message('Saving specification...')
    pickle.dump(techs, open(os.path.join(path, 'techs.spf'), 'wb'), 1)
    pickle.dump(Tech, open(os.path.join(path, 'Tech.spf'), 'wb'), 1)
    fh = open(os.path.join(path, 'checksum'), 'wb')
    fh.write(chsum.hexdigest())
    fh.close()
    log.message("There is %d technologies" % len(techs))
    return techs, Tech
#~ # save DOT file
#~ dotName = os.path.join(os.path.dirname(__file__), "techtree.dot")
#~ fh = open(dotName, "w+")
#~ print >> fh, """
#~ digraph {
#~ rankdir=LR;
#~ ranksep=0.25;
#~ nodesep=0.25;
#~ overlap=scale;
#~ spline=false;
#~ node[shape=record,fontsize=10,height=0.25,fontname=Tahoma];
#~ edge[fontsize=8,fontname=Tahoma]
#~ {
#~ TL1 -> TL2 -> TL3 -> TL4 -> TL5;
#~ }
#~ """
#~ # process nodes
#~ for level in range(1,6):
#~ print >>fh, "\tsubgraph xcluster_%d {" % level
#~ print >>fh, "\t\tcolor = black;"
#~ print >>fh, "\t\tTL%d;" % level,
#~ for techID in techs:
#~ tech = techs[techID]
#~ if tech.level != level:
#~ continue
#~ print >>fh, '"{%s|%d}";' % (tech.name, tech.level),
#~ print >>fh
#~ print >>fh, "\t}"
#~ for techID in techs:
#~ tech = techs[techID]
#~ if tech.level != level:
#~ continue
#~ for tech2ID, impr in tech.researchRequires:
#~ tech2 = techs[tech2ID]
#~ print >> fh, '\t"{%s|%d}" -> "{%s|%d}" [label = "%d"];' % (
#~ tech2.name,
#~ tech2.level,
#~ tech.name,
#~ tech.level,
#~ impr,
#~ )
#~ print >> fh, """
#~ }
#~ """
#~ fh.close()
| gpl-2.0 |
srikantbmandal/ansible | lib/ansible/modules/cloud/google/gce_img.py | 70 | 6747 | #!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""An Ansible module to utilize GCE image resources."""
# Standard Ansible metadata block consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_img
version_added: "1.9"
short_description: utilize GCE image resources
description:
- This module can create and delete GCE private images from gzipped
compressed tarball containing raw disk data or from existing detached
disks in any zone. U(https://cloud.google.com/compute/docs/images)
options:
name:
description:
- the name of the image to create or delete
required: true
default: null
description:
description:
- an optional description
required: false
default: null
family:
description:
- an optional family name
required: false
default: null
version_added: "2.2"
source:
description:
- the source disk or the Google Cloud Storage URI to create the image from
required: false
default: null
state:
description:
- desired state of the image
required: false
default: "present"
choices: ["present", "absent"]
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
timeout:
description:
- timeout for the operation
required: false
default: 180
version_added: "2.0"
service_account_email:
description:
- service account email
required: false
default: null
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
project_id:
description:
- your GCE project ID
required: false
default: null
requirements:
- "python >= 2.6"
- "apache-libcloud"
author: "Tom Melendez (supertom)"
'''
EXAMPLES = '''
# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
- gce_img:
name: test-image
source: test-disk
zone: us-central1-a
state: present
# Create an image named test-image from a tarball in Google Cloud Storage.
- gce_img:
name: test-image
source: https://storage.googleapis.com/bucket/path/to/image.tgz
# Alternatively use the gs scheme
- gce_img:
name: test-image
source: gs://bucket/path/to/image.tgz
# Delete an image named test-image.
- gce_img:
name: test-image
state: absent
'''
# libcloud is optional at import time: has_libcloud is checked in main() so
# the module can still be imported (e.g. for documentation) without it.
try:
    import libcloud
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.google import GoogleBaseError
    from libcloud.common.google import ResourceExistsError
    from libcloud.common.google import ResourceNotFoundError
    # touch the GCE provider constant to fail early if this libcloud build
    # lacks GCE support
    _ = Provider.GCE
    has_libcloud = True
except ImportError:
    has_libcloud = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
GCS_URI = 'https://storage.googleapis.com/'
def create_image(gce, name, module):
    """Create an image with the specified name.

    The source may be a Google Cloud Storage URI (https or gs scheme) or
    the name of an existing detached disk in the configured zone.  Returns
    True when a new image was created, False when an image with that name
    already exists; any other failure is reported via module.fail_json.
    """
    params = module.params
    source = params.get('source')
    zone = params.get('zone')
    timeout = params.get('timeout')
    family = params.get('family')

    if not source:
        module.fail_json(msg='Must supply a source', changed=False)

    if source.startswith(GCS_URI):
        # already an https Google Cloud Storage URI
        image_source = source
    elif source.startswith('gs://'):
        # libcloud only accepts https URI.
        image_source = source.replace('gs://', GCS_URI)
    else:
        # treat the source as the name of a detached disk
        try:
            image_source = gce.ex_get_volume(source, zone)
        except ResourceNotFoundError:
            module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
                             changed=False)
        except GoogleBaseError as e:
            module.fail_json(msg=str(e), changed=False)

    extra_args = {}
    if family is not None:
        extra_args['family'] = family

    # temporarily widen the connection timeout for the (slow) image build
    saved_timeout = gce.connection.timeout
    try:
        gce.connection.timeout = timeout
        gce.ex_create_image(name, image_source, params.get('description'),
                            use_existing=False, **extra_args)
        return True
    except ResourceExistsError:
        return False
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
    finally:
        gce.connection.timeout = saved_timeout
def delete_image(gce, name, module):
    """Delete a specific image resource by name.

    Returns True when the image was deleted, False when it did not exist;
    other API errors are reported via module.fail_json.
    """
    try:
        gce.ex_delete_image(name)
    except ResourceNotFoundError:
        return False
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
    else:
        return True
def main():
    """Ansible entry point: create or delete a GCE image based on `state`."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            family=dict(),
            description=dict(),
            source=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            pem_file=dict(type='path'),
            project_id=dict(),
            timeout=dict(type='int', default=180)
        )
    )
    if not has_libcloud:
        module.fail_json(msg='libcloud with GCE support is required.')
    gce = gce_connect(module)
    name = module.params.get('name')
    state = module.params.get('state')
    family = module.params.get('family')
    changed = False
    # 'family' needs libcloud >= 1.0.0; the string comparison is adequate
    # for the affected 0.x version range
    if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
        module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
                         changed=False)
    # user wants to create an image.
    if state == 'present':
        changed = create_image(gce, name, module)
    # user wants to delete the image.
    if state == 'absent':
        changed = delete_image(gce, name, module)
    module.exit_json(changed=changed, name=name)
if __name__ == '__main__':
    main()
| gpl-3.0 |
delhivery/django | django/contrib/redirects/middleware.py | 383 | 1810 | from __future__ import unicode_literals
from django import http
from django.apps import apps
from django.conf import settings
from django.contrib.redirects.models import Redirect
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
class RedirectFallbackMiddleware(object):
    """Rewrite 404 responses using stored Redirect objects for the current
    site, optionally retrying with an appended slash (APPEND_SLASH)."""

    # Defined as class-level attributes to be subclassing-friendly.
    response_gone_class = http.HttpResponseGone
    response_redirect_class = http.HttpResponsePermanentRedirect

    def __init__(self):
        if not apps.is_installed('django.contrib.sites'):
            raise ImproperlyConfigured(
                "You cannot use RedirectFallbackMiddleware when "
                "django.contrib.sites is not installed."
            )

    def _find_redirect(self, request, site):
        """Return the matching Redirect for this request, or None."""
        try:
            return Redirect.objects.get(
                site=site, old_path=request.get_full_path())
        except Redirect.DoesNotExist:
            pass
        if settings.APPEND_SLASH and not request.path.endswith('/'):
            # Retry with a trailing slash, mirroring CommonMiddleware.
            try:
                return Redirect.objects.get(
                    site=site,
                    old_path=request.get_full_path(force_append_slash=True),
                )
            except Redirect.DoesNotExist:
                pass
        return None

    def process_response(self, request, response):
        # No need to check for a redirect for non-404 responses.
        if response.status_code != 404:
            return response

        redirect = self._find_redirect(request, get_current_site(request))
        if redirect is None:
            # No redirect was found. Return the response.
            return response
        if redirect.new_path == '':
            # An empty new_path means "gone on purpose".
            return self.response_gone_class()
        return self.response_redirect_class(redirect.new_path)
| bsd-3-clause |
karanvivekbhargava/robot-butler-enpm808x | vendor/googletest/googletest/test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name = 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Removing a variable that is not set is a no-op.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  # A color_flag of None means "do not pass --gtest_color at all".
  args = [] if color_flag is None else ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  # Treat an abnormal exit as "used color" too, so such failures are noticed.
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Exercises every combination of TERM, GTEST_COLOR and --gtest_color and
  checks whether the child gtest binary decided to emit color."""

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Dumb/monochrome terminals must not get color by default.
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    # Color-capable terminals get color by default.
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    # An explicit flag always overrides the terminal type...
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # ...except 'auto', which defers to the terminal type.
      self.assert_(not UsesColor('emacs', None, 'auto'))
      self.assert_(UsesColor('xterm', None, 'auto'))
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The command-line flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))

    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    # An unrecognized value behaves like 'no'.
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
  # Delegates to gtest's test runner (a thin wrapper around unittest.main).
  gtest_test_utils.Main()
| mit |
vkscool/nupic | examples/opf/experiments/multistep/simple_1/description.py | 17 | 1591 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
# the sub-experiment configuration
config = \
{
  # CSV dataset driving this sub-experiment, resolved relative to this file
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                                         '../datasets/simple_1.csv'),
  # empty dicts mean "inherit the corresponding settings from the base
  # description unchanged"
  'modelParams': { 'clParams': { },
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { },
                   'tpParams': { }}}

# merge this config into the shared base experiment description and
# re-export all of its module-level symbols from this module
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| gpl-3.0 |
vipins/ccccms | env/Lib/site-packages/django/utils/daemonize.py | 452 | 1907 | import os
import sys
# Python 2 module: note the comma-style `except OSError, e` and the octal
# literal 022 below.
if os.name == 'posix':
    def become_daemon(our_home_dir='.', out_log='/dev/null',
                      err_log='/dev/null', umask=022):
        "Robustly turn into a UNIX daemon, running in our_home_dir."
        # First fork: detach from the controlling process.
        try:
            if os.fork() > 0:
                sys.exit(0)     # kill off parent
        except OSError, e:
            sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
            sys.exit(1)
        # Become session leader, move to the working dir, reset umask.
        os.setsid()
        os.chdir(our_home_dir)
        os.umask(umask)

        # Second fork: ensure the daemon can never reacquire a terminal.
        try:
            if os.fork() > 0:
                os._exit(0)
        except OSError, e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
            os._exit(1)

        # Redirect the standard file descriptors; logs are opened unbuffered
        # (third open() argument 0) so output appears immediately.
        si = open('/dev/null', 'r')
        so = open(out_log, 'a+', 0)
        se = open(err_log, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # Set custom file descriptors so that they get proper buffering.
        sys.stdout, sys.stderr = so, se
else:
    def become_daemon(our_home_dir='.', out_log=None, err_log=None, umask=022):
        """
        If we're not running under a POSIX system, just simulate the daemon
        mode by doing redirections and directory changing.
        """
        os.chdir(our_home_dir)
        os.umask(umask)
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()
        # Replace the standard streams with real logs or null sinks.
        if err_log:
            sys.stderr = open(err_log, 'a', 0)
        else:
            sys.stderr = NullDevice()
        if out_log:
            sys.stdout = open(out_log, 'a', 0)
        else:
            sys.stdout = NullDevice()
class NullDevice:
    """A writeable object that writes to nowhere -- like /dev/null."""

    def write(self, s):
        # Deliberately discard the data.
        pass
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.