repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
marcosmodesto/django-testapp | django/contrib/gis/geoip/prototypes.py | 200 | 3730 |
from ctypes import c_char_p, c_float, c_int, string_at, Structure, POINTER
from django.contrib.gis.geoip.libgeoip import lgeoip, free
#### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
    """ctypes mirror of the C ``GeoIPRecord`` struct returned by city lookups.

    The field names, types, and *order* must match the C library's layout
    exactly; do not reorder entries.
    """
    _fields_ = [('country_code', c_char_p),
                ('country_code3', c_char_p),
                ('country_name', c_char_p),
                ('region', c_char_p),
                ('city', c_char_p),
                ('postal_code', c_char_p),
                ('latitude', c_float),
                ('longitude', c_float),
                # TODO: In 1.4.6 this changed from `int dma_code;` to
                # `union {int metro_code; int dma_code;};`. Change
                # to a `ctypes.Union` to accommodate this in the future, when
                # pre-1.4.6 versions are no longer distributed.
                ('dma_code', c_int),
                ('area_code', c_int),
                ('charset', c_int),
                ('continent_code', c_char_p),
                ]
# Names of the GeoIPRecord fields that hold C strings; these are the fields
# decoded to unicode in check_record() below.
geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p]
# Maps the record's integer `charset` field to the Python codec name used to
# decode the string fields.
geoip_encodings = { 0: 'iso-8859-1',
                    1: 'utf8',
                    }
class GeoIPTag(Structure):
    # Opaque handle for the C `GeoIP` struct; its layout is never accessed
    # from Python, so no _fields_ are declared.
    pass

# Pointer types used in the function prototypes below.
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
#### ctypes function prototypes ####
# GeoIP_lib_version appeared in version 1.4.7.
if hasattr(lgeoip, 'GeoIP_lib_version'):
    GeoIP_lib_version = lgeoip.GeoIP_lib_version
    GeoIP_lib_version.argtypes = None
    GeoIP_lib_version.restype = c_char_p
else:
    # Older libgeoip builds: version information is unavailable.
    GeoIP_lib_version = None

# For freeing memory allocated within a record
GeoIPRecord_delete = lgeoip.GeoIPRecord_delete
GeoIPRecord_delete.argtypes = [RECTYPE]
GeoIPRecord_delete.restype = None
# For retrieving records by name or address.
def check_record(result, func, cargs):
    """ctypes errcheck callback for GeoIP record-lookup routines.

    For a non-NULL ``RECTYPE`` pointer, copies the C struct's fields into a
    Python dict, decodes the string fields using the encoding indicated by
    the record's own `charset` field, frees the C-allocated record, and
    returns the dict.  Returns None for a NULL pointer.
    """
    if not bool(result):
        return None
    rec = result.contents
    # Pull every struct field into a plain dictionary.
    record = dict((fld, getattr(rec, fld)) for fld, _ in rec._fields_)
    # Convert the byte strings to unicode using the record's own encoding.
    encoding = geoip_encodings[record['charset']]
    for fld in geoip_char_fields:
        value = record[fld]
        if value:
            record[fld] = value.decode(encoding)
    # The struct was allocated by the GeoIP C library; release it now that
    # all of its data has been copied out.
    GeoIPRecord_delete(result)
    return record
def record_output(func):
    """Attach the record-lookup prototype to a GeoIP ctypes function.

    Sets the argument types (database handle + query string), the return
    type (pointer to GeoIPRecord), and the errcheck hook, then returns the
    same function object.
    """
    prototype = {'argtypes': [DBTYPE, c_char_p],
                 'restype': RECTYPE,
                 'errcheck': check_record,
                 }
    for attr, value in prototype.items():
        setattr(func, attr, value)
    return func
# Record-lookup routines (return a GeoIPRecord pointer, converted to a dict
# by check_record via the prototype set in record_output).
GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name)

# For opening & closing GeoIP database files.
GeoIP_open = lgeoip.GeoIP_open
GeoIP_open.restype = DBTYPE
GeoIP_delete = lgeoip.GeoIP_delete
GeoIP_delete.argtypes = [DBTYPE]
GeoIP_delete.restype = None
# This is so the string pointer can be freed within Python.
class geoip_char_p(c_char_p):
    # Subclassing c_char_p stops ctypes from auto-converting the result to a
    # Python string, preserving the raw pointer so check_string() can pass it
    # to free() after copying the bytes out.
    pass
def check_string(result, func, cargs):
    """ctypes errcheck callback that copies and frees a C-allocated string.

    Returns the copied string, or the empty string when the C routine
    returned a NULL pointer.
    """
    if not result:
        return ''
    # Copy the bytes out of the C buffer before releasing the pointer.
    s = string_at(result)
    free(result)
    return s
GeoIP_database_info = lgeoip.GeoIP_database_info
# geoip_char_p keeps the raw pointer alive so check_string can free() it.
GeoIP_database_info.restype = geoip_char_p
GeoIP_database_info.errcheck = check_string
# String output routines.
def string_output(func):
    """Mark a GeoIP ctypes routine as returning a C string (c_char_p)."""
    setattr(func, 'restype', c_char_p)
    return func
# Country code / name lookups.  The plain c_char_p restype auto-converts the
# result to a Python string; unlike GeoIP_database_info, these results are
# not freed here.
GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
| bsd-3-clause |
ryfeus/lambda-packs | Lxml_requests/source/pip/_vendor/requests/packages/chardet/sjisprober.py | 1777 | 3764 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Charset prober for the Shift_JIS (Japanese) encoding.

    Combines a coding state machine (byte-sequence legality), a character
    distribution analyzer, and a context analyzer; the overall confidence is
    the maximum of the two analyzers' confidences.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        # Delegates to the context analyzer, which supplies the charset name.
        return self._mContextAnalyzer.get_charset_name()

    def feed(self, aBuf):
        """Feed a chunk of bytes to the prober; returns the prober state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            # Advance the state machine one byte at a time.
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; feed it to both
                # analyzers.  charLen is the byte length of that character.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character may straddle the previous buffer: use the
                    # bytes saved in self._mLastChar.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte in case a character continues into the
        # next buffer fed to this prober.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success once confidence is high enough.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
edx/ecommerce | ecommerce/extensions/voucher/migrations/0001_initial.py | 1 | 3161 |
# -*- coding: utf-8 -*-
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the voucher app: Voucher and VoucherApplication."""

    dependencies = [
        ('order', '0001_initial'),
        ('offer', '0001_initial'),
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL
        # resolves to rather than a hard-coded app/model pair.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Voucher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(verbose_name='Name', max_length=128, help_text='This will be shown in the checkout and basket once the voucher is entered')),
                ('code', models.CharField(max_length=128, verbose_name='Code', unique=True, db_index=True, help_text='Case insensitive / No spaces allowed')),
                ('usage', models.CharField(default='Multi-use', max_length=128, verbose_name='Usage', choices=[('Single use', 'Can be used once by one customer'), ('Multi-use', 'Can be used multiple times by multiple customers'), ('Once per customer', 'Can only be used once per customer')])),
                ('start_datetime', models.DateTimeField(verbose_name='Start datetime')),
                ('end_datetime', models.DateTimeField(verbose_name='End datetime')),
                ('num_basket_additions', models.PositiveIntegerField(default=0, verbose_name='Times added to basket')),
                ('num_orders', models.PositiveIntegerField(default=0, verbose_name='Times on orders')),
                ('total_discount', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Total discount')),
                ('date_created', models.DateField(auto_now_add=True)),
                ('offers', models.ManyToManyField(related_name='vouchers', verbose_name='Offers', to='offer.ConditionalOffer')),
            ],
            options={
                'verbose_name_plural': 'Vouchers',
                'get_latest_by': 'date_created',
                'verbose_name': 'Voucher',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Tracks each use of a voucher on an order, optionally tied to a user.
        migrations.CreateModel(
            name='VoucherApplication',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_created', models.DateField(auto_now_add=True, verbose_name='Date Created')),
                ('order', models.ForeignKey(verbose_name='Order', to='order.Order', on_delete=models.CASCADE)),
                ('user', models.ForeignKey(null=True, verbose_name='User', to=settings.AUTH_USER_MODEL, blank=True, on_delete=models.CASCADE)),
                ('voucher', models.ForeignKey(verbose_name='Voucher', related_name='applications', to='voucher.Voucher', on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name_plural': 'Voucher Applications',
                'verbose_name': 'Voucher Application',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
| agpl-3.0 |
krikru/tensorflow-opencl | tensorflow/python/debug/debug_data.py | 11 | 41977 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions to handle debug-dump data of TensorFlow Debugger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import gfile
# Prefix shared by all tfdbg metadata (non-tensor) dump files.
METADATA_FILE_PREFIX = "_tfdbg_"
# Tags distinguishing the kinds of metadata files under the dump root.
GRAPH_FILE_TAG = "graph_"
FETCHES_INFO_FILE_TAG = "fetches_info_"
FEED_KEYS_INFO_FILE_TAG = "feed_keys_info_"
def load_tensor_from_event_file(event_file_path):
  """Load a tensor from an event file.

  Assumes that the event file contains a `Event` protobuf and the `Event`
  protobuf contains a `Tensor` value.

  Args:
    event_file_path: (`str`) path to the event file.

  Returns:
    The tensor value loaded from the event file, as a `numpy.ndarray`. For
    uninitialized tensors, returns None.
  """
  event = event_pb2.Event()
  with gfile.Open(event_file_path, "rb") as f:
    event.ParseFromString(f.read())

  # An empty tensor proto (neither raw bytes nor string values) is how an
  # uninitialized tensor is represented in the dump.
  if (event.summary.value[0].tensor.tensor_content or
      event.summary.value[0].tensor.string_val):
    # Initialized tensor.
    tensor_value = tensor_util.MakeNdarray(event.summary.value[0].tensor)
  else:
    # Uninitialized tensor.
    tensor_value = None

  return tensor_value
def _load_graph_def_from_event_file(event_file_path):
  """Parse the `GraphDef` stored in an `Event`-protobuf dump file."""
  event = event_pb2.Event()
  with gfile.Open(event_file_path, "rb") as f:
    event.ParseFromString(f.read())

  return graph_pb2.GraphDef.FromString(event.graph_def)


def _load_log_message_from_event_file(event_file_path):
  """Return the log-message string stored in an `Event`-protobuf dump file."""
  event = event_pb2.Event()
  with gfile.Open(event_file_path, "rb") as f:
    event.ParseFromString(f.read())

  return event.log_message.message
def parse_node_or_tensor_name(name):
  """Get the node name from a string that can be node or tensor name.

  Args:
    name: An input node name (e.g., "node_a") or tensor name (e.g.,
      "node_a:0"), as a str.

  Returns:
    1) The node name, as a str. If the input name is a tensor name, i.e.,
      consists of a colon, the final colon and the following output slot
      will be stripped.
    2) If the input name is a tensor name, the output slot, as an int. If
      the input name is not a tensor name, None.
  """
  # A trailing colon does not count as a slot separator.
  if ":" not in name or name.endswith(":"):
    return name, None

  colon_index = name.rfind(":")
  return name[:colon_index], int(name[colon_index + 1:])
def _is_graph_file(file_name):
  """Whether file_name names a dumped partition-graph metadata file."""
  return file_name.startswith(METADATA_FILE_PREFIX + GRAPH_FILE_TAG)


def _is_run_fetches_info_file(file_name):
  """Whether file_name is the metadata file describing the run's fetches."""
  return file_name == METADATA_FILE_PREFIX + FETCHES_INFO_FILE_TAG


def _is_run_feed_keys_info_file(file_name):
  """Whether file_name is the metadata file describing the run's feed keys."""
  return file_name == METADATA_FILE_PREFIX + FEED_KEYS_INFO_FILE_TAG
def get_node_name(element_name):
  """Return the node part of a node or tensor name (text before the first colon)."""
  colon_index = element_name.find(":")
  return element_name if colon_index == -1 else element_name[:colon_index]
def get_output_slot(element_name):
  """Get the output slot number from the name of a graph element.

  If element_name is a node name without output slot at the end, 0 will be
  assumed.

  Args:
    element_name: (`str`) name of the graph element in question.

  Returns:
    (`int`) output slot number.
  """
  if ":" not in element_name:
    return 0
  return int(element_name.rsplit(":", 1)[-1])
def _get_tensor_name(node_name, output_slot):
"""Get tensor name given node name and output slot index.
Args:
node_name: Name of the node that outputs the tensor, as a string.
output_slot: Output slot index of the tensor, as an integer.
Returns:
Name of the tensor, as a string.
"""
return "%s:%d" % (node_name, output_slot)
def _get_tensor_watch_key(node_name, output_slot, debug_op):
  """Get the string representation of a debug watch on a tensor.

  Args:
    node_name: Name of the node by which the watched tensor is produced, as a
      string.
    output_slot: Output slot index of the tensor, as an integer.
    debug_op: Name of the debug op that is used to watch the tensor, as a
      string.

  Returns:
    A string representing the debug watch on the tensor (i.e., the "watch
      key").
  """
  tensor_name = _get_tensor_name(node_name, output_slot)
  return ":".join([tensor_name, debug_op])
def _is_copy_node(node_name):
"""Determine whether a node name is that of a debug Copy node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_options.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug Copy
node.
"""
return node_name.startswith("__copy_")
def _is_debug_node(node_name):
"""Determine whether a node name is that of a debug node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_options.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug node.
"""
return node_name.startswith("__dbg_")
def _parse_debug_node_name(node_name):
"""Parse the name of a debug node.
Args:
node_name: Name of the debug node.
Returns:
1. Name of the watched node, as a str.
2. Output slot index of the watched tensor, as an int.
3. Index of the debug node, as an int.
4. Name of the debug op, as a str, e.g, "DebugIdentity".
Raises:
ValueError: If the input node name is not a valid debug node name.
"""
prefix = "__dbg_"
name = node_name
if not name.startswith(prefix):
raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)
name = name[len(prefix):]
if name.count("_") < 2:
raise ValueError("Invalid debug node name: '%s'" % node_name)
debug_op = name[name.rindex("_") + 1:]
name = name[:name.rindex("_")]
debug_op_index = int(name[name.rindex("_") + 1:])
name = name[:name.rindex("_")]
if name.count(":") != 1:
raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)
watched_node_name = name[:name.index(":")]
watched_output_slot = int(name[name.index(":") + 1:])
return watched_node_name, watched_output_slot, debug_op_index, debug_op
def has_inf_or_nan(datum, tensor):
  """A predicate for whether a tensor consists of any bad numerical values.

  This predicate is common enough to merit definition in this module.
  Bad numerical values include `nan`s and `inf`s.
  The signature of this function follows the requirement of the method
  `DebugDumpDir.find()`.

  Args:
    datum: (`DebugTensorDatum`) Datum metadata.
    tensor: (`numpy.ndarray` or None) Value of the tensor. None represents
      an uninitialized tensor.

  Returns:
    (`bool`) True if and only if tensor consists of any nan or inf values.
  """
  _ = datum  # Datum metadata is unused in this predicate.

  if tensor is None:
    # Uninitialized tensor doesn't have bad numerical values.
    return False
  # Use the abstract scalar supertypes (np.floating, np.complexfloating)
  # rather than the deprecated aliases np.float / np.complex, which were
  # removed in NumPy 1.24 and raise AttributeError there.
  elif (np.issubdtype(tensor.dtype, np.floating) or
        np.issubdtype(tensor.dtype, np.complexfloating) or
        np.issubdtype(tensor.dtype, np.integer)):
    return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
  else:
    # Non-numeric dtypes (e.g., strings) cannot contain nan/inf.
    return False
class DebugTensorDatum(object):
  """A single tensor dumped by TensorFlow Debugger (tfdbg).

  Contains metadata about the dumped tensor, including `timestamp`,
  `node_name`, `output_slot`, `debug_op`, and path to the dump file
  (`file_path`).

  This type does not hold the generally space-expensive tensor value (numpy
  array). Instead, it points to the file from which the tensor value can be
  loaded (with the `get_tensor` method) if needed.
  """

  def __init__(self, dump_root, debug_dump_rel_path):
    """`DebugTensorDatum` constructor.

    Args:
      dump_root: (`str`) Debug dump root directory.
      debug_dump_rel_path: (`str`) Path to a debug dump file, relative to the
        `dump_root`. For example, suppose the debug dump root
        directory is `/tmp/tfdbg_1` and the dump file is at
        `/tmp/tfdbg_1/ns_1/node_a_0_DebugIdentity_123456789`, then
        the value of the debug_dump_rel_path should be
        `ns_1/node_a_0_DebugIdentity_123456789`.

    Raises:
      ValueError: If the base file name of the dump file does not conform to
        the dump file naming pattern:
        `node_name`_`output_slot`_`debug_op`_`timestamp`
    """
    base = os.path.basename(debug_dump_rel_path)

    if base.count("_") < 3:
      raise ValueError(
          "Dump file path does not conform to the naming pattern: %s" % base)

    # TODO(cais): Add hostname and pid to support dumps from distributed
    #             sessions.

    # The file name ends with "_<output_slot>_<debug_op>_<timestamp>"; the
    # node name (which may itself contain underscores) is everything before.
    self._timestamp = int(base.split("_")[-1])
    self._debug_op = base.split("_")[-2]
    self._output_slot = int(base.split("_")[-3])

    # Sub-directories under the dump root encode the node's name scope.
    namespace = os.path.dirname(debug_dump_rel_path).replace("\\", "/")
    node_base_name = "_".join(base.split("_")[:-3])
    if not namespace or namespace == ".":
      self._node_name = node_base_name
    else:
      self._node_name = namespace + "/" + node_base_name

    self._file_path = os.path.join(dump_root, debug_dump_rel_path)
    self._dump_size_bytes = (gfile.Stat(self._file_path).length if
                             gfile.Exists(self._file_path) else None)

    # NOTE(review): these two attributes do not appear to be read anywhere on
    # this class within this module (similarly named attributes live on
    # DebugDumpDir) -- possibly leftover; verify before removing.
    self._run_fetches_info = None
    self._run_feed_keys_info = None

  def __str__(self):
    return "{DebugTensorDatum: %s:%d @ %s @ %d}" % (self.node_name,
                                                    self.output_slot,
                                                    self.debug_op,
                                                    self.timestamp)

  def __repr__(self):
    return self.__str__()

  def get_tensor(self):
    """Get tensor from the dump (`Event`) file.

    Returns:
      The tensor loaded from the dump (`Event`) file.
    """
    return load_tensor_from_event_file(self.file_path)

  # TODO(cais): Add time unit suffix to timestamp and t0 (us).
  @property
  def timestamp(self):
    """Timestamp of when this tensor value was dumped.

    Returns:
      (`int`) The timestamp in microseconds.
    """
    return self._timestamp

  @property
  def debug_op(self):
    """Name of the debug op.

    Returns:
      (`str`) debug op name (e.g., `DebugIdentity`).
    """
    return self._debug_op

  @property
  def node_name(self):
    """Name of the node from which the tensor value was dumped.

    Returns:
      (`str`) name of the node watched by the debug op.
    """
    return self._node_name

  @property
  def output_slot(self):
    """Output slot index from which the tensor value was dumped.

    Returns:
      (`int`) output slot index watched by the debug op.
    """
    return self._output_slot

  @property
  def tensor_name(self):
    """Name of the tensor watched by the debug op.

    Returns:
      (`str`) `Tensor` name, in the form of `node_name`:`output_slot`
    """
    return _get_tensor_name(self.node_name, self.output_slot)

  @property
  def watch_key(self):
    """Watch key identifying a debug watch on a tensor.

    Returns:
      (`str`) A watch key, in the form of `tensor_name`:`debug_op`.
    """
    return _get_tensor_watch_key(self.node_name, self.output_slot,
                                 self.debug_op)

  @property
  def file_path(self):
    """Path to the file which stores the value of the dumped tensor."""
    return self._file_path

  @property
  def dump_size_bytes(self):
    """Size of the dump file.

    Unit: byte.

    Returns:
      If the dump file exists, size of the dump file, in bytes.
      If the dump file does not exist, None.
    """
    return self._dump_size_bytes
class DebugDumpDir(object):
  """Data set from a debug-dump directory on filesystem.

  An instance of `DebugDumpDir` contains all `DebugTensorDatum` instances
  in a tfdbg dump root directory.
  """

  def __init__(self, dump_root, partition_graphs=None, validate=True):
    """`DebugDumpDir` constructor.

    Args:
      dump_root: (`str`) path to the dump root directory.
      partition_graphs: A repeated field of GraphDefs representing the
        partition graphs executed by the TensorFlow runtime.
      validate: (`bool`) whether the dump files are to be validated against the
        partition graphs.

    Raises:
      IOError: If dump_root does not exist as a directory.
    """
    if not gfile.IsDirectory(dump_root):
      raise IOError("Dump root directory %s does not exist" % dump_root)

    # Load tensor dumps first, then build watch-key lookup maps, then load
    # (and optionally validate against) the partition graphs.
    self._load_dumps(dump_root)
    self._create_tensor_watch_maps()
    self._load_partition_graphs(partition_graphs, validate)

    # Set later via set_python_graph(), if the client provides it.
    self._python_graph = None
  def _load_dumps(self, dump_root):
    """Load `DebugTensorDatum` instances from the dump root.

    Populates a list of `DebugTensorDatum` instance and sorts the list by
    ascending timestamp.

    This sorting order reflects the order in which the TensorFlow executor
    processed the nodes of the graph. It is (one of many possible) topological
    sort of the nodes. This is useful for displaying tensors in the debugger
    frontend as well as for the use case in which the user wants to find a
    "culprit tensor", i.e., the first tensor in the graph that exhibits certain
    problematic properties, i.e., all zero values, or bad numerical values such
    as nan and inf.

    In addition, creates a map from node name to debug watches. In this Map,
    the key is the watched node name; the value is a dictionary.
    Of this dictionary, the key is the watched_output_slot.

    This method attempts to load the debug watches from the tensor dump files
    first, before loading the full set of debug watches from the partition
    graphs as done later. This is necessary because sometimes the partition
    graphs may not be available, e.g., when the run errors out.

    Args:
      dump_root: (`str`) Dump root directory.
    """
    self._dump_root = dump_root
    self._dump_tensor_data = []
    self._dump_graph_file_paths = []

    # node_name -> output_slot -> set of debug op names.
    self._debug_watches = collections.defaultdict(
        lambda: collections.defaultdict(set))

    for root, _, files in gfile.Walk(self._dump_root):
      for f in files:
        if f.startswith(METADATA_FILE_PREFIX):
          # Metadata files: partition graphs, fetches info, feed-keys info.
          if _is_graph_file(f):
            self._dump_graph_file_paths.append(
                os.path.join(self._dump_root, root, f))

          if _is_run_fetches_info_file(f):
            self._run_fetches_info = _load_log_message_from_event_file(
                os.path.join(root, f))

          if _is_run_feed_keys_info_file(f):
            self._run_feed_keys_info = _load_log_message_from_event_file(
                os.path.join(root, f))

          continue

        # Every non-metadata file is a tensor dump.
        datum = self._dump_file_name_to_datum(root, f)
        self._dump_tensor_data.append(datum)

        self._debug_watches[datum.node_name][datum.output_slot].add(
            datum.debug_op)

    # Sort by ascending timestamp: a topological order of execution.
    self._dump_tensor_data = sorted(
        self._dump_tensor_data, key=lambda x: x.timestamp)

    if self._dump_tensor_data:
      self._t0 = self._dump_tensor_data[0].timestamp
    else:
      self._t0 = None

  def _dump_file_name_to_datum(self, dir_name, file_name):
    """Obtain a DebugTensorDatum from the directory and file name.

    Args:
      dir_name: (`str`) Name of the directory in which the dump file resides.
      file_name: (`str`) Base name of the dump file.

    Returns:
      (`DebugTensorDatum`) The `DebugTensorDatum` loaded from the dump file.
    """
    # Calculate the relative path of the dump file with respect to the root.
    debug_dump_rel_path = os.path.join(
        os.path.relpath(dir_name, self._dump_root), file_name)

    return DebugTensorDatum(self._dump_root, debug_dump_rel_path)
  def _create_tensor_watch_maps(self):
    """Create maps from tensor watch keys to datum and to timestamps.

    Create a map from watch key (tensor name + debug op) to `DebugTensorDatum`
    item. Also make a map from watch key to relative timestamp.
    "relative" means (absolute timestamp - t0).
    """
    self._watch_key_to_datum = {}
    self._watch_key_to_rel_time = {}
    self._watch_key_to_dump_size_bytes = {}
    for datum in self._dump_tensor_data:
      # A watch key can recur (e.g., a node executed more than once), so each
      # map value is a list, ordered by the (already-sorted) dump timestamps.
      if datum.watch_key not in self._watch_key_to_datum:
        self._watch_key_to_datum[datum.watch_key] = [datum]
        self._watch_key_to_rel_time[datum.watch_key] = [
            datum.timestamp - self._t0
        ]
        self._watch_key_to_dump_size_bytes[datum.watch_key] = [
            datum.dump_size_bytes
        ]
      else:
        self._watch_key_to_datum[datum.watch_key].append(datum)
        self._watch_key_to_rel_time[datum.watch_key].append(datum.timestamp -
                                                            self._t0)
        self._watch_key_to_dump_size_bytes[datum.watch_key].append(
            datum.dump_size_bytes)

  def set_python_graph(self, python_graph):
    """Provide Python `Graph` object to the wrapper.

    Unlike the partition graphs, which are protobuf `GraphDef` objects, `Graph`
    is a Python object and carries additional information such as the traceback
    of the construction of the nodes in the graph.

    Args:
      python_graph: (ops.Graph) The Python Graph object.
    """
    self._python_graph = python_graph
    self._node_traceback = {}
    if self._python_graph:
      # Cache each op's construction traceback for later lookup by node name.
      for op in self._python_graph.get_operations():
        self._node_traceback[op.name] = op.traceback
  @property
  def dumped_tensor_data(self):
    """All `DebugTensorDatum` items, sorted by ascending timestamp."""
    return self._dump_tensor_data

  @property
  def t0(self):
    """Absolute timestamp of the first dumped tensor.

    Returns:
      (`int`) absolute timestamp of the first dumped tensor, in microseconds.
    """
    return self._t0

  @property
  def size(self):
    """Total number of dumped tensors in the dump root directory.

    Returns:
      (`int`) total number of dumped tensors in the dump root directory.
    """
    return len(self._dump_tensor_data)
  def _load_partition_graphs(self, partition_graphs, validate):
    """Load and process partition graphs.

    Load the graphs; parse the input and control input structure; obtain the
    device and op type of each node; remove the Copy and debug ops inserted
    by the debugger. The gathered information can be used to validate the
    tensor dumps.

    Args:
      partition_graphs: Partition graphs executed by the TensorFlow runtime,
        represented as repeated fields of GraphDef.
        If no partition_graph is available, use None.
      validate: (`bool`) Whether the dump files are to be validated against the
        partition graphs.
    """
    if partition_graphs:
      self._partition_graphs = partition_graphs
    elif self._dump_graph_file_paths:
      # In case partition graphs are not available from arguments, load them
      # from the dump directory.
      self._partition_graphs = [
          _load_graph_def_from_event_file(dump_file_path)
          for dump_file_path in self._dump_graph_file_paths
      ]
    else:
      # No graphs at all: leave the node maps unbuilt.  Graph-querying
      # methods guard on self._partition_graphs being None.
      self._partition_graphs = None
      return

    self._node_attributes = {}
    self._node_inputs = {}
    self._node_ctrl_inputs = {}
    self._node_recipients = {}
    self._node_ctrl_recipients = {}
    self._devices = []
    self._node_devices = {}
    self._node_op_types = {}
    self._copy_send_nodes = []

    for pg in self._partition_graphs:
      for node in pg.node:
        self._process_partition_graph_node(node)

    # Remove the debugger-inserted Copy/debug ops from the maps, then build
    # the reverse (recipient) maps.
    self._prune_non_control_edges_of_debug_ops()
    self._prune_control_edges_of_debug_ops()
    self._populate_recipient_maps()

    if validate:
      self._validate_dump_with_graphs()
  def _process_partition_graph_node(self, node):
    """Process a node from the partition graphs.

    Args:
      node: (NodeDef) A partition-graph node to be processed.

    Raises:
      ValueError: If duplicate node names are encountered.
    """
    if _is_debug_node(node.name):
      # This is a debug node. Parse the node name and retrieve the
      # information about debug watches on tensors. But do not include
      # the node in the graph.
      (watched_node_name, watched_output_slot, _,
       debug_op) = _parse_debug_node_name(node.name)

      self._debug_watches[watched_node_name][watched_output_slot].add(
          debug_op)

      return

    if node.name in self._node_inputs:
      raise ValueError("Duplicate node name: '%s'" % node.name)

    self._node_attributes[node.name] = node.attr

    if node.device not in self._devices and node.device:
      self._devices.append(node.device)

    self._node_inputs[node.name] = []
    self._node_ctrl_inputs[node.name] = []
    self._node_recipients[node.name] = []
    self._node_ctrl_recipients[node.name] = []

    self._node_devices[node.name] = node.device
    self._node_op_types[node.name] = node.op

    for inp in node.input:
      # A _Send fed by a Copy node is part of the debugger's plumbing; track
      # it so it can be pruned later.
      if _is_copy_node(inp) and node.op == "_Send":
        self._copy_send_nodes.append(node.name)

      # Control inputs carry a "^" prefix in GraphDef.
      if inp.startswith("^"):
        cinp = inp[1:]
        self._node_ctrl_inputs[node.name].append(cinp)
      else:
        self._node_inputs[node.name].append(inp)

  def _prune_nodes_from_input_and_recipient_maps(self, nodes_to_prune):
    """Prune nodes out of input and recipient maps.

    Args:
      nodes_to_prune: (`list` of `str`) Names of the nodes to be pruned.
    """
    for node in nodes_to_prune:
      del self._node_inputs[node]
      del self._node_ctrl_inputs[node]
      del self._node_recipients[node]
      del self._node_ctrl_recipients[node]
  def _prune_non_control_edges_of_debug_ops(self):
    """Prune (non-control) edges related to debug ops.

    Prune the Copy ops and associated _Send ops inserted by the debugger out
    from the non-control inputs and output recipients map. Replace the inputs
    and recipients with original ones.
    """
    copy_nodes = []
    for node in self._node_inputs:
      if node in self._copy_send_nodes:
        continue

      if _is_copy_node(node):
        copy_nodes.append(node)

      inputs = self._node_inputs[node]

      for i in xrange(len(inputs)):
        inp = inputs[i]
        if _is_copy_node(inp):
          # Find the input to the Copy node, which should be the original
          # input to the node.
          orig_inp = self._node_inputs[inp][0]
          inputs[i] = orig_inp

    # Remove the Copy nodes and the _Send nodes they feed.
    self._prune_nodes_from_input_and_recipient_maps(copy_nodes)
    self._prune_nodes_from_input_and_recipient_maps(self._copy_send_nodes)

  def _prune_control_edges_of_debug_ops(self):
    """Prune control edges related to the debug ops."""
    for node in self._node_ctrl_inputs:
      ctrl_inputs = self._node_ctrl_inputs[node]
      debug_op_inputs = []
      for ctrl_inp in ctrl_inputs:
        if _is_debug_node(ctrl_inp):
          debug_op_inputs.append(ctrl_inp)
      # Collected first, then removed, to avoid mutating the list while
      # iterating over it.
      for debug_op_inp in debug_op_inputs:
        ctrl_inputs.remove(debug_op_inp)

  def _populate_recipient_maps(self):
    """Populate the map from node name to recipient(s) of its output(s)."""
    for node in self._node_inputs:
      inputs = self._node_inputs[node]
      for inp in inputs:
        inp = get_node_name(inp)
        if inp not in self._node_recipients:
          self._node_recipients[inp] = []
        self._node_recipients[inp].append(node)

    for node in self._node_ctrl_inputs:
      ctrl_inputs = self._node_ctrl_inputs[node]
      for ctrl_inp in ctrl_inputs:
        if ctrl_inp in self._copy_send_nodes:
          # Skip debugger-inserted _Send plumbing.
          continue

        if ctrl_inp not in self._node_ctrl_recipients:
          self._node_ctrl_recipients[ctrl_inp] = []
        self._node_ctrl_recipients[ctrl_inp].append(node)
  def _validate_dump_with_graphs(self):
    """Validate the dumped tensor data against the partition graphs.

    Only the watched nodes are validated by this method, because tfdbg allows
    clients to watch only a subset of the nodes.

    Raises:
      LookupError: If the partition graphs have not been loaded yet.
      ValueError: If dumps contain node names not found in partition graph.
        Or if the temporal order of the dump's timestamps violate the
        input relations on the partition graphs.
    """
    if not self._partition_graphs:
      raise LookupError("No partition graphs loaded.")

    # Verify that the node names in the dump data are all present in the
    # partition graphs.
    for datum in self._dump_tensor_data:
      if datum.node_name not in self._node_inputs:
        raise ValueError("Node name '%s' is not found in partition graphs." %
                         datum.node_name)

    # For each node, collect the watched (node, slot) inputs that must have
    # produced a dump before the node itself can execute.
    pending_inputs = {}
    for node in self._node_inputs:
      pending_inputs[node] = []
      inputs = self._node_inputs[node]
      for inp in inputs:
        inp_node = get_node_name(inp)
        inp_output_slot = get_output_slot(inp)
        # Only watched inputs participate in the causality check.
        if (inp_node in self._debug_watches and
            inp_output_slot in self._debug_watches[inp_node] and
            (inp_node, inp_output_slot) not in pending_inputs[node]):
          pending_inputs[node].append((inp_node, inp_output_slot))

    # Walk the dumps in timestamp order; each dump clears itself from its
    # recipients' pending lists, so a node with a non-empty pending list at
    # its own dump time indicates a causality violation.
    for datum in self._dump_tensor_data:
      node = datum.node_name
      slot = datum.output_slot
      if pending_inputs[node]:
        raise ValueError("Causality violated in timing relations of debug "
                         "dumps: %s (%d): "
                         "these input(s) are not satisfied: %s" %
                         (node, datum.timestamp, repr(pending_inputs[node])))

      recipients = self._node_recipients[node]
      for recipient in recipients:
        recipient_pending_inputs = pending_inputs[recipient]
        if (node, slot) in recipient_pending_inputs:
          if self.node_op_type(recipient) == "Merge":
            # If this is a Merge op, we automatically clear the list because
            # a Merge node only requires one of its two inputs.
            del recipient_pending_inputs[:]
          else:
            del recipient_pending_inputs[
                recipient_pending_inputs.index((node, slot))]
def loaded_partition_graphs(self):
"""Test whether partition graphs have been loaded."""
return self._partition_graphs is not None
def partition_graphs(self):
"""Get the partition graphs.
Returns:
Partition graphs as repeated fields of GraphDef.
Raises:
LookupError: If no partition graphs have been loaded.
"""
if self._partition_graphs is None:
raise LookupError("No partition graphs have been loaded.")
return self._partition_graphs
@property
def run_fetches_info(self):
"""Get a str representation of the fetches used in the Session.run() call.
Returns:
If the information is available, a `str` obtained from `repr(fetches)`.
If the information is not available, `None`.
"""
return self._run_fetches_info
@property
def run_feed_keys_info(self):
"""Get a str representation of the feed_dict used in the Session.run() call.
Returns:
If the information is available, a `str` obtained from `repr(feed_dict)`.
If the information is not available, `None`.
"""
return self._run_feed_keys_info
def nodes(self):
"""Get a list of all nodes from the partition graphs.
Returns:
All nodes' names, as a list of str.
Raises:
LookupError: If no partition graphs have been loaded.
"""
if self._partition_graphs is None:
raise LookupError("No partition graphs have been loaded.")
return [node_name for node_name in self._node_inputs]
def node_attributes(self, node_name):
"""Get the attributes of a node.
Args:
node_name: Name of the node in question.
Returns:
Attributes of the node.
Raises:
LookupError: If no partition graphs have been loaded.
ValueError: If no node named node_name exists.
"""
if self._partition_graphs is None:
raise LookupError("No partition graphs have been loaded.")
if node_name in self._node_attributes:
return self._node_attributes[node_name]
else:
raise ValueError("No node named \"%s\" exists." % node_name)
def node_inputs(self, node_name, is_control=False):
"""Get the inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
is_control: (`bool`) Whether control inputs, rather than non-control
inputs, are to be returned.
Returns:
(`list` of `str`) inputs to the node, as a list of node names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._partition_graphs is None:
raise LookupError(
"Node inputs are not loaded from partition graphs yet.")
if node_name not in self._node_inputs:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
if is_control:
return self._node_ctrl_inputs[node_name]
else:
return self._node_inputs[node_name]
  def transitive_inputs(self, node_name, include_control=True):
    """Get the transitive inputs of given node according to partition graphs.

    Args:
      node_name: Name of the node.
      include_control: Include control inputs (True by default).

    Returns:
      (`list` of `str`) all transitive inputs to the node, as a list of node
        names.

    Raises:
      LookupError: If node inputs and control inputs have not been loaded
        from partition graphs yet.
      ValueError: If the node does not exist in partition graphs.
    """
    if self._partition_graphs is None:
      raise LookupError(
          "Node inputs are not loaded from partition graphs yet.")

    if node_name not in self._node_inputs:
      raise ValueError("Node '%s' does not exist in partition graphs." %
                       node_name)

    inputs = []

    # Keep track of visited nodes to avoid infinite loops during input
    # tracing.
    visited_nodes = []

    def trace_inputs(node):
      """Inner function for recursive tracing of node inputs.

      The transitive input names are appended to the list captured list
      "inputs".

      Args:
        node: Name of the node, as a str.
      """
      # Normalize "node:slot"-style names down to the bare node name.
      node = get_node_name(node)

      # Stop the tracing at a Merge op, as it is generally impossible to infer
      # outside the runtime which input to the Merge op is alive.
      if self._node_op_types[node] == "Merge":
        return

      if node in visited_nodes:
        return

      visited_nodes.append(node)

      for inp in self._node_inputs[node]:
        # Skip the node whose transitive inputs are being queried, so it
        # does not list itself.
        if inp == node_name:
          continue
        inputs.append(inp)
        trace_inputs(inp)

      if include_control:
        for ctrl_inp in self._node_ctrl_inputs[node]:
          if ctrl_inp == node_name:
            continue
          inputs.append(ctrl_inp)
          trace_inputs(ctrl_inp)

    trace_inputs(node_name)

    return inputs
def node_recipients(self, node_name, is_control=False):
"""Get recipient of the given node's output according to partition graphs.
Args:
node_name: (`str`) name of the node.
is_control: (`bool`) whether control outputs, rather than non-control
outputs, are to be returned.
Returns:
(`list` of `str`) all inputs to the node, as a list of node names.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._partition_graphs is None:
raise LookupError(
"Node recipients are not loaded from partition graphs yet.")
if node_name not in self._node_recipients:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
if is_control:
return self._node_ctrl_recipients[node_name]
else:
return self._node_recipients[node_name]
def devices(self):
"""Get the list of devices.
Returns:
(`list` of `str`) names of the devices.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if self._partition_graphs is None:
raise LookupError("Devices are not loaded from partition graphs yet.")
return self._devices
def node_exists(self, node_name):
"""Test if a node exists in the partition graphs.
Args:
node_name: (`str`) name of the node to be checked.
Returns:
A boolean indicating whether the node exists.
Raises:
LookupError: If no partition graphs have been loaded yet.
"""
if self._node_inputs is None:
raise LookupError(
"Nodes have not been loaded from partition graphs yet.")
return node_name in self._node_inputs
def node_device(self, node_name):
"""Get the device of a node.
Args:
node_name: (`str`) name of the node.
Returns:
(`str`) name of the device on which the node is placed.
Raises:
LookupError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._partition_graphs is None:
raise LookupError(
"Node devices are not loaded from partition graphs yet.")
if node_name not in self._node_devices:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
return self._node_devices[node_name]
def node_op_type(self, node_name):
"""Get the op type of given node.
Args:
node_name: (`str`) name of the node.
Returns:
(`str`) op type of the node.
Raises:
LookupError: If node op types have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._partition_graphs is None:
raise LookupError(
"Node op types are not loaded from partition graphs yet.")
if node_name not in self._node_op_types:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
return self._node_op_types[node_name]
def debug_watch_keys(self, node_name):
"""Get all tensor watch keys of given node according to partition graphs.
Args:
node_name: (`str`) name of the node.
Returns:
(`list` of `str`) all debug tensor watch keys. Returns an empty list if
the node name does not correspond to any debug watch keys.
Raises:
`LookupError`: If debug watch information has not been loaded from
partition graphs yet.
"""
if node_name not in self._debug_watches:
return []
watch_keys = []
for watched_slot in self._debug_watches[node_name]:
debug_ops = self._debug_watches[node_name][watched_slot]
for debug_op in debug_ops:
watch_keys.append(
_get_tensor_watch_key(node_name, watched_slot, debug_op))
return watch_keys
def watch_key_to_data(self, debug_watch_key):
"""Get all `DebugTensorDatum` instances corresponding to a debug watch key.
Args:
debug_watch_key: (`str`) debug watch key.
Returns:
A list of `DebugTensorDatum` instances that correspond to the debug watch
key. If the watch key does not exist, returns an empty list.
Raises:
ValueError: If the debug watch key does not exist.
"""
return self._watch_key_to_datum.get(debug_watch_key, [])
def find(self, predicate, first_n=0):
"""Find dumped tensor data by a certain predicate.
Args:
predicate: A callable that takes two input arguments:
```python
def predicate(debug_tensor_datum, tensor):
# returns a bool
```
where `debug_tensor_datum` is an instance of `DebugTensorDatum`, which
carries the metadata, such as the `Tensor`'s node name, output slot
timestamp, debug op name, etc.; and `tensor` is the dumped tensor value
as a `numpy.ndarray`.
first_n: (`int`) return only the first n `DebugTensotDatum` instances (in
time order) for which the predicate returns True. To return all the
`DebugTensotDatum` instances, let first_n be <= 0.
Returns:
A list of all `DebugTensorDatum` objects in this `DebugDumpDir` object
for which predicate returns True, sorted in ascending order of the
timestamp.
"""
matched_data = []
for datum in self._dump_tensor_data:
if predicate(datum, datum.get_tensor()):
matched_data.append(datum)
if first_n > 0 and len(matched_data) >= first_n:
break
return matched_data
def get_tensor_file_paths(self, node_name, output_slot, debug_op):
"""Get the file paths from a debug-dumped tensor.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
Returns:
List of file path(s) loaded. This is a list because each debugged tensor
may be dumped multiple times.
Raises:
ValueError: If the tensor does not exist in the debug-dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return [datum.file_path for datum in self._watch_key_to_datum[watch_key]]
def get_tensors(self, node_name, output_slot, debug_op):
"""Get the tensor value from for a debug-dumped tensor.
The tensor may be dumped multiple times in the dump root directory, so a
list of tensors (`numpy.ndarray`) is returned.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
Returns:
List of tensors (`numpy.ndarray`) loaded from the debug-dump file(s).
Raises:
ValueError: If the tensor does not exist in the debug-dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return [datum.get_tensor() for datum in self._watch_key_to_datum[watch_key]]
def get_rel_timestamps(self, node_name, output_slot, debug_op):
"""Get the relative timestamp from for a debug-dumped tensor.
Relative timestamp means (absolute timestamp - `t0`), where `t0` is the
absolute timestamp of the first dumped tensor in the dump root. The tensor
may be dumped multiple times in the dump root directory, so a list of
relative timestamps (`numpy.ndarray`) is returned.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
Returns:
(`list` of `int`) list of relative timestamps.
Raises:
ValueError: If the tensor watch key does not exist in the debug dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return self._watch_key_to_rel_time[watch_key]
def get_dump_sizes_bytes(self, node_name, output_slot, debug_op):
"""Get the sizes of the dump files for a debug-dumped tensor.
Unit of the file size: byte.
Args:
node_name: (`str`) name of the node that the tensor is produced by.
output_slot: (`int`) output slot index of tensor.
debug_op: (`str`) name of the debug op.
Returns:
(`list` of `int`): list of dump file sizes in bytes.
Raises:
ValueError: If the tensor watch key does not exist in the debug dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return self._watch_key_to_dump_size_bytes[watch_key]
def node_traceback(self, element_name):
"""Try to retrieve the Python traceback of node's construction.
Args:
element_name: (`str`) Name of a graph element (node or tensor).
Returns:
(list) The traceback list object as returned by the `extract_trace`
method of Python's traceback module.
Raises:
LookupError: If Python graph is not available for traceback lookup.
KeyError: If the node cannot be found in the Python graph loaded.
"""
if self._python_graph is None:
raise LookupError("Python graph is not available for traceback lookup")
node_name = get_node_name(element_name)
if node_name not in self._node_traceback:
raise KeyError("Cannot find node \"%s\" in Python graph" % node_name)
return self._node_traceback[node_name]
| apache-2.0 |
Jgarcia-IAS/ReporsitorioVacioOdoo | openerp/addons/hr_contract/base_action_rule.py | 389 | 2646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_action_rule.base_action_rule import get_datetime
from openerp.osv import fields, osv
class base_action_rule(osv.Model):
    """ Add resource and calendar for time-based conditions """
    _name = 'base.action.rule'
    _inherit = ['base.action.rule']

    _columns = {
        # Optional reference to a field on the target model that holds the
        # user whose employee working schedule should drive delay computation.
        'trg_date_resource_field_id': fields.many2one(
            'ir.model.fields', 'Use employee work schedule',
            help='Use the user\'s working schedule.',
        ),
    }

    def _check_delay(self, cr, uid, action, record, record_dt, context=None):
        """ Override the check of delay to try to use a user-related calendar.
        If no calendar is found, fallback on the default behavior.

        :param action: browse record of the action rule being evaluated
        :param record: browse record of the document the rule applies to
        :param record_dt: reference date/time string the delay counts from
        :return: the date/time at which the action should be triggered
        """
        # Use the employee's working schedule only when the rule has a
        # calendar, counts the delay in days, and points at a user field.
        if action.trg_date_calendar_id and action.trg_date_range_type == 'day' and action.trg_date_resource_field_id:
            user = record[action.trg_date_resource_field_id.name]
            if user.employee_ids and user.employee_ids[0].contract_id \
                    and user.employee_ids[0].contract_id.working_hours:
                # Schedule the delay in *working* days on the contract's
                # calendar, skipping leaves of the employee's resource.
                calendar = user.employee_ids[0].contract_id.working_hours
                start_dt = get_datetime(record_dt)
                resource_id = user.employee_ids[0].resource_id.id
                action_dt = self.pool['resource.calendar'].schedule_days_get_date(
                    cr, uid, calendar.id, action.trg_date_range,
                    day_date=start_dt, compute_leaves=True, resource_id=resource_id,
                    context=context
                )
                return action_dt
        # Fallback: default delay computation from the parent class.
        return super(base_action_rule, self)._check_delay(cr, uid, action, record, record_dt, context=context)
| agpl-3.0 |
arj1231/kernel_lge_msm8226 | tools/perf/scripts/python/net_dropmonitor.py | 1258 | 1562 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
	# Load /proc/kallsyms into the module-level `kallsyms` list as
	# {'loc': address, 'name': symbol} entries, sorted by address so that
	# get_sym() can resolve an address by a reverse scan.
	global kallsyms

	# First pass only counts lines, for the progress display below.
	try:
		f = open("/proc/kallsyms", "r")
		linecount = 0
		for line in f:
			linecount = linecount+1
		f.seek(0)
	except:
		return

	j = 0
	for line in f:
		loc = int(line.split()[0], 16)
		name = line.split()[2]
		j = j +1
		# Print progress every 100 symbols (trailing comma: no newline).
		if ((j % 100) == 0):
			print "\r" + str(j) + "/" + str(linecount),
		kallsyms.append({ 'loc': loc, 'name' : name})

	print "\r" + str(j) + "/" + str(linecount)
	kallsyms.sort()
	return
def get_sym(sloc):
	# Resolve an address (given as a decimal string) to (symbol_name, offset)
	# by scanning the sorted kallsyms table backwards; returns (None, 0) if
	# the address precedes every known symbol.
	loc = int(sloc)
	for i in kallsyms[::-1]:
		if loc >= i['loc']:
			return (i['name'], loc - i['loc'])
	return (None, 0)
def print_drop_table():
	# Report accumulated packet-drop counts, resolving each raw location
	# to a kernel symbol + offset where kallsyms data permits.
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		# Fall back to the raw address string if no symbol matched.
		if sym == None:
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf-script callback: invoked once before event processing starts.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf-script callback: invoked when tracing stops; load symbol data
	# and dump the final drop report.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		skbaddr, location, protocol):
	# perf-script callback for the skb:kfree_skb tracepoint: tally one
	# drop per kernel code location.
	slocation = str(location)
	try:
		drop_log[slocation] = drop_log[slocation] + 1
	except:
		# First drop seen at this location.
		drop_log[slocation] = 1
| gpl-2.0 |
Tatsh-ansible/ansible-modules-core | utilities/helper/_fireball.py | 12 | 1343 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['deprecated'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: fireball
short_description: Enable fireball mode on remote node
version_added: "0.9"
deprecated: "in favor of SSH with ControlPersist"
description:
- Modern SSH clients support ControlPersist which is just as fast as
fireball was. Please enable that in ansible.cfg as a replacement
for fireball.
- Removed in ansible 2.0.
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
'''
| gpl-3.0 |
nielsvanoch/django | tests/model_regress/tests.py | 39 | 9289 | from __future__ import unicode_literals
import datetime
from operator import attrgetter
import sys
import unittest
from django.core.exceptions import ValidationError
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.timezone import get_fixed_timezone
from django.db import connection, router
from django.db.models.sql import InsertQuery
from .models import (Worker, Article, Party, Event, Department,
BrokenUnicodeMethod, NonAutoPK, Model1, Model2, Model3)
class ModelTests(TestCase):
    """Regression tests for assorted historical ORM/model bugs."""

    # The bug is that the following queries would raise:
    # "TypeError: Related Field has invalid lookup: gte"
    def test_related_gte_lookup(self):
        """
        Regression test for #10153: foreign key __gte lookups.
        """
        Worker.objects.filter(department__gte=0)

    def test_related_lte_lookup(self):
        """
        Regression test for #10153: foreign key __lte lookups.
        """
        Worker.objects.filter(department__lte=0)

    def test_sql_insert_compiler_return_id_attribute(self):
        """
        Regression test for #14019: SQLInsertCompiler.as_sql() failure
        """
        db = router.db_for_write(Party)
        query = InsertQuery(Party)
        query.insert_values([Party._meta.fields[0]], [], raw=False)
        # this line will raise an AttributeError without the accompanying fix
        query.get_compiler(using=db).as_sql()

    def test_empty_choice(self):
        # NOTE: Part of the regression test here is merely parsing the model
        # declaration. The verbose_name, in particular, did not always work.
        a = Article.objects.create(
            headline="Look at me!", pub_date=datetime.datetime.now()
        )
        # An empty choice field should return None for the display name.
        self.assertIs(a.get_status_display(), None)

        # Empty strings should be returned as Unicode
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(a.misc_data, '')
        self.assertIs(type(a.misc_data), six.text_type)

    def test_long_textfield(self):
        # TextFields can hold more than 4000 characters (this was broken in
        # Oracle).
        a = Article.objects.create(
            headline="Really, really big",
            pub_date=datetime.datetime.now(),
            article_text="ABCDE" * 1000
        )
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(len(a.article_text), 5000)

    def test_long_unicode_textfield(self):
        # TextFields can hold more than 4000 bytes also when they are
        # less than 4000 characters
        a = Article.objects.create(
            headline="Really, really big",
            pub_date=datetime.datetime.now(),
            article_text='\u05d0\u05d1\u05d2' * 1000
        )
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(len(a.article_text), 3000)

    def test_date_lookup(self):
        # Regression test for #659
        Party.objects.create(when=datetime.datetime(1999, 12, 31))
        Party.objects.create(when=datetime.datetime(1998, 12, 31))
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        Party.objects.create(when=datetime.datetime(1, 3, 3))
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=2), []
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=1), [
                datetime.date(1999, 1, 1)
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=12), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year=1998), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        # Regression test for #8510
        self.assertQuerysetEqual(
            Party.objects.filter(when__day="31"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month="12"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year="1998"), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        # Regression test for #18969
        self.assertQuerysetEqual(
            Party.objects.filter(when__year=1), [
                datetime.date(1, 3, 3),
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year='1'), [
                datetime.date(1, 3, 3),
            ],
            attrgetter("when")
        )

    # Class-level conditional: mark the above test as an expected failure on
    # affected Python/MySQL combinations.
    if (3,) <= sys.version_info < (3, 3) and connection.vendor == 'mysql':
        # In Python < 3.3, datetime.strftime raises an exception for years
        # below 1000, and existing MySQL DB-API drivers hit this problem.
        test_date_lookup = unittest.expectedFailure(test_date_lookup)

    def test_date_filter_null(self):
        # Date filtering was failing with NULL date values in SQLite
        # (regression test for #3501, among other things).
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        Party.objects.create()
        p = Party.objects.filter(when__month=1)[0]
        self.assertEqual(p.when, datetime.date(1999, 1, 1))
        self.assertQuerysetEqual(
            Party.objects.filter(pk=p.pk).dates("when", "month"), [
                1
            ],
            attrgetter("month")
        )

    def test_get_next_prev_by_field(self):
        # Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
        # when we have usecs values stored on the database
        #
        # It crashed after the Field.get_db_prep_* refactor, because on most
        # backends DateTimeFields supports usecs, but DateTimeField.to_python
        # didn't recognize them. (Note that
        # Model._get_next_or_previous_by_FIELD coerces values to strings)
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
        e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
        self.assertEqual(
            e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
        )
        self.assertEqual(
            e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
        )

    def test_primary_key_foreign_key_types(self):
        # Check Department and Worker (non-default PK type)
        d = Department.objects.create(id=10, name="IT")
        w = Worker.objects.create(department=d, name="Full-time")
        self.assertEqual(six.text_type(w), "Full-time")

    def test_broken_unicode(self):
        # Models with broken unicode methods should still have a printable repr
        b = BrokenUnicodeMethod.objects.create(name="Jerry")
        self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")

    @skipUnlessDBFeature("supports_timezones")
    def test_timezones(self):
        # Saving an updating with timezone-aware datetime Python objects.
        # Regression test for #10443.
        # The idea is that all these creations and saving should work without
        # crashing. It's not rocket science.
        dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
        dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
        obj = Article.objects.create(
            headline="A headline", pub_date=dt1, article_text="foo"
        )
        obj.pub_date = dt2
        obj.save()
        self.assertEqual(
            Article.objects.filter(headline="A headline").update(pub_date=dt1),
            1
        )

    def test_chained_fks(self):
        """
        Regression for #18432: Chained foreign keys with to_field produce incorrect query
        """
        m1 = Model1.objects.create(pkey=1000)
        m2 = Model2.objects.create(model1=m1)
        m3 = Model3.objects.create(model2=m2)

        # this is the actual test for #18432
        m3 = Model3.objects.get(model2=1000)
        m3.model2
class ModelValidationTest(TestCase):
    """Validation behavior of models with non-auto primary keys."""

    def test_pk_validation(self):
        """A duplicate non-auto primary key must fail validate_unique()."""
        NonAutoPK.objects.create(name="one")
        duplicate = NonAutoPK(name="one")
        self.assertRaises(ValidationError, duplicate.validate_unique)
class EvaluateMethodTest(TestCase):
    """
    Regression test for #13640: cannot filter by objects with 'evaluate' attr
    """
    def test_model_with_evaluate_method(self):
        """
        Filtering on a related object carrying an 'evaluate' attribute works.
        """
        department = Department.objects.create(pk=1, name='abc')
        department.evaluate = 'abc'
        Worker.objects.filter(department=department)
| bsd-3-clause |
tensorflow/tfx | tfx/examples/mnist/mnist_pipeline_native_keras_e2e_test.py | 1 | 5568 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E Tests for tfx.examples.mnist.mnist_pipeline_native_keras."""
import os
from typing import Text
import unittest
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.examples.mnist import mnist_pipeline_native_keras
from tfx.orchestration import metadata
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
@unittest.skipIf(tf.__version__ < '2',
                 'Uses keras Model only compatible with TF 2.x')
class MNISTPipelineNativeKerasEndToEndTest(tf.test.TestCase):
  """End-to-end test for the MNIST native-Keras example pipeline."""

  def setUp(self):
    super(MNISTPipelineNativeKerasEndToEndTest, self).setUp()
    # All pipeline artifacts are rooted in a per-test scratch directory.
    self._test_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)

    self._pipeline_name = 'keras_test'
    self._data_root = os.path.join(os.path.dirname(__file__), 'data')
    # Two trainer module files: a regular Keras model and a TFLite variant.
    self._module_file = os.path.join(
        os.path.dirname(__file__), 'mnist_utils_native_keras.py')
    self._module_file_lite = os.path.join(
        os.path.dirname(__file__), 'mnist_utils_native_keras_lite.py')
    self._serving_model_dir = os.path.join(self._test_dir, 'serving_model')
    self._serving_model_dir_lite = os.path.join(
        self._test_dir, 'serving_model_lite')
    self._pipeline_root = os.path.join(self._test_dir, 'tfx', 'pipelines',
                                       self._pipeline_name)
    self._metadata_path = os.path.join(self._test_dir, 'tfx', 'metadata',
                                       self._pipeline_name, 'metadata.db')

  def assertExecutedOnce(self, component: Text) -> None:
    """Check the component is executed exactly once."""
    component_path = os.path.join(self._pipeline_root, component)
    self.assertTrue(fileio.exists(component_path))
    outputs = fileio.listdir(component_path)

    # The '.system' directory holds execution bookkeeping, not outputs.
    self.assertIn('.system', outputs)
    outputs.remove('.system')
    system_paths = [
        os.path.join('.system', path)
        for path in fileio.listdir(os.path.join(component_path, '.system'))
    ]
    self.assertNotEmpty(system_paths)
    self.assertIn('.system/executor_execution', system_paths)
    outputs.extend(system_paths)
    self.assertNotEmpty(outputs)
    # Exactly one execution subdirectory per output signals "ran once".
    for output in outputs:
      execution = fileio.listdir(os.path.join(component_path, output))
      self.assertLen(execution, 1)

  def assertPipelineExecution(self) -> None:
    # Verify every component of the pipeline (both the regular and the
    # TFLite branches) ran exactly once.
    self.assertExecutedOnce('ImportExampleGen')
    self.assertExecutedOnce('Evaluator.mnist')
    self.assertExecutedOnce('Evaluator.mnist_lite')
    self.assertExecutedOnce('ExampleValidator')
    self.assertExecutedOnce('Pusher.mnist')
    self.assertExecutedOnce('Pusher.mnist_lite')
    self.assertExecutedOnce('SchemaGen')
    self.assertExecutedOnce('StatisticsGen')
    self.assertExecutedOnce('Trainer.mnist')
    self.assertExecutedOnce('Trainer.mnist_lite')
    self.assertExecutedOnce('Transform')

  def testMNISTPipelineNativeKeras(self):
    if not tf.executing_eagerly():
      self.skipTest('The test requires TF2.')
    # First run: every component should execute and push models.
    BeamDagRunner().run(
        mnist_pipeline_native_keras._create_pipeline(
            pipeline_name=self._pipeline_name,
            data_root=self._data_root,
            module_file=self._module_file,
            module_file_lite=self._module_file_lite,
            serving_model_dir=self._serving_model_dir,
            serving_model_dir_lite=self._serving_model_dir_lite,
            pipeline_root=self._pipeline_root,
            metadata_path=self._metadata_path,
            beam_pipeline_args=[]))

    self.assertTrue(fileio.exists(self._serving_model_dir))
    self.assertTrue(fileio.exists(self._serving_model_dir_lite))
    self.assertTrue(fileio.exists(self._metadata_path))
    metadata_config = metadata.sqlite_metadata_connection_config(
        self._metadata_path)
    expected_execution_count = 11
    with metadata.Metadata(metadata_config) as m:
      artifact_count = len(m.store.get_artifacts())
      execution_count = len(m.store.get_executions())
      self.assertGreaterEqual(artifact_count, execution_count)
      self.assertEqual(execution_count, expected_execution_count)

    self.assertPipelineExecution()

    # Runs pipeline the second time.
    BeamDagRunner().run(
        mnist_pipeline_native_keras._create_pipeline(
            pipeline_name=self._pipeline_name,
            data_root=self._data_root,
            module_file=self._module_file,
            module_file_lite=self._module_file_lite,
            serving_model_dir=self._serving_model_dir,
            serving_model_dir_lite=self._serving_model_dir_lite,
            pipeline_root=self._pipeline_root,
            metadata_path=self._metadata_path,
            beam_pipeline_args=[]))

    # Asserts cache execution.
    with metadata.Metadata(metadata_config) as m:
      # Artifact count is unchanged.
      self.assertLen(m.store.get_artifacts(), artifact_count)
      self.assertLen(m.store.get_executions(), expected_execution_count * 2)
# Allow running this test module directly via the TF test runner.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
GlobalBoost/GlobalBoost-Y | test/functional/test_framework/netutil.py | 12 | 5103 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.
Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
"""
import sys
import socket
import struct
import array
import os
from binascii import unhexlify, hexlify
# STATE_ESTABLISHED = '01'
# STATE_SYN_SENT = '02'
# STATE_SYN_RECV = '03'
# STATE_FIN_WAIT1 = '04'
# STATE_FIN_WAIT2 = '05'
# STATE_TIME_WAIT = '06'
# STATE_CLOSE = '07'
# STATE_CLOSE_WAIT = '08'
# STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
# STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
    # Drop empty-string entries (produced by splitting on runs of spaces).
    return list(filter(lambda entry: entry != '', array))
def _convert_ip_port(array):
    # Split a /proc/net/tcp "HOST:PORT" field; both parts are hex.
    addr_hex, port_hex = array.split(':')
    # convert host from mangled-per-four-bytes form as used by kernel
    raw = unhexlify(addr_hex)
    words = []
    for offset in range(0, (len(raw) // 4) * 4, 4):
        (word,) = struct.unpack('=I', raw[offset:offset + 4])
        words.append('%08x' % word)
    return ''.join(words), int(port_hex, 16)
def netstat(typ='tcp'):
    '''
    Function to return a list with status of tcp connections at linux systems
    To get pid of all network process running on system, you must run this script
    as superuser
    '''
    with open('/proc/net/' + typ, 'r', encoding='utf8') as f:
        lines = f.readlines()
    lines.pop(0)  # drop the column-header line
    entries = []
    for raw_line in lines:
        fields = _remove_empty(raw_line.split(' '))  # Split lines and remove empty spaces.
        entries.append([
            fields[0],                      # connection id
            _convert_ip_port(fields[1]),    # local address
            _convert_ip_port(fields[2]),    # remote address
            fields[3],                      # connection state
            int(fields[9]),                 # inode, to match with process pid
        ])
    return entries
def get_bind_addrs(pid):
    '''
    Get bind addresses as (host,port) tuples for process pid.
    '''
    # Match listening TCP sockets (IPv4 and IPv6) against the socket inodes
    # owned by the process; each conn is [id, laddr, raddr, state, inode].
    inodes = get_socket_inodes(pid)
    bind_addrs = []
    for conn in netstat('tcp') + netstat('tcp6'):
        if conn[3] == STATE_LISTEN and conn[4] in inodes:
            bind_addrs.append(conn[1])
    return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
    '''
    Return all interfaces that are up.

    Uses the SIOCGIFCONF ioctl (Linux only) and returns a list of
    (interface name bytes, IPv4 address string) tuples.
    '''
    import fcntl  # Linux only, so only import when required
    is_64bits = sys.maxsize > 2**32
    struct_size = 40 if is_64bits else 32
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        max_possible = 8  # initial value
        while True:
            bytes = max_possible * struct_size
            names = array.array('B', b'\0' * bytes)
            outbytes = struct.unpack('iL', fcntl.ioctl(
                s.fileno(),
                0x8912,  # SIOCGIFCONF
                struct.pack('iL', bytes, names.buffer_info()[0])
            ))[0]
            if outbytes == bytes:
                # Buffer was filled completely: there may be more interfaces,
                # so retry with a larger buffer.
                max_possible *= 2
            else:
                break
    finally:
        # Don't leak the probe socket (the original never closed it).
        s.close()
    # array.tostring() was removed in Python 3.9; tobytes() is the same operation.
    namestr = names.tobytes()
    return [(namestr[i:i+16].split(b'\0', 1)[0],
             socket.inet_ntoa(namestr[i+20:i+24]))
            for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
    '''
    Convert string IPv4 or IPv6 address to binary address as returned by
    get_bind_addrs.
    Very naive implementation that certainly doesn't work for all IPv6 variants.
    '''
    if '.' in addr:  # IPv4: one byte per dotted component
        octets = [int(part) for part in addr.split('.')]
    elif ':' in addr:  # IPv6
        prefix, suffix = [], []
        current = prefix
        pieces = addr.split(':')
        last = len(pieces) - 1
        for pos, piece in enumerate(pieces):
            if piece == '':
                # skip empty component at beginning or end
                if pos == 0 or pos == last:
                    continue
                # a "::" in the middle switches from prefix to suffix
                assert current is prefix  # at most one "::" allowed
                current = suffix
            else:  # two bytes per component
                word = int(piece, 16)
                current.append(word >> 8)
                current.append(word & 0xff)
        pad = 16 - len(prefix) - len(suffix)
        assert (current is prefix and pad == 0) or (current is suffix and pad > 0)
        octets = prefix + ([0] * pad) + suffix
    else:
        raise ValueError('Could not parse address %s' % addr)
    return hexlify(bytearray(octets)).decode('ascii')
def test_ipv6_local():
    '''
    Check for (local) IPv6 support.

    Returns True if a UDP socket can be "connected" to ::1, False otherwise.
    '''
    import socket
    # By using SOCK_DGRAM this will not actually make a connection, but it will
    # fail if there is no route to IPv6 localhost.
    have_ipv6 = True
    try:
        s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        try:
            s.connect(('::1', 0))
        finally:
            # Close the probe socket (the original leaked the file descriptor).
            s.close()
    except socket.error:
        have_ipv6 = False
    return have_ipv6
| mit |
AlfioEmanueleFresta/practical-ecb-lib | cp_ecb/image.py | 1 | 4402 | from PIL import Image
class InMemoryImage:
    """
    A minimal container for raw image data held entirely in memory.
    """
    def __init__(self, w, h, c=3,
                 b=b'', encrypted=False):
        """
        Instantiate a new image.

        :param w: The width of the image (px).
        :param h: The height of the image (px).
        :param c: The number of colour channels of the image. Default is 3.
        :param b: A byte literal for the body of the image.
        :param encrypted: A flag to say whether the image is encrypted or not.
        """
        self.w = w
        self.h = h
        self.c = c
        self.b = b
        self.encrypted = encrypted

    def __repr__(self):
        state = "encrypted" if self.encrypted else "unencrypted"
        return "<InMemoryImage(%s): channels=%d, width=%d, height=%d>" % (
            state, self.c, self.w, self.h
        )
def load_image(input_file, encrypted=False):
    """
    Load an image file into memory as a InMemoryImage object.

    :param input_file: The file to load.
    :param encrypted: Whether to flag the file as an encrypted image or not.
    :return: An instantiated InMemoryImage object.
    """
    image_file = Image.open(input_file)
    # Normalise to 3-channel RGB so the raw body is always 3 bytes per pixel.
    image = image_file.convert('RGB')
    image_size = image.size
    image_b = b''
    # Serialise pixels row by row (top to bottom) as packed R,G,B bytes.
    for y in range(image_size[1]):
        for x in range(image_size[0]):
            r, g, b = image.getpixel((x, y))
            image_b += bytes([r, g, b])
    image_file.close()
    return InMemoryImage(w=image_size[0], h=image_size[1],
                         c=3, b=image_b, encrypted=encrypted)
def save_image(image, output_file):
    """
    Save an InMemoryImage to disk as an RGB image file.

    :param image: The InMemoryImage object to save.
    :param output_file: The file name to write the image to.
    """
    output = Image.new("RGB", (image.w, image.h))
    # Truncate the body to a whole number of pixels, then group the flat
    # byte string into per-pixel (r, g, b) tuples.
    maxlen = len(image.b) - (len(image.b) % image.c)
    data = tuple(tuple(image.b[i:i + image.c]) for i in range(0, maxlen, image.c))
    # Discard any trailing pixels beyond the declared image dimensions
    # (an encryption function may have padded the body).
    data = data[:(image.w * image.h)]
    output.putdata(data)
    output.save(output_file)
def _crypt_image(encrypt, image, function):
    """
    Shared worker for encrypt_image()/decrypt_image(): validates the image
    and its encryption flag, transforms the body in place and flips the flag.
    """
    if type(image) is not InMemoryImage:
        raise ValueError("You need to pass this function a valid InMemoryImage object.")
    if encrypt and image.encrypted:
        raise ValueError("The input image is already encrypted.")
    if (not encrypt) and (not image.encrypted):
        raise ValueError("The input image is not flagged as encrypted and can't be decrypted.")
    transformed = function(image.b)
    # Allow return list of ordinals
    if type(transformed) is list:
        transformed = bytes(transformed)
    image.b = transformed
    image.encrypted = encrypt
    return image
def encrypt_image(image, function):
    """
    Encrypt the content of an InMemoryImage using a given function.

    :param image: The unencrypted InMemoryImage object.
    :param function: An encryption function which takes a single bytes literal and returns a single bytes literal.
    :return: An encrypted InMemoryImage object.
    """
    return _crypt_image(True, image=image, function=function)
def decrypt_image(image, function):
    """
    Decrypt the content of an InMemoryImage using a given function.

    :param image: The encrypted InMemoryImage object.
    :param function: A decryption function which takes a single bytes literal and returns a single bytes literal.
    :return: An unencrypted InMemoryImage object.
    """
    return _crypt_image(False, image=image, function=function)
def encrypt_image_file(input_file, function, output_file):
    """
    Loads an image file, encrypts its contents and saves it as another image file.

    :param input_file: The original unencrytped image file.
    :param function: The encryption function to use. This must take a single bytes literal and return a single bytes literal.
    :param output_file: The file name for the encrypted image.
    """
    save_image(encrypt_image(load_image(input_file), function), output_file)
def decrypt_image_file(input_file, function, output_file):
    """
    Loads an encrypted image file, decrypts its contents and saves it as another image file.

    :param input_file: The encrypted image file.
    :param function: The decryption function to use. This must take a single bytes literal and return a single bytes literal.
    :param output_file: The file name for the decrypted image.
    """
    encrypted = load_image(input_file, encrypted=True)
    save_image(decrypt_image(encrypted, function), output_file)
| gpl-3.0 |
boegel/easybuild-framework | easybuild/tools/parallelbuild.py | 2 | 8609 | # #
# Copyright 2012-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Module for doing parallel builds. This uses a PBS-like cluster. You should be able to submit jobs (which can have
dependencies)
Support for PBS is provided via the PbsJob class. If you want you could create other job classes and use them here.
:author: Toon Willems (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Stijn De Weirdt (Ghent University)
"""
import math
import os
import re
from easybuild.base import fancylogger
from easybuild.framework.easyblock import get_easyblock_instance
from easybuild.framework.easyconfig.easyconfig import ActiveMNS
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option, get_repository, get_repositorypath
from easybuild.tools.module_naming_scheme.utilities import det_full_ec_version
from easybuild.tools.job.backend import job_backend
from easybuild.tools.repository.repository import init_repository
_log = fancylogger.getLogger('parallelbuild', fname=False)
def _to_key(dep):
    """Determine key for specified dependency."""
    # The full module name (per the active module naming scheme) uniquely
    # identifies a dependency.
    return ActiveMNS().det_full_module_name(dep)
def build_easyconfigs_in_parallel(build_command, easyconfigs, output_dir='easybuild-build', prepare_first=True):
    """
    Build easyconfigs in parallel by submitting jobs to a batch-queuing system.
    Return list of jobs submitted.

    Argument `easyconfigs` is a list of easyconfigs which can be
    built: e.g. they have no unresolved dependencies. This function
    will build them in parallel by submitting jobs.

    :param build_command: build command to use
    :param easyconfigs: list of easyconfig files
    :param output_dir: output directory
    :param prepare_first: prepare by runnning fetch step first for each easyconfig
    """
    _log.info("going to build these easyconfigs in parallel: %s", [os.path.basename(ec['spec']) for ec in easyconfigs])

    active_job_backend = job_backend()
    if active_job_backend is None:
        raise EasyBuildError("Can not use --job if no job backend is available.")
    try:
        active_job_backend.init()
    except RuntimeError as err:
        raise EasyBuildError("connection to server failed (%s: %s), can't submit jobs.", err.__class__.__name__, err)

    # dependencies have already been resolved,
    # so one can linearly walk over the list and use previous job id's
    jobs = []

    # keep track of which job builds which module
    module_to_job = {}

    for easyconfig in easyconfigs:
        # this is very important, otherwise we might have race conditions
        # e.g. GCC-4.5.3 finds cloog.tar.gz but it was incorrectly downloaded by GCC-4.6.3
        # running this step here, prevents this
        if prepare_first:
            prepare_easyconfig(easyconfig)

        # the new job will only depend on already submitted jobs
        _log.info("creating job for ec: %s" % os.path.basename(easyconfig['spec']))
        new_job = create_job(active_job_backend, build_command, easyconfig, output_dir=output_dir)

        # filter out dependencies marked as external modules
        deps = [d for d in easyconfig['ec'].all_dependencies if not d.get('external_module', False)]
        dep_mod_names = map(ActiveMNS().det_full_module_name, deps)

        # jobs this one must wait for: only deps that were submitted earlier
        job_deps = [module_to_job[dep] for dep in dep_mod_names if dep in module_to_job]

        # actually (try to) submit job
        active_job_backend.queue(new_job, job_deps)
        _log.info("job %s for module %s has been submitted", new_job, new_job.module)

        # update dictionary
        module_to_job[new_job.module] = new_job
        jobs.append(new_job)

    active_job_backend.complete()

    return jobs
def submit_jobs(ordered_ecs, cmd_line_opts, testing=False, prepare_first=True):
    """
    Submit jobs.

    :param ordered_ecs: list of easyconfigs, in the order they should be processed
    :param cmd_line_opts: list of command line options (in 'longopt=value' form)
    :param testing: If `True`, skip actual job submission
    :param prepare_first: prepare by runnning fetch step first for each easyconfig
    """
    curdir = os.getcwd()

    # regex pattern for options to ignore (help options can't reach here)
    # NOTE: '--job' deliberately has no trailing '$', so every --job* option is dropped
    ignore_opts = re.compile('^--robot$|^--job|^--try-.*$')

    # generate_cmd_line returns the options in form --longopt=value
    opts = [o for o in cmd_line_opts if not ignore_opts.match(o.split('=')[0])]

    # add --disable-job to make sure the submitted job doesn't submit a job itself,
    # resulting in an infinite cycle of jobs;
    # this can happen if job submission is enabled via a configuration file or via $EASYBUILD_JOB,
    # cfr. https://github.com/easybuilders/easybuild-framework/issues/3307
    opts.append('--disable-job')

    # compose string with command line options, properly quoted and with '%' characters escaped
    opts_str = ' '.join(opts).replace('%', '%%')

    # %(spec)s, %(add_opts)s and %(output_dir)s are filled in per job by create_job
    command = "unset TMPDIR && cd %s && eb %%(spec)s %s %%(add_opts)s --testoutput=%%(output_dir)s" % (curdir, opts_str)
    _log.info("Command template for jobs: %s" % command)
    if testing:
        _log.debug("Skipping actual submission of jobs since testing mode is enabled")
        return command
    else:
        return build_easyconfigs_in_parallel(command, ordered_ecs, prepare_first=prepare_first)
def create_job(job_backend, build_command, easyconfig, output_dir='easybuild-build'):
    """
    Creates a job to build a *single* easyconfig.

    :param job_backend: A factory object for querying server parameters and creating actual job objects
    :param build_command: format string for command, full path to an easyconfig file will be substituted in it
    :param easyconfig: easyconfig as processed by process_easyconfig
    :param output_dir: optional output path; --regtest-output-dir will be used inside the job with this variable

    returns the job
    """
    # obtain unique name based on name/easyconfig version tuple
    ec_tuple = (easyconfig['ec']['name'], det_full_ec_version(easyconfig['ec']))
    name = '-'.join(ec_tuple)

    # determine whether additional options need to be passed to the 'eb' command
    add_opts = ''
    if easyconfig['hidden']:
        add_opts += ' --hidden'

    # create command based on build_command template
    command = build_command % {
        'add_opts': add_opts,
        'output_dir': os.path.join(os.path.abspath(output_dir), name),
        'spec': easyconfig['spec'],
    }

    # just use latest build stats
    repo = init_repository(get_repository(), get_repositorypath())
    buildstats = repo.get_buildstats(*ec_tuple)
    extra = {}
    if buildstats:
        previous_time = buildstats[-1]['build_time']
        # request twice the previous build time, converted from minutes to hours (rounded up)
        extra['hours'] = int(math.ceil(previous_time * 2 / 60))

    if build_option('job_cores'):
        extra['cores'] = build_option('job_cores')

    job = job_backend.make_job(command, name, **extra)
    # remember which module this job builds, for dependency resolution by the caller
    job.module = easyconfig['ec'].full_mod_name

    return job
def prepare_easyconfig(ec):
    """
    Prepare for building specified easyconfig (fetch sources)

    :param ec: parsed easyconfig (EasyConfig instance)
    """
    try:
        easyblock_instance = get_easyblock_instance(ec)
        easyblock_instance.update_config_template_run_step()
        # fetch sources/patches now; checksums are verified later, during the actual build job
        easyblock_instance.fetch_step(skip_checksums=True)
        # the log file was only needed for the fetch step, clean it up
        _log.debug("Cleaning up log file %s..." % easyblock_instance.logfile)
        easyblock_instance.close_log()
        os.remove(easyblock_instance.logfile)
    except (OSError, EasyBuildError) as err:
        raise EasyBuildError("An error occurred while preparing %s: %s", ec, err)
| gpl-2.0 |
pranjalpatil/scrapy | scrapy/core/downloader/webclient.py | 115 | 5048 | from time import time
from six.moves.urllib.parse import urlparse, urlunparse, urldefrag
from twisted.web.client import HTTPClientFactory
from twisted.web.http import HTTPClient
from twisted.internet import defer
from scrapy.http import Headers
from scrapy.utils.httpobj import urlparse_cached
from scrapy.responsetypes import responsetypes
def _parsed_url_args(parsed):
    """Extract (scheme, netloc, host, port, path) from a parsed URL result,
    defaulting the port from the scheme and rebuilding the request path
    (path + params + query)."""
    path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))
    scheme = parsed.scheme
    netloc = parsed.netloc
    host = parsed.hostname
    port = parsed.port
    if port is None:
        port = 443 if scheme == 'https' else 80
    return scheme, netloc, host, port, path
def _parse(url):
    """Parse a URL string into (scheme, netloc, host, port, path),
    ignoring surrounding whitespace."""
    return _parsed_url_args(urlparse(url.strip()))
class ScrapyHTTPPageGetter(HTTPClient):
    """Twisted HTTP client protocol that sends the request held by its
    factory and feeds status, headers and body back to the factory."""

    delimiter = '\n'

    def connectionMade(self):
        # Send the full request as soon as the connection is established.
        self.headers = Headers()  # bucket for response headers

        # Method command
        self.sendCommand(self.factory.method, self.factory.path)
        # Headers
        for key, values in self.factory.headers.items():
            for value in values:
                self.sendHeader(key, value)
        self.endHeaders()
        # Body
        if self.factory.body is not None:
            self.transport.write(self.factory.body)

    def lineReceived(self, line):
        # Strip trailing whitespace (CR) so HTTPClient's parser sees clean lines.
        return HTTPClient.lineReceived(self, line.rstrip())

    def handleHeader(self, key, value):
        # appendlist keeps every occurrence of repeated headers (e.g. Set-Cookie).
        self.headers.appendlist(key, value)

    def handleStatus(self, version, status, message):
        self.factory.gotStatus(version, status, message)

    def handleEndHeaders(self):
        self.factory.gotHeaders(self.headers)

    def connectionLost(self, reason):
        # Remember why the connection dropped; handleResponse() may need it to
        # distinguish a truncated body from a normal close.
        self._connection_lost_reason = reason
        HTTPClient.connectionLost(self, reason)
        self.factory.noPage(reason)

    def handleResponse(self, response):
        if self.factory.method.upper() == 'HEAD':
            # HEAD responses carry no body by definition.
            self.factory.page('')
        elif self.length is not None and self.length > 0:
            # Fewer bytes arrived than Content-Length promised: report the
            # connection-loss reason as an error instead of a partial page.
            self.factory.noPage(self._connection_lost_reason)
        else:
            self.factory.page(response)
        self.transport.loseConnection()

    def timeout(self):
        self.transport.loseConnection()
        self.factory.noPage(\
            defer.TimeoutError("Getting %s took longer than %s seconds." % \
                (self.factory.url, self.factory.timeout)))
class ScrapyHTTPClientFactory(HTTPClientFactory):
    """Scrapy implementation of Twisted's HTTPClientFactory.

    Builds the connection/request attributes from a Scrapy Request (reusing
    its cached URL parse result) and assembles a Scrapy Response object when
    the download completes.
    """

    protocol = ScrapyHTTPPageGetter

    waiting = 1
    noisy = False
    followRedirect = False
    afterFoundGet = False

    def __init__(self, request, timeout=180):
        self.url = urldefrag(request.url)[0]  # request URL with any #fragment stripped
        self.method = request.method
        self.body = request.body or None
        self.headers = Headers(request.headers)
        self.response_headers = None
        # per-request timeout (download_timeout meta key) overrides the default
        self.timeout = request.meta.get('download_timeout') or timeout
        self.start_time = time()
        self.deferred = defer.Deferred().addCallback(self._build_response, request)

        # Fixes Twisted 11.1.0+ support as HTTPClientFactory is expected
        # to have _disconnectedDeferred. See Twisted r32329.
        # As Scrapy implements it's own logic to handle redirects is not
        # needed to add the callback _waitForDisconnect.
        # Specifically this avoids the AttributeError exception when
        # clientConnectionFailed method is called.
        self._disconnectedDeferred = defer.Deferred()

        self._set_connection_attributes(request)

        # set Host header based on url
        self.headers.setdefault('Host', self.netloc)

        # set Content-Length based len of body
        if self.body is not None:
            self.headers['Content-Length'] = len(self.body)
            # just in case a broken http/1.1 decides to keep connection alive
            self.headers.setdefault("Connection", "close")
        # Content-Length must be specified in POST method even with no body
        elif self.method == 'POST':
            self.headers['Content-Length'] = 0

    def _build_response(self, body, request):
        # download latency = time from request start until response headers arrived
        request.meta['download_latency'] = self.headers_time-self.start_time
        status = int(self.status)
        headers = Headers(self.response_headers)
        respcls = responsetypes.from_args(headers=headers, url=self.url)
        return respcls(url=self.url, status=status, headers=headers, body=body)

    def _set_connection_attributes(self, request):
        parsed = urlparse_cached(request)
        self.scheme, self.netloc, self.host, self.port, self.path = _parsed_url_args(parsed)
        proxy = request.meta.get('proxy')
        if proxy:
            # With a proxy: connect to the proxy host/port and request the
            # absolute URL as the path.
            self.scheme, _, self.host, self.port, _ = _parse(proxy)
            self.path = self.url

    def gotHeaders(self, headers):
        # record when headers arrived (used for download_latency) and keep them
        self.headers_time = time()
        self.response_headers = headers
| bsd-3-clause |
wyc/django | tests/migrations/test_multidb.py | 366 | 6909 | import unittest
from django.db import connection, migrations, models
from django.db.migrations.state import ProjectState
from django.test import override_settings
from .test_operations import OperationTestBase
try:
import sqlparse
except ImportError:
sqlparse = None
class AgnosticRouter(object):
    """
    A router that doesn't have an opinion regarding migrating.
    """
    def allow_migrate(self, db, app_label, **hints):
        # None means "no opinion": Django falls through to other routers.
        return None
class MigrateNothingRouter(object):
    """
    A router that doesn't allow migrating.
    """
    def allow_migrate(self, db, app_label, **hints):
        # Unconditionally veto every migration.
        return False
class MigrateEverythingRouter(object):
    """
    A router that always allows migrating.
    """
    def allow_migrate(self, db, app_label, **hints):
        # Unconditionally approve every migration.
        return True
class MigrateWhenFooRouter(object):
    """
    A router that allows migrating depending on a hint: whatever value was
    passed via the 'foo' hint decides; no hint means no migration.
    """
    def allow_migrate(self, db, app_label, **hints):
        try:
            return hints['foo']
        except KeyError:
            return False
class MultiDBOperationTests(OperationTestBase):
    """Tests that migration operations honour database routers
    (multi-database setups)."""

    multi_db = True

    def _test_create_model(self, app_label, should_run):
        """
        Tests that CreateModel honours multi-db settings.
        """
        operation = migrations.CreateModel(
            "Pony",
            [("id", models.AutoField(primary_key=True))],
        )
        # Test the state alteration
        project_state = ProjectState()
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        # Test the database alteration
        self.assertTableNotExists("%s_pony" % app_label)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        if should_run:
            self.assertTableExists("%s_pony" % app_label)
        else:
            self.assertTableNotExists("%s_pony" % app_label)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertTableNotExists("%s_pony" % app_label)

    @override_settings(DATABASE_ROUTERS=[AgnosticRouter()])
    def test_create_model(self):
        """
        Test when router doesn't have an opinion (i.e. CreateModel should run).
        """
        self._test_create_model("test_mltdb_crmo", should_run=True)

    @override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
    def test_create_model2(self):
        """
        Test when router returns False (i.e. CreateModel shouldn't run).
        """
        self._test_create_model("test_mltdb_crmo2", should_run=False)

    @override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()])
    def test_create_model3(self):
        """
        Test when router returns True (i.e. CreateModel should run).
        """
        self._test_create_model("test_mltdb_crmo3", should_run=True)

    def test_create_model4(self):
        """
        Test multiple routers.
        """
        # The first router with an opinion wins; an agnostic router defers.
        with override_settings(DATABASE_ROUTERS=[AgnosticRouter(), AgnosticRouter()]):
            self._test_create_model("test_mltdb_crmo4", should_run=True)
        with override_settings(DATABASE_ROUTERS=[MigrateNothingRouter(), MigrateEverythingRouter()]):
            self._test_create_model("test_mltdb_crmo4", should_run=False)
        with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter(), MigrateNothingRouter()]):
            self._test_create_model("test_mltdb_crmo4", should_run=True)

    def _test_run_sql(self, app_label, should_run, hints=None):
        # Set up the model unconditionally so the table exists either way.
        with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
            project_state = self.set_up_test_model(app_label)

        sql = """
        INSERT INTO {0}_pony (pink, weight) VALUES (1, 3.55);
        INSERT INTO {0}_pony (pink, weight) VALUES (3, 5.0);
        """.format(app_label)

        operation = migrations.RunSQL(sql, hints=hints or {})
        # Test the state alteration does nothing
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(new_state, project_state)
        # Test the database alteration
        self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony = project_state.apps.get_model(app_label, "Pony")
        if should_run:
            self.assertEqual(Pony.objects.count(), 2)
        else:
            self.assertEqual(Pony.objects.count(), 0)

    @unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
    @override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
    def test_run_sql(self):
        self._test_run_sql("test_mltdb_runsql", should_run=False)

    @unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
    @override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
    def test_run_sql2(self):
        # Without the hint the router vetoes; with foo=True it allows.
        self._test_run_sql("test_mltdb_runsql2", should_run=False)
        self._test_run_sql("test_mltdb_runsql2", should_run=True, hints={'foo': True})

    def _test_run_python(self, app_label, should_run, hints=None):
        # Set up the model unconditionally so the table exists either way.
        with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
            project_state = self.set_up_test_model(app_label)

        # Create the operation
        def inner_method(models, schema_editor):
            Pony = models.get_model(app_label, "Pony")
            Pony.objects.create(pink=1, weight=3.55)
            Pony.objects.create(weight=5)

        operation = migrations.RunPython(inner_method, hints=hints or {})
        # Test the state alteration does nothing
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(new_state, project_state)
        # Test the database alteration
        self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony = project_state.apps.get_model(app_label, "Pony")
        if should_run:
            self.assertEqual(Pony.objects.count(), 2)
        else:
            self.assertEqual(Pony.objects.count(), 0)

    @override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
    def test_run_python(self):
        self._test_run_python("test_mltdb_runpython", should_run=False)

    @override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
    def test_run_python2(self):
        # Without the hint the router vetoes; with foo=True it allows.
        self._test_run_python("test_mltdb_runpython2", should_run=False)
        self._test_run_python("test_mltdb_runpython2", should_run=True, hints={'foo': True})
BrotherPhil/django | django/contrib/contenttypes/migrations/0002_remove_content_type_name.py | 582 | 1168 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def add_legacy_name(apps, schema_editor):
    """Repopulate the legacy ContentType.name column (used as the reverse
    operation when this migration is rolled back)."""
    ContentType = apps.get_model('contenttypes', 'ContentType')
    for content_type in ContentType.objects.all():
        try:
            model = apps.get_model(content_type.app_label, content_type.model)
            content_type.name = model._meta.object_name
        except LookupError:
            # Model class no longer exists; fall back to the stored model name.
            content_type.name = content_type.model
        content_type.save()
class Migration(migrations.Migration):
    # Removes the redundant ContentType.name field: first made nullable, then
    # dropped. On reverse migration, add_legacy_name repopulates the values.

    dependencies = [
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='contenttype',
            options={'verbose_name': 'content type', 'verbose_name_plural': 'content types'},
        ),
        migrations.AlterField(
            model_name='contenttype',
            name='name',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.RunPython(
            migrations.RunPython.noop,  # forwards: nothing to do
            add_legacy_name,            # backwards: restore legacy name values
            hints={'model_name': 'contenttype'},
        ),
        migrations.RemoveField(
            model_name='contenttype',
            name='name',
        ),
    ]
| bsd-3-clause |
akamel001/Sick-Beard | sickbeard/history.py | 9 | 2862 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import db
import datetime
from sickbeard.common import SNATCHED, SUBTITLED, Quality
dateFormat = "%Y%m%d%H%M%S"
def _logHistoryItem(action, showid, season, episode, quality, resource, provider):
    """Insert a single row into the history table.

    :param action: composite status code (status + quality)
    :param resource: name/path of the snatched or downloaded item
    """
    logDate = datetime.datetime.today().strftime(dateFormat)

    # Python 2: make sure the resource name is unicode before handing it to the DB
    if not isinstance(resource, unicode):
        resource = unicode(resource, 'utf-8')

    myDB = db.DBConnection()
    myDB.action("INSERT INTO history (action, date, showid, season, episode, quality, resource, provider) VALUES (?,?,?,?,?,?,?,?)",
                [action, logDate, showid, season, episode, quality, resource, provider])
def logSnatch(searchResult):
    """Record a snatch (successful grab) in history, one row per episode."""
    for curEpObj in searchResult.episodes:

        showid = int(curEpObj.show.tvdbid)
        season = int(curEpObj.season)
        episode = int(curEpObj.episode)
        quality = searchResult.quality

        providerClass = searchResult.provider
        if providerClass != None:
            provider = providerClass.name
        else:
            provider = "unknown"

        # encode SNATCHED together with the quality into one composite status value
        action = Quality.compositeStatus(SNATCHED, searchResult.quality)

        resource = searchResult.name

        _logHistoryItem(action, showid, season, episode, quality, resource, provider)
def logDownload(episode, filename, new_ep_quality, release_group=None):
    """Record a completed download in history for the given episode."""
    showid = int(episode.show.tvdbid)
    season = int(episode.season)
    epNum = int(episode.episode)

    # store the release group as the provider if possible
    provider = release_group if release_group else -1

    _logHistoryItem(episode.status, showid, season, epNum,
                    new_ep_quality, filename, provider)
def logSubtitle(showid, season, episode, status, subtitleResult):
    """Record a subtitle download in history."""
    # Keep the quality from the episode status, but log the action as SUBTITLED.
    _, quality = Quality.splitCompositeStatus(status)
    action = Quality.compositeStatus(SUBTITLED, quality)
    _logHistoryItem(action, showid, season, episode, quality,
                    subtitleResult.path, subtitleResult.service)
cluckmaster/MissionPlanner | Lib/site-packages/numpy/f2py/common_rules.py | 51 | 4709 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
"""
Build common block mechanism for f2py2e.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 10:57:33 $
Pearu Peterson
"""
__version__ = "$Revision: 1.19 $"[10:-1]
import __version__
f2py_version = __version__.version
import pprint
import sys
errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
from auxfuncs import *
import capi_maps
import func2subr
from crackfortran import rmbadname
##############
def findcommonblocks(block, top=1):
    """Recursively collect (name, variable-names, vars-dict) triples for every
    COMMON block found in the given crackfortran block tree. At the top level,
    duplicates (by COMMON block name) are removed, keeping the first one."""
    found = []
    if hascommon(block):
        for name in block['common'].keys():
            vardefs = {}
            for varname in block['common'][name]:
                vardefs[varname] = block['vars'][varname]
            found.append((name, block['common'][name], vardefs))
    elif hasbody(block):
        for part in block['body']:
            found = found + findcommonblocks(part, 0)
    if top:
        unique = []
        seen = []
        for item in found:
            if item[0] not in seen:
                seen.append(item[0])
                unique.append(item)
        return unique
    return found
def buildhooks(m):
    """Generate Fortran wrapper code, C hooks and LaTeX documentation for all
    COMMON blocks in module `m`. Returns (ret-dict, fortran-wrapper-source)."""
    ret = {'commonhooks': [], 'initcommonhooks': [], 'docs': ['"COMMON blocks:\\n"']}
    # Accumulators: fwrap = Fortran source, chooks = C code, ihooks = module
    # init code, doc = LaTeX doc. Single-element lists so the nested helper
    # functions can rebind the string in place.
    fwrap = ['']
    def fadd(line, s=fwrap): s[0] = '%s\n %s' % (s[0], line)
    chooks = ['']
    def cadd(line, s=chooks): s[0] = '%s\n%s' % (s[0], line)
    ihooks = ['']
    def iadd(line, s=ihooks): s[0] = '%s\n%s' % (s[0], line)
    doc = ['']
    def dadd(line, s=doc): s[0] = '%s\n%s' % (s[0], line)
    for (name, vnames, vars) in findcommonblocks(m):
        lower_name = name.lower()
        # Split variables into hidden (intent(hide)) and interfaced ones.
        hnames, inames = [], []
        for n in vnames:
            if isintent_hide(vars[n]): hnames.append(n)
            else: inames.append(n)
        if hnames:
            outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % (name, ','.join(inames), ','.join(hnames)))
        else:
            outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % (name, ','.join(inames)))
        # Fortran init subroutine: declares the COMMON block and passes the
        # addresses of the interfaced members to the C setup callback.
        fadd('subroutine f2pyinit%s(setupfunc)' % name)
        fadd('external setupfunc')
        for n in vnames:
            fadd(func2subr.var2fixfortran(vars, n))
        if name == '_BLNK_':
            # '_BLNK_' denotes the blank (unnamed) COMMON block.
            fadd('common %s' % (','.join(vnames)))
        else:
            fadd('common /%s/ %s' % (name, ','.join(vnames)))
        fadd('call setupfunc(%s)' % (','.join(inames)))
        fadd('end\n')
        # C data descriptor table for the interfaced members.
        cadd('static FortranDataDef f2py_%s_def[] = {' % (name))
        idims = []
        for n in inames:
            ct = capi_maps.getctype(vars[n])
            at = capi_maps.c2capi_map[ct]
            dm = capi_maps.getarrdims(n, vars[n])
            if dm['dims']: idims.append('(%s)' % (dm['dims']))
            else: idims.append('')
            dms = dm['dims'].strip()
            if not dms: dms = '-1'  # -1 marks a scalar entry
            cadd('\t{\"%s\",%s,{{%s}},%s},' % (n, dm['rank'], dms, at))
        cadd('\t{NULL}\n};')
        # C setup callback: stores the Fortran addresses in the descriptor table.
        inames1 = rmbadname(inames)
        inames1_tps = ','.join(map(lambda s: 'char *'+s, inames1))
        cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps))
        cadd('\tint i_f2py=0;')
        for n in inames1:
            cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n))
        cadd('}')
        # Names with '_' need the alternate name-mangling macro.
        if '_' in lower_name:
            F_FUNC = 'F_FUNC_US'
        else:
            F_FUNC = 'F_FUNC'
        cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'\
             % (F_FUNC, lower_name, name.upper(),
                ','.join(['char*']*len(inames1))))
        cadd('static void f2py_init_%s(void) {' % name)
        cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'\
             % (F_FUNC, lower_name, name.upper(), name))
        cadd('}\n')
        # Module init: expose the COMMON block as a PyFortranObject attribute.
        iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % (name, name, name))
        # LaTeX documentation section for this COMMON block.
        tname = name.replace('_', '\\_')
        dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname))
        dadd('\\begin{description}')
        for n in inames:
            dadd('\\item[]{{}\\verb@%s@{}}' % (capi_maps.getarrdocsign(n, vars[n])))
            if hasnote(vars[n]):
                note = vars[n]['note']
                if type(note) is type([]): note = '\n'.join(note)
                dadd('--- %s' % (note))
        dadd('\\end{description}')
        ret['docs'].append('"\t/%s/ %s\\n"' % (name, ','.join(map(lambda v, d: v+d, inames, idims))))
    ret['commonhooks'] = chooks
    ret['initcommonhooks'] = ihooks
    ret['latexdoc'] = doc[0]
    if len(ret['docs']) <= 1: ret['docs'] = ''
    return ret, fwrap[0]
| gpl-3.0 |
pvagner/orca | test/keystrokes/firefox/line_nav_lists.py | 1 | 9203 | #!/usr/bin/python
"""Test of HTML list presentation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()

# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))

# Each stanza below arms Orca's recorder, presses one navigation key, and
# asserts the exact braille and speech presentation of the line landed on.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
    "1. Top of file",
    ["BRAILLE LINE: 'Welcome to a List of Lists h1'",
     " VISIBLE: 'Welcome to a List of Lists h1', cursor=1",
     "SPEECH OUTPUT: 'Welcome to a List of Lists heading level 1'"]))

# Arrow Down through the ordered lists, one line at a time.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "2. Line Down",
    ["BRAILLE LINE: 'Lists are not only fun to make, they are fun to use. They help us:'",
     " VISIBLE: 'Lists are not only fun to make, ', cursor=1",
     "SPEECH OUTPUT: 'Lists are not only fun to make, they are fun to use. They help us:'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "3. Line Down",
    ["BRAILLE LINE: '1. remember what the heck we are doing each day'",
     " VISIBLE: '1. remember what the heck we are', cursor=1",
     "SPEECH OUTPUT: '1. remember what the heck we are doing each day.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "4. Line Down",
    ["BRAILLE LINE: '2. arrange long and arbitrary lines of text into ordered lists that are pleasing to the eye and suggest some'",
     " VISIBLE: '2. arrange long and arbitrary li', cursor=1",
     "SPEECH OUTPUT: '2. arrange long and arbitrary lines of text into ordered lists that are pleasing to the eye and suggest some.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "5. Line Down",
    ["BRAILLE LINE: 'sense of priority, even if it is artificial'",
     " VISIBLE: 'sense of priority, even if it is', cursor=1",
     "SPEECH OUTPUT: 'sense of priority, even if it is artificial.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "6. Line Down",
    ["BRAILLE LINE: '3. look really cool when we carry them around on yellow Post-Itstm.'",
     " VISIBLE: '3. look really cool when we carr', cursor=1",
     "SPEECH OUTPUT: '3. look really cool when we carry them around on yellow Post-Itstm.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "7. Line Down",
    ["BRAILLE LINE: '4. and that other thing I keep forgetting.'",
     " VISIBLE: '4. and that other thing I keep f', cursor=1",
     "SPEECH OUTPUT: '4. and that other thing I keep forgetting.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "8. Line Down",
    ["BRAILLE LINE: 'Your ordered lists can start at a strange number, like:'",
     " VISIBLE: 'Your ordered lists can start at ', cursor=1",
     "SPEECH OUTPUT: 'Your ordered lists can start at a strange number, like:'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "9. Line Down",
    ["KNOWN ISSUE: Gecko is not exposing this as a roman numeral.",
     "BRAILLE LINE: '6. And use roman numerals,'",
     " VISIBLE: '6. And use roman numerals,', cursor=1",
     "SPEECH OUTPUT: '6. And use roman numerals,.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "10. Line Down",
    ["BRAILLE LINE: 'g. You might try using letters as well,'",
     " VISIBLE: 'g. You might try using letters a', cursor=1",
     "SPEECH OUTPUT: 'g. You might try using letters as well,.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "11. Line Down",
    ["BRAILLE LINE: 'H. Maybe you prefer Big Letters,'",
     " VISIBLE: 'H. Maybe you prefer Big Letters,', cursor=1",
     "SPEECH OUTPUT: 'H. Maybe you prefer Big Letters,.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
    "12. Line Down",
    ["KNOWN ISSUE: Gecko is not exposing this as a roman numeral.",
     "BRAILLE LINE: '9. or small roman numerals'",
     " VISIBLE: '9. or small roman numerals', cursor=1",
     "SPEECH OUTPUT: '9. or small roman numerals.'"]))

# Now arrow Up back through the same lines, expecting symmetric output.
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "13. Line Up",
    ["BRAILLE LINE: 'H. Maybe you prefer Big Letters,'",
     " VISIBLE: 'H. Maybe you prefer Big Letters,', cursor=1",
     "SPEECH OUTPUT: 'H. Maybe you prefer Big Letters,.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "14. Line Up",
    ["BRAILLE LINE: 'g. You might try using letters as well,'",
     " VISIBLE: 'g. You might try using letters a', cursor=1",
     "SPEECH OUTPUT: 'g. You might try using letters as well,.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "15. Line Up",
    ["KNOWN ISSUE: Gecko is not exposing this as a roman numeral.",
     "BRAILLE LINE: '6. And use roman numerals,'",
     " VISIBLE: '6. And use roman numerals,', cursor=1",
     "SPEECH OUTPUT: '6. And use roman numerals,.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "16. Line Up",
    ["BRAILLE LINE: 'Your ordered lists can start at a strange number, like:'",
     " VISIBLE: 'Your ordered lists can start at ', cursor=1",
     "SPEECH OUTPUT: 'Your ordered lists can start at a strange number, like:'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "17. Line Up",
    ["BRAILLE LINE: '4. and that other thing I keep forgetting.'",
     " VISIBLE: '4. and that other thing I keep f', cursor=1",
     "SPEECH OUTPUT: '4. and that other thing I keep forgetting.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "18. Line Up",
    ["BRAILLE LINE: '3. look really cool when we carry them around on yellow Post-Itstm.'",
     " VISIBLE: '3. look really cool when we carr', cursor=1",
     "SPEECH OUTPUT: '3. look really cool when we carry them around on yellow Post-Itstm.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "19. Line Up",
    ["BRAILLE LINE: 'sense of priority, even if it is artificial'",
     " VISIBLE: 'sense of priority, even if it is', cursor=1",
     "SPEECH OUTPUT: 'sense of priority, even if it is artificial.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "20. Line Up",
    ["BRAILLE LINE: '2. arrange long and arbitrary lines of text into ordered lists that are pleasing to the eye and suggest some'",
     " VISIBLE: '2. arrange long and arbitrary li', cursor=1",
     "SPEECH OUTPUT: '2. arrange long and arbitrary lines of text into ordered lists that are pleasing to the eye and suggest some.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "21. Line Up",
    ["BRAILLE LINE: '1. remember what the heck we are doing each day'",
     " VISIBLE: '1. remember what the heck we are', cursor=1",
     "SPEECH OUTPUT: '1. remember what the heck we are doing each day.'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "22. Line Up",
    ["BRAILLE LINE: 'Lists are not only fun to make, they are fun to use. They help us:'",
     " VISIBLE: 'Lists are not only fun to make, ', cursor=1",
     "SPEECH OUTPUT: 'Lists are not only fun to make, they are fun to use. They help us:'"]))

sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
    "23. Line Up",
    ["BRAILLE LINE: 'Welcome to a List of Lists h1'",
     " VISIBLE: 'Welcome to a List of Lists h1', cursor=1",
     "SPEECH OUTPUT: 'Welcome to a List of Lists heading level 1'"]))

sequence.append(utils.AssertionSummaryAction())
sequence.start()
| lgpl-2.1 |
Ever-Never/androguard | elsim/elsim/similarity/simhash.py | 44 | 2143 | """
Implementation of Charikar similarity hashes in Python.
Most useful for creating 'fingerprints' of documents or metadata
so you can quickly find duplicates or cluster items.
Part of python-hashes by sangelone. See README and LICENSE.
"""
from hashtype import hashtype
class simhash(hashtype):
    """Charikar similarity hash ('simhash') over an iterable of tokens.

    Near-duplicate inputs yield fingerprints with a small Hamming
    distance, which makes simhashes useful as document fingerprints for
    duplicate detection and clustering.
    """

    def create_hash(self, tokens):
        """Calculate a Charikar simhash with appropriate bitlength.

        Input can be any iterable, but for strings it will automatically
        break it into words first, assuming you don't want to iterate
        over the individual characters.  The fingerprint is stored on
        ``self.hash``; nothing is returned.

        Reference used: http://dsrg.mff.cuni.cz/~holub/sw/shash
        """
        # isinstance (not type(...) ==) so str subclasses are split too.
        if isinstance(tokens, str):
            tokens = tokens.split()
        # One signed counter per output bit: +1 for each token hash with
        # the bit set, -1 for each without it.
        v = [0] * self.hashbits
        # Generator expression: no need to materialize all token hashes.
        for t in (self._string_hash(x) for x in tokens):
            for i in xrange(self.hashbits):
                if t & (1 << i):
                    v[i] += 1
                else:
                    v[i] -= 1
        # A fingerprint bit is set iff its counter is non-negative.
        fingerprint = 0
        for i in xrange(self.hashbits):
            if v[i] >= 0:
                fingerprint += 1 << i
        self.hash = fingerprint

    def _string_hash(self, v):
        """A variable-length version of Python's builtin hash. Neat!"""
        if v == "":
            return 0
        x = ord(v[0]) << 7
        m = 1000003
        mask = 2 ** self.hashbits - 1
        for c in v:
            x = ((x * m) ^ ord(c)) & mask
        x ^= len(v)
        if x == -1:
            # Mirror CPython's hash protocol, where -1 is reserved.
            x = -2
        return x

    def similarity(self, other_hash):
        """Calculate how different this hash is from another simhash.

        Returns a float from 0.0 to 1.0 (inclusive), where 1.0 means
        the fingerprints are identical.

        Raises:
            Exception: if *other_hash* is not a simhash, or its
                bitlength differs from this hash's.
        """
        if not isinstance(other_hash, simhash):
            raise Exception('Hashes must be of same type to find similarity')
        b = self.hashbits
        if b != other_hash.hashbits:
            raise Exception('Hashes must be of equal size to find similarity')
        return float(b - self.hamming_distance(other_hash)) / b
| apache-2.0 |
deepakantony/sms-tools | lectures/06-Harmonic-model/plots-code/predominantmelody.py | 3 | 3124 | import sys, csv, os
from essentia import *
from essentia.standard import *
from pylab import *
from numpy import *
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
# --- Analysis configuration -------------------------------------------------
filename = '../../../sounds/carnatic.wav'
hopSize = 128        # samples between consecutive analysis frames
frameSize = 2048     # analysis frame length in samples
sampleRate = 44100
guessUnvoiced = True # let the melody extractor estimate unvoiced frames

# Essentia algorithm instances for the predominant-melody analysis chain.
run_windowing = Windowing(type='hann', zeroPadding=3*frameSize) # Hann window with x4 zero padding
run_spectrum = Spectrum(size=frameSize * 4)
run_spectral_peaks = SpectralPeaks(minFrequency=50,
                                   maxFrequency=10000,
                                   maxPeaks=100,
                                   sampleRate=sampleRate,
                                   magnitudeThreshold=0,
                                   orderBy="magnitude")
run_pitch_salience_function = PitchSalienceFunction(magnitudeThreshold=60)
run_pitch_salience_function_peaks = PitchSalienceFunctionPeaks(minFrequency=90, maxFrequency=800)
run_pitch_contours = PitchContours(hopSize=hopSize, peakFrameThreshold=0.7)
run_pitch_contours_melody = PitchContoursMelody(guessUnvoiced=guessUnvoiced,
                                                hopSize=hopSize)
pool = Pool();

# Load the audio and apply equal-loudness filtering before analysis.
audio = MonoLoader(filename = filename)()
audio = EqualLoudness()(audio)

# Per-frame pass: window -> spectrum -> peaks -> pitch salience peaks,
# accumulated in the pool for the contour-tracking stage below.
for frame in FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize):
    frame = run_windowing(frame)
    spectrum = run_spectrum(frame)
    peak_frequencies, peak_magnitudes = run_spectral_peaks(spectrum)
    salience = run_pitch_salience_function(peak_frequencies, peak_magnitudes)
    salience_peaks_bins, salience_peaks_saliences = run_pitch_salience_function_peaks(salience)
    pool.add('allframes_salience_peaks_bins', salience_peaks_bins)
    pool.add('allframes_salience_peaks_saliences', salience_peaks_saliences)

# Track pitch contours across frames, then select the predominant melody.
contours_bins, contours_saliences, contours_start_times, duration = run_pitch_contours(
    pool['allframes_salience_peaks_bins'],
    pool['allframes_salience_peaks_saliences'])
pitch, confidence = run_pitch_contours_melody(contours_bins,
                                              contours_saliences,
                                              contours_start_times,
                                              duration)

# --- Plot: magnitude spectrogram with the F0 contours overlaid --------------
figure(1, figsize=(9, 6))
mX, pX = STFT.stftAnal(audio, hamming(frameSize), frameSize, hopSize)
maxplotfreq = 3000.0
numFrames = int(mX[:,0].size)
frmTime = hopSize*arange(numFrames)/float(sampleRate)
binFreq = sampleRate*arange(frameSize*maxplotfreq/sampleRate)/frameSize
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:frameSize*maxplotfreq/sampleRate+1]))
plt.autoscale(tight=True)
offset = .5 * frameSize/sampleRate
for i in range(len(contours_bins)):
    time = contours_start_times[i] - offset + hopSize*arange(size(contours_bins[i]))/float(sampleRate)
    # Contour bins are 10-cent steps above 55 Hz; convert to Hz.
    contours_freq = 55.0 * pow(2, array(contours_bins[i]) * 10 / 1200.0)
    plot(time,contours_freq, color='k', linewidth = 2)
plt.title('mX + F0 trajectories (carnatic.wav)')
tight_layout()
savefig('predominantmelody.png')
show()
| agpl-3.0 |
JioCloud/python-heatclient | heatclient/tests/test_shell.py | 2 | 55063 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import six
from six.moves.urllib import parse
from six.moves.urllib import request
import sys
import fixtures
import tempfile
import testscenarios
import testtools
from heatclient.openstack.common import jsonutils
from heatclient.openstack.common import strutils
from mox3 import mox
from keystoneclient.v2_0 import client as ksclient
from heatclient.common import http
from heatclient import exc
import heatclient.shell
from heatclient.tests import fakes
load_tests = testscenarios.load_tests_apply_scenarios
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
'var'))
class TestCase(testtools.TestCase):
    """Base test case with helpers shared by the heat shell tests."""

    def set_fake_env(self, fake_env):
        """Patch os.environ so only the variables in *fake_env* are set.

        Every auth-related variable the shell reads is explicitly fixed
        (to ``None`` when absent from *fake_env*) so ambient environment
        variables cannot leak into a test.
        """
        client_env = ('OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_ID',
                      'OS_TENANT_NAME', 'OS_AUTH_URL', 'OS_REGION_NAME',
                      'OS_AUTH_TOKEN', 'OS_NO_CLIENT_AUTH', 'OS_SERVICE_TYPE',
                      'OS_ENDPOINT_TYPE', 'HEAT_URL')
        for key in client_env:
            self.useFixture(
                fixtures.EnvironmentVariable(key, fake_env.get(key)))

    # required for testing with Python 2.6
    def assertRegexpMatches(self, text, expected_regexp, msg=None):
        """Fail the test unless the text matches the regular expression."""
        if isinstance(expected_regexp, six.string_types):
            expected_regexp = re.compile(expected_regexp)
        if not expected_regexp.search(text):
            msg = msg or "Regexp didn't match"
            msg = '%s: %r not found in %r' % (
                msg, expected_regexp.pattern, text)
            raise self.failureException(msg)

    def shell_error(self, argstr, error_match):
        """Run the heat shell expecting an error; return captured stderr.

        :param argstr: the command line to run, as a single string
        :param error_match: regexp the raised exception's message must match
        """
        orig = sys.stderr
        sys.stderr = six.StringIO()
        try:
            _shell = heatclient.shell.HeatShell()
            e = self.assertRaises(Exception, _shell.main, argstr.split())
            self.assertRegexpMatches(e.__str__(), error_match)
            err = sys.stderr.getvalue()
        finally:
            # Restore stderr even when an assertion above fails, so a
            # failing test cannot swallow the output of later tests.
            sys.stderr.close()
            sys.stderr = orig
        return err
class EnvVarTest(TestCase):
    """Scenario-driven checks: dropping one auth env var yields its error."""

    # (scenario label, variable to blank out, expected error fragment)
    _missing_var_cases = (
        ('username', 'OS_USERNAME', 'You must provide a username'),
        ('password', 'OS_PASSWORD', 'You must provide a password'),
        ('tenant_name', 'OS_TENANT_NAME', 'You must provide a tenant_id'),
        ('auth_url', 'OS_AUTH_URL', 'You must provide an auth url'),
    )
    scenarios = [(label, dict(remove=var, err=msg))
                 for label, var, msg in _missing_var_cases]

    def test_missing_auth(self):
        # Start from a complete credential set, blank out the scenario's
        # variable, and expect the matching error from any command.
        complete_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        complete_env[self.remove] = None
        self.set_fake_env(complete_env)
        self.shell_error('stack-list', self.err)
class EnvVarTestToken(TestCase):
    """Scenario-driven checks for token auth with one variable missing."""

    # (scenario label, variable to blank out, expected error fragment)
    _missing_var_cases = (
        ('tenant_id', 'OS_TENANT_ID', 'You must provide a tenant_id'),
        ('auth_url', 'OS_AUTH_URL', 'You must provide an auth url'),
    )
    scenarios = [(label, dict(remove=var, err=msg))
                 for label, var, msg in _missing_var_cases]

    def test_missing_auth(self):
        # A complete token-auth environment, minus the scenario's variable.
        token_env = {
            'OS_AUTH_TOKEN': 'atoken',
            'OS_TENANT_ID': 'tenant_id',
            'OS_AUTH_URL': 'http://no.where',
        }
        token_env[self.remove] = None
        self.set_fake_env(token_env)
        self.shell_error('stack-list', self.err)
class ShellParamValidationTest(TestCase):
    """Malformed -P/--parameters values must be rejected for each command."""

    scenarios = [
        ('create', dict(
            command='create ts -P "a!b"',
            err='Malformed parameter')),
        ('stack-create', dict(
            command='stack-create ts -P "ab"',
            err='Malformed parameter')),
        ('update', dict(
            command='update ts -P "a~b"',
            err='Malformed parameter')),
        ('stack-update', dict(
            command='stack-update ts -P "a-b"',
            err='Malformed parameter')),
        ('validate', dict(
            command='validate -P "a=b;c"',
            err='Malformed parameter')),
        ('template-validate', dict(
            command='template-validate -P "a$b"',
            err='Malformed parameter')),
    ]

    def setUp(self):
        super(ShellParamValidationTest, self).setUp()
        self.m = mox.Mox()
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def test_bad_parameters(self):
        # Keystone auth is mocked out; parameter parsing should fail
        # client-side before any heat API request is made.
        self.m.StubOutWithMock(ksclient, 'Client')
        self.m.StubOutWithMock(http.HTTPClient, 'json_request')
        fakes.script_keystone_client()
        self.m.ReplayAll()
        fake_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        self.set_fake_env(fake_env)
        template_file = os.path.join(TEST_VAR_DIR, 'minimal.template')
        cmd = '%s --template-file=%s ' % (self.command, template_file)
        self.shell_error(cmd, self.err)
class ShellValidationTest(TestCase):
    """Errors surfaced by the shell at, or before, the first API call."""

    def setUp(self):
        super(ShellValidationTest, self).setUp()
        self.m = mox.Mox()
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def test_failed_auth(self):
        # Keystone auth succeeds against the fake client, but the first
        # heat API call raises Unauthorized; the shell must report it.
        self.m.StubOutWithMock(ksclient, 'Client')
        self.m.StubOutWithMock(http.HTTPClient, 'json_request')
        fakes.script_keystone_client()
        failed_msg = 'Unable to authenticate user with credentials provided'
        http.HTTPClient.json_request(
            'GET', '/stacks?').AndRaise(exc.Unauthorized(failed_msg))
        self.m.ReplayAll()
        fake_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        self.set_fake_env(fake_env)
        self.shell_error('stack-list', failed_msg)

    def test_stack_create_validation(self):
        # stack-create without any template source must fail client-side
        # (no HTTP expectation beyond the keystone script is set up).
        self.m.StubOutWithMock(ksclient, 'Client')
        self.m.StubOutWithMock(http.HTTPClient, 'json_request')
        fakes.script_keystone_client()
        self.m.ReplayAll()
        fake_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        self.set_fake_env(fake_env)
        self.shell_error(
            'stack-create teststack '
            '--parameters="InstanceType=m1.large;DBUsername=wp;'
            'DBPassword=verybadpassword;KeyName=heat_key;'
            'LinuxDistribution=F17"',
            'Need to specify exactly one of')
class ShellBase(TestCase):
    """Base class that stubs keystone/HTTP and captures shell stdout."""

    def setUp(self):
        super(ShellBase, self).setUp()
        self.m = mox.Mox()
        self.m.StubOutWithMock(ksclient, 'Client')
        self.m.StubOutWithMock(http.HTTPClient, 'json_request')
        self.m.StubOutWithMock(http.HTTPClient, 'raw_request')
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

        # Some tests set exc.verbose = 1, so reset on cleanup
        def unset_exc_verbose():
            exc.verbose = 0

        self.addCleanup(unset_exc_verbose)

    def shell(self, argstr):
        """Run the heat shell with *argstr* and return its stdout.

        A SystemExit with code 0 (e.g. from --help) is treated as
        success; any other exit code fails the test.  Also records the
        shell's registered subcommands on ``self.subcommands``.
        """
        orig = sys.stdout
        try:
            sys.stdout = six.StringIO()
            _shell = heatclient.shell.HeatShell()
            _shell.main(argstr.split())
            self.subcommands = _shell.subcommands.keys()
        except SystemExit:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.assertEqual(0, exc_value.code)
        finally:
            # Capture output and restore stdout no matter what happened.
            out = sys.stdout.getvalue()
            sys.stdout.close()
            sys.stdout = orig
        return out
class ShellTestCommon(ShellBase):
    """Behaviour common to all commands: help output and --debug handling."""

    def setUp(self):
        super(ShellTestCommon, self).setUp()

    def test_help_unknown_command(self):
        self.assertRaises(exc.CommandError, self.shell, 'help foofoo')

    def test_help(self):
        required = [
            '^usage: heat',
            '(?m)^See "heat help COMMAND" for help on a specific command',
        ]
        for argstr in ['--help', 'help']:
            help_text = self.shell(argstr)
            for r in required:
                self.assertRegexpMatches(help_text, r)

    def test_command_help(self):
        # 'heat help <cmd>' and 'heat <cmd> --help' must produce identical
        # output for every registered subcommand.
        output = self.shell('help help')
        self.assertIn('usage: heat help [<subcommand>]', output)
        subcommands = list(self.subcommands)
        for command in subcommands:
            if command.replace('_', '-') == 'bash-completion':
                continue
            output1 = self.shell('help %s' % command)
            output2 = self.shell('%s --help' % command)
            self.assertEqual(output1, output2)
            self.assertRegexpMatches(output1, '^usage: heat %s' % command)

    def test_debug_switch_raises_error(self):
        # With --debug the original exception must propagate to the caller.
        fakes.script_keystone_client()
        http.HTTPClient.json_request(
            'GET', '/stacks?').AndRaise(exc.Unauthorized("FAIL"))
        self.m.ReplayAll()
        fake_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        self.set_fake_env(fake_env)
        args = ['--debug', 'stack-list']
        self.assertRaises(exc.Unauthorized, heatclient.shell.main, args)

    def test_dash_d_switch_raises_error(self):
        # -d behaves like --debug: exceptions propagate unwrapped.
        fakes.script_keystone_client()
        http.HTTPClient.json_request(
            'GET', '/stacks?').AndRaise(exc.CommandError("FAIL"))
        self.m.ReplayAll()
        fake_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        self.set_fake_env(fake_env)
        args = ['-d', 'stack-list']
        self.assertRaises(exc.CommandError, heatclient.shell.main, args)

    def test_no_debug_switch_no_raises_errors(self):
        # Without --debug the shell catches the error and exits non-zero.
        fakes.script_keystone_client()
        http.HTTPClient.json_request(
            'GET', '/stacks?').AndRaise(exc.Unauthorized("FAIL"))
        self.m.ReplayAll()
        fake_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        self.set_fake_env(fake_env)
        args = ['stack-list']
        self.assertRaises(SystemExit, heatclient.shell.main, args)

    def test_help_on_subcommand(self):
        required = [
            '^usage: heat stack-list',
            "(?m)^List the user's stacks",
        ]
        argstrings = [
            'help stack-list',
        ]
        for argstr in argstrings:
            help_text = self.shell(argstr)
            for r in required:
                self.assertRegexpMatches(help_text, r)
class ShellTestUserPass(ShellBase):
def setUp(self):
super(ShellTestUserPass, self).setUp()
self._set_fake_env()
# Patch os.environ to avoid required auth info.
def _set_fake_env(self):
fake_env = {
'OS_USERNAME': 'username',
'OS_PASSWORD': 'password',
'OS_TENANT_NAME': 'tenant_name',
'OS_AUTH_URL': 'http://no.where',
}
self.set_fake_env(fake_env)
def _script_keystone_client(self):
fakes.script_keystone_client()
def test_stack_list(self):
self._script_keystone_client()
fakes.script_heat_list()
self.m.ReplayAll()
list_text = self.shell('stack-list')
required = [
'id',
'stack_status',
'creation_time',
'teststack',
'1',
'CREATE_COMPLETE',
'IN_PROGRESS',
]
for r in required:
self.assertRegexpMatches(list_text, r)
def test_stack_list_with_args(self):
self._script_keystone_client()
expected_url = '/stacks?%s' % parse.urlencode({
'limit': 2,
'status': ['COMPLETE', 'FAILED'],
'marker': 'fake_id',
}, True)
fakes.script_heat_list(expected_url)
self.m.ReplayAll()
list_text = self.shell('stack-list'
' --limit 2'
' --marker fake_id'
' --filters=status=COMPLETE'
' --filters=status=FAILED')
required = [
'teststack',
'teststack2',
]
for r in required:
self.assertRegexpMatches(list_text, r)
def test_parsable_error(self):
message = "The Stack (bad) could not be found."
resp_dict = {
"explanation": "The resource could not be found.",
"code": 404,
"error": {
"message": message,
"type": "StackNotFound",
"traceback": "",
},
"title": "Not Found"
}
self._script_keystone_client()
fakes.script_heat_error(jsonutils.dumps(resp_dict))
self.m.ReplayAll()
e = self.assertRaises(exc.HTTPException, self.shell, "stack-show bad")
self.assertEqual("ERROR: " + message, str(e))
def test_parsable_verbose(self):
message = "The Stack (bad) could not be found."
resp_dict = {
"explanation": "The resource could not be found.",
"code": 404,
"error": {
"message": message,
"type": "StackNotFound",
"traceback": "<TRACEBACK>",
},
"title": "Not Found"
}
self._script_keystone_client()
fakes.script_heat_error(jsonutils.dumps(resp_dict))
self.m.ReplayAll()
exc.verbose = 1
e = self.assertRaises(exc.HTTPException, self.shell, "stack-show bad")
self.assertIn(message, str(e))
def test_parsable_malformed_error(self):
invalid_json = "ERROR: {Invalid JSON Error."
self._script_keystone_client()
fakes.script_heat_error(invalid_json)
self.m.ReplayAll()
e = self.assertRaises(exc.HTTPException, self.shell, "stack-show bad")
self.assertEqual("ERROR: " + invalid_json, str(e))
def test_parsable_malformed_error_missing_message(self):
missing_message = {
"explanation": "The resource could not be found.",
"code": 404,
"error": {
"type": "StackNotFound",
"traceback": "",
},
"title": "Not Found"
}
self._script_keystone_client()
fakes.script_heat_error(jsonutils.dumps(missing_message))
self.m.ReplayAll()
e = self.assertRaises(exc.HTTPException, self.shell, "stack-show bad")
self.assertEqual("ERROR: Internal Error", str(e))
def test_parsable_malformed_error_missing_traceback(self):
message = "The Stack (bad) could not be found."
resp_dict = {
"explanation": "The resource could not be found.",
"code": 404,
"error": {
"message": message,
"type": "StackNotFound",
},
"title": "Not Found"
}
self._script_keystone_client()
fakes.script_heat_error(jsonutils.dumps(resp_dict))
self.m.ReplayAll()
exc.verbose = 1
e = self.assertRaises(exc.HTTPException, self.shell, "stack-show bad")
self.assertEqual("ERROR: The Stack (bad) could not be found.\n",
str(e))
def test_stack_show(self):
self._script_keystone_client()
resp_dict = {"stack": {
"id": "1",
"stack_name": "teststack",
"stack_status": 'CREATE_COMPLETE',
"creation_time": "2012-10-25T01:58:47Z"
}}
resp = fakes.FakeHTTPResponse(
200,
'OK',
{'content-type': 'application/json'},
jsonutils.dumps(resp_dict))
http.HTTPClient.json_request(
'GET', '/stacks/teststack/1').AndReturn((resp, resp_dict))
self.m.ReplayAll()
list_text = self.shell('stack-show teststack/1')
required = [
'id',
'stack_name',
'stack_status',
'creation_time',
'teststack',
'CREATE_COMPLETE',
'2012-10-25T01:58:47Z'
]
for r in required:
self.assertRegexpMatches(list_text, r)
def test_stack_abandon(self):
self._script_keystone_client()
resp_dict = {"stack": {
"id": "1",
"stack_name": "teststack",
"stack_status": 'CREATE_COMPLETE',
"creation_time": "2012-10-25T01:58:47Z"
}}
abandoned_stack = {
"action": "CREATE",
"status": "COMPLETE",
"name": "teststack",
"id": "1",
"resources": {
"foo": {
"name": "foo",
"resource_id": "test-res-id",
"action": "CREATE",
"status": "COMPLETE",
"resource_data": {},
"metadata": {},
}
}
}
resp = fakes.FakeHTTPResponse(
200,
'OK',
{'content-type': 'application/json'},
jsonutils.dumps(resp_dict))
http.HTTPClient.json_request(
'GET', '/stacks/teststack/1').AndReturn((resp, resp_dict))
http.HTTPClient.json_request(
'DELETE',
'/stacks/teststack/1/abandon').AndReturn((resp, abandoned_stack))
self.m.ReplayAll()
abandon_resp = self.shell('stack-abandon teststack/1')
self.assertEqual(abandoned_stack, jsonutils.loads(abandon_resp))
def _output_fake_response(self):
self._script_keystone_client()
resp_dict = {"stack": {
"id": "1",
"stack_name": "teststack",
"stack_status": 'CREATE_COMPLETE',
"creation_time": "2012-10-25T01:58:47Z",
"outputs": [
{
"output_value": "value1",
"output_key": "output1",
"description": "test output 1",
},
{
"output_value": ["output", "value", "2"],
"output_key": "output2",
"description": "test output 2",
},
],
"creation_time": "2012-10-25T01:58:47Z"
}}
resp = fakes.FakeHTTPResponse(
200,
'OK',
{'content-type': 'application/json'},
jsonutils.dumps(resp_dict))
http.HTTPClient.json_request(
'GET', '/stacks/teststack/1').AndReturn((resp, resp_dict))
self.m.ReplayAll()
def test_output_list(self):
self._output_fake_response()
list_text = self.shell('output-list teststack/1')
for r in ['output1', 'output2']:
self.assertRegexpMatches(list_text, r)
def test_output_show(self):
self._output_fake_response()
list_text = self.shell('output-show teststack/1 output1')
self.assertRegexpMatches(list_text, 'value1')
def test_template_show_cfn(self):
self._script_keystone_client()
template_data = open(os.path.join(TEST_VAR_DIR,
'minimal.template')).read()
resp = fakes.FakeHTTPResponse(
200,
'OK',
{'content-type': 'application/json'},
template_data)
resp_dict = jsonutils.loads(template_data)
http.HTTPClient.json_request(
'GET', '/stacks/teststack/template').AndReturn((resp, resp_dict))
self.m.ReplayAll()
show_text = self.shell('template-show teststack')
required = [
'{',
' "AWSTemplateFormatVersion": "2010-09-09"',
' "Outputs": {}',
' "Resources": {}',
' "Parameters": {}',
'}'
]
for r in required:
self.assertRegexpMatches(show_text, r)
def test_template_show_hot(self):
self._script_keystone_client()
resp_dict = {"heat_template_version": "2013-05-23",
"parameters": {},
"resources": {},
"outputs": {}}
resp = fakes.FakeHTTPResponse(
200,
'OK',
{'content-type': 'application/json'},
jsonutils.dumps(resp_dict))
http.HTTPClient.json_request(
'GET', '/stacks/teststack/template').AndReturn((resp, resp_dict))
self.m.ReplayAll()
show_text = self.shell('template-show teststack')
required = [
"heat_template_version: '2013-05-23'",
"outputs: {}",
"parameters: {}",
"resources: {}"
]
for r in required:
self.assertRegexpMatches(show_text, r)
def test_stack_preview(self):
self._script_keystone_client()
resp_dict = {"stack": {
"id": "1",
"stack_name": "teststack",
"stack_status": 'CREATE_COMPLETE',
"resources": {'1': {'name': 'r1'}},
"creation_time": "2012-10-25T01:58:47Z",
}}
resp = fakes.FakeHTTPResponse(
200,
'OK',
{'location': 'http://no.where/v1/tenant_id/stacks/teststack2/2'},
jsonutils.dumps(resp_dict))
http.HTTPClient.json_request(
'POST', '/stacks/preview', data=mox.IgnoreArg(),
headers={'X-Auth-Key': 'password', 'X-Auth-User': 'username'}
).AndReturn((resp, resp_dict))
self.m.ReplayAll()
template_file = os.path.join(TEST_VAR_DIR, 'minimal.template')
preview_text = self.shell(
'stack-preview teststack '
'--template-file=%s '
'--parameters="InstanceType=m1.large;DBUsername=wp;'
'DBPassword=verybadpassword;KeyName=heat_key;'
'LinuxDistribution=F17"' % template_file)
required = [
'stack_name',
'id',
'teststack',
'1',
'resources'
]
for r in required:
self.assertRegexpMatches(preview_text, r)
def test_stack_create(self):
self._script_keystone_client()
resp = fakes.FakeHTTPResponse(
201,
'Created',
{'location': 'http://no.where/v1/tenant_id/stacks/teststack2/2'},
None)
http.HTTPClient.json_request(
'POST', '/stacks', data=mox.IgnoreArg(),
headers={'X-Auth-Key': 'password', 'X-Auth-User': 'username'}
).AndReturn((resp, None))
fakes.script_heat_list()
self.m.ReplayAll()
template_file = os.path.join(TEST_VAR_DIR, 'minimal.template')
create_text = self.shell(
'stack-create teststack '
'--template-file=%s '
'--parameters="InstanceType=m1.large;DBUsername=wp;'
'DBPassword=verybadpassword;KeyName=heat_key;'
'LinuxDistribution=F17"' % template_file)
required = [
'stack_name',
'id',
'teststack',
'1'
]
for r in required:
self.assertRegexpMatches(create_text, r)
def test_stack_create_timeout(self):
self._script_keystone_client()
template_file = os.path.join(TEST_VAR_DIR, 'minimal.template')
template_data = open(template_file).read()
resp = fakes.FakeHTTPResponse(
201,
'Created',
{'location': 'http://no.where/v1/tenant_id/stacks/teststack2/2'},
None)
expected_data = {
'files': {},
'disable_rollback': True,
'parameters': {'DBUsername': 'wp',
'KeyName': 'heat_key',
'LinuxDistribution': 'F17"',
'"InstanceType': 'm1.large',
'DBPassword': 'verybadpassword'},
'stack_name': 'teststack',
'environment': {},
'template': jsonutils.loads(template_data),
'timeout_mins': 123}
http.HTTPClient.json_request(
'POST', '/stacks', data=expected_data,
headers={'X-Auth-Key': 'password', 'X-Auth-User': 'username'}
).AndReturn((resp, None))
fakes.script_heat_list()
self.m.ReplayAll()
create_text = self.shell(
'stack-create teststack '
'--template-file=%s '
'--timeout=123 '
'--parameters="InstanceType=m1.large;DBUsername=wp;'
'DBPassword=verybadpassword;KeyName=heat_key;'
'LinuxDistribution=F17"' % template_file)
required = [
'stack_name',
'id',
'teststack',
'1'
]
for r in required:
self.assertRegexpMatches(create_text, r)
    def test_stack_update_timeout(self):
        # stack-update with --timeout: verify the PUT body carries
        # 'timeout_mins' along with template, parameters and environment.
        self._script_keystone_client()
        template_file = os.path.join(TEST_VAR_DIR, 'minimal.template')
        template_data = open(template_file).read()
        resp = fakes.FakeHTTPResponse(
            202,
            'Accepted',
            {},
            'The request is accepted for processing.')
        # See test_stack_create_timeout for the stray-quote parameter keys.
        expected_data = {
            'files': {},
            'environment': {},
            'template': jsonutils.loads(template_data),
            'parameters': {'DBUsername': 'wp',
                           'KeyName': 'heat_key',
                           'LinuxDistribution': 'F17"',
                           '"InstanceType': 'm1.large',
                           'DBPassword': 'verybadpassword'},
            'timeout_mins': 123}
        http.HTTPClient.json_request(
            'PUT', '/stacks/teststack2/2',
            data=expected_data,
            headers={'X-Auth-Key': 'password', 'X-Auth-User': 'username'}
        ).AndReturn((resp, None))
        fakes.script_heat_list()
        self.m.ReplayAll()
        update_text = self.shell(
            'stack-update teststack2/2 '
            '--template-file=%s '
            '--timeout 123 '
            '--parameters="InstanceType=m1.large;DBUsername=wp;'
            'DBPassword=verybadpassword;KeyName=heat_key;'
            'LinuxDistribution=F17"' % template_file)
        required = [
            'stack_name',
            'id',
            'teststack2',
            '1'
        ]
        for r in required:
            self.assertRegexpMatches(update_text, r)
    def test_stack_create_url(self):
        # stack-create with --template-url: the template is fetched via
        # urllib's urlopen (stubbed below) rather than read from disk.
        self._script_keystone_client()
        resp = fakes.FakeHTTPResponse(
            201,
            'Created',
            {'location': 'http://no.where/v1/tenant_id/stacks/teststack2/2'},
            None)
        self.m.StubOutWithMock(request, 'urlopen')
        request.urlopen('http://no.where/minimal.template').AndReturn(
            six.StringIO('{"AWSTemplateFormatVersion" : "2010-09-09"}'))
        # Pin the full POST body; the template must be the parsed JSON
        # fetched from the URL above.
        expected_data = {
            'files': {},
            'disable_rollback': True,
            'stack_name': 'teststack',
            'environment': {},
            'template': {"AWSTemplateFormatVersion": "2010-09-09"},
            'parameters': {'DBUsername': 'wp',
                           'KeyName': 'heat_key',
                           'LinuxDistribution': 'F17"',
                           '"InstanceType': 'm1.large',
                           'DBPassword': 'verybadpassword'}}
        http.HTTPClient.json_request(
            'POST', '/stacks', data=expected_data,
            headers={'X-Auth-Key': 'password', 'X-Auth-User': 'username'}
        ).AndReturn((resp, None))
        fakes.script_heat_list()
        self.m.ReplayAll()
        create_text = self.shell(
            'stack-create teststack '
            '--template-url=http://no.where/minimal.template '
            '--parameters="InstanceType=m1.large;DBUsername=wp;'
            'DBPassword=verybadpassword;KeyName=heat_key;'
            'LinuxDistribution=F17"')
        required = [
            'stack_name',
            'id',
            'teststack2',
            '2'
        ]
        for r in required:
            self.assertRegexpMatches(create_text, r)
    def test_stack_create_object(self):
        # stack-create with --template-object: the template is retrieved
        # with a raw GET against the object-store URL before the POST.
        self._script_keystone_client()
        template_file = os.path.join(TEST_VAR_DIR, 'minimal.template')
        template_data = open(template_file).read()
        http.HTTPClient.raw_request(
            'GET',
            'http://no.where/container/minimal.template',
        ).AndReturn(template_data)
        resp = fakes.FakeHTTPResponse(
            201,
            'Created',
            {'location': 'http://no.where/v1/tenant_id/stacks/teststack2/2'},
            None)
        http.HTTPClient.json_request(
            'POST', '/stacks', data=mox.IgnoreArg(),
            headers={'X-Auth-Key': 'password', 'X-Auth-User': 'username'}
        ).AndReturn((resp, None))
        fakes.script_heat_list()
        self.m.ReplayAll()
        create_text = self.shell(
            'stack-create teststack2 '
            '--template-object=http://no.where/container/minimal.template '
            '--parameters="InstanceType=m1.large;DBUsername=wp;'
            'DBPassword=verybadpassword;KeyName=heat_key;'
            'LinuxDistribution=F17"')
        required = [
            'stack_name',
            'id',
            'teststack2',
            '2'
        ]
        for r in required:
            self.assertRegexpMatches(create_text, r)
    def test_stack_adopt(self):
        # stack-adopt posts to /stacks like create; body not inspected.
        self._script_keystone_client()
        resp = fakes.FakeHTTPResponse(
            201,
            'Created',
            {'location': 'http://no.where/v1/tenant_id/stacks/teststack/1'},
            None)
        http.HTTPClient.json_request(
            'POST', '/stacks', data=mox.IgnoreArg(),
            headers={'X-Auth-Key': 'password', 'X-Auth-User': 'username'}
        ).AndReturn((resp, None))
        fakes.script_heat_list()
        self.m.ReplayAll()
        template_file = os.path.join(TEST_VAR_DIR, 'minimal.template')
        # --adopt-file supplies the previously-abandoned stack data.
        adopt_data_file = os.path.join(TEST_VAR_DIR, 'adopt_stack_data.json')
        adopt_text = self.shell(
            'stack-adopt teststack '
            '--template-file=%s '
            '--adopt-file=%s '
            '--parameters="InstanceType=m1.large;DBUsername=wp;'
            'DBPassword=verybadpassword;KeyName=heat_key;'
            'LinuxDistribution=F17"' % (template_file, adopt_data_file))
        required = [
            'stack_name',
            'id',
            'teststack',
            '1'
        ]
        for r in required:
            self.assertRegexpMatches(adopt_text, r)
    def test_stack_adopt_without_data(self):
        # stack-adopt must fail client-side (no HTTP stubs recorded)
        # when --adopt-file is omitted.
        failed_msg = 'Need to specify --adopt-file'
        self._script_keystone_client()
        self.m.ReplayAll()
        template_file = os.path.join(TEST_VAR_DIR, 'minimal.template')
        self.shell_error(
            'stack-adopt teststack '
            '--template-file=%s ' % template_file, failed_msg)
    def test_stack_update(self):
        # Basic stack-update: PUT to the stack's path returns 202;
        # the request body is not inspected here.
        self._script_keystone_client()
        resp = fakes.FakeHTTPResponse(
            202,
            'Accepted',
            {},
            'The request is accepted for processing.')
        http.HTTPClient.json_request(
            'PUT', '/stacks/teststack2/2',
            data=mox.IgnoreArg(),
            headers={'X-Auth-Key': 'password', 'X-Auth-User': 'username'}
        ).AndReturn((resp, None))
        fakes.script_heat_list()
        self.m.ReplayAll()
        template_file = os.path.join(TEST_VAR_DIR, 'minimal.template')
        update_text = self.shell(
            'stack-update teststack2/2 '
            '--template-file=%s '
            '--parameters="InstanceType=m1.large;DBUsername=wp;'
            'DBPassword=verybadpassword;KeyName=heat_key;'
            'LinuxDistribution=F17"' % template_file)
        required = [
            'stack_name',
            'id',
            'teststack2',
            '1'
        ]
        for r in required:
            self.assertRegexpMatches(update_text, r)
    def test_stack_delete(self):
        # stack-delete issues a raw DELETE, then prints the remaining
        # stack listing (stubbed by script_heat_list).
        self._script_keystone_client()
        resp = fakes.FakeHTTPResponse(
            204,
            'No Content',
            {},
            None)
        http.HTTPClient.raw_request(
            'DELETE', '/stacks/teststack2/2',
        ).AndReturn((resp, None))
        fakes.script_heat_list()
        self.m.ReplayAll()
        delete_text = self.shell('stack-delete teststack2/2')
        required = [
            'stack_name',
            'id',
            'teststack',
            '1'
        ]
        for r in required:
            self.assertRegexpMatches(delete_text, r)
    def test_stack_delete_multiple(self):
        # Deleting two stacks in one command issues one DELETE per stack,
        # in argument order.
        self._script_keystone_client()
        resp = fakes.FakeHTTPResponse(
            204,
            'No Content',
            {},
            None)
        http.HTTPClient.raw_request(
            'DELETE', '/stacks/teststack1/1',
        ).AndReturn((resp, None))
        http.HTTPClient.raw_request(
            'DELETE', '/stacks/teststack2/2',
        ).AndReturn((resp, None))
        fakes.script_heat_list()
        self.m.ReplayAll()
        delete_text = self.shell('stack-delete teststack1/1 teststack2/2')
        required = [
            'stack_name',
            'id',
            'teststack',
            '1'
        ]
        for r in required:
            self.assertRegexpMatches(delete_text, r)
    def test_build_info(self):
        # build-info: GET /build_info and render api/engine revisions.
        self._script_keystone_client()
        resp_dict = {
            'build_info': {
                'api': {'revision': 'api_revision'},
                'engine': {'revision': 'engine_revision'}
            }
        }
        resp_string = jsonutils.dumps(resp_dict)
        headers = {'content-type': 'application/json'}
        http_resp = fakes.FakeHTTPResponse(200, 'OK', headers, resp_string)
        response = (http_resp, resp_dict)
        http.HTTPClient.json_request('GET', '/build_info').AndReturn(response)
        self.m.ReplayAll()
        build_info_text = self.shell('build-info')
        required = [
            'api',
            'engine',
            'revision',
            'api_revision',
            'engine_revision',
        ]
        for r in required:
            self.assertRegexpMatches(build_info_text, r)
class ShellTestEvents(ShellBase):
    """Tests for the event-list and event-show shell commands.

    Runs once per entry in ``scenarios`` (testscenarios-style), so each
    test is exercised with both integer and UUID event ids.
    """
    def setUp(self):
        super(ShellTestEvents, self).setUp()
        self._set_fake_env()
    # Patch os.environ to avoid required auth info.
    def _set_fake_env(self):
        fake_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        self.set_fake_env(fake_env)
    def _script_keystone_client(self):
        fakes.script_keystone_client()
    # Each scenario provides event_id_one/event_id_two used below.
    scenarios = [
        ('integer_id', dict(
            event_id_one='24',
            event_id_two='42')),
        ('uuid_id', dict(
            event_id_one='3d68809e-c4aa-4dc9-a008-933823d2e44f',
            event_id_two='43b68bae-ed5d-4aed-a99f-0b3d39c2418a'))]
    def test_event_list(self):
        # event-list for one resource: GET the resource's events and
        # verify both events are rendered in the table.
        self._script_keystone_client()
        resp_dict = {"events": [
            {"event_time": "2013-12-05T14:14:30Z",
             "id": self.event_id_one,
             "links": [{"href": "http://heat.example.com:8004/foo",
                        "rel": "self"},
                       {"href": "http://heat.example.com:8004/foo2",
                        "rel": "resource"},
                       {"href": "http://heat.example.com:8004/foo3",
                        "rel": "stack"}],
             "logical_resource_id": "aResource",
             "physical_resource_id": None,
             "resource_name": "aResource",
             "resource_status": "CREATE_IN_PROGRESS",
             "resource_status_reason": "state changed"},
            {"event_time": "2013-12-05T14:14:30Z",
             "id": self.event_id_two,
             "links": [{"href": "http://heat.example.com:8004/foo",
                        "rel": "self"},
                       {"href": "http://heat.example.com:8004/foo2",
                        "rel": "resource"},
                       {"href": "http://heat.example.com:8004/foo3",
                        "rel": "stack"}],
             "logical_resource_id": "aResource",
             "physical_resource_id":
             "bce15ec4-8919-4a02-8a90-680960fb3731",
             "resource_name": "aResource",
             "resource_status": "CREATE_COMPLETE",
             "resource_status_reason": "state changed"}]}
        resp = fakes.FakeHTTPResponse(
            200,
            'OK',
            {'content-type': 'application/json'},
            jsonutils.dumps(resp_dict))
        stack_id = 'teststack/1'
        resource_name = 'testresource/1'
        # Stack id and resource name are URL-quoted into the path.
        http.HTTPClient.json_request(
            'GET', '/stacks/%s/resources/%s/events' % (
                parse.quote(stack_id, ''),
                parse.quote(strutils.safe_encode(
                    resource_name), ''))).AndReturn((resp, resp_dict))
        self.m.ReplayAll()
        event_list_text = self.shell('event-list {0} --resource {1}'.format(
                                     stack_id, resource_name))
        required = [
            'resource_name',
            'id',
            'resource_status_reason',
            'resource_status',
            'event_time',
            'aResource',
            self.event_id_one,
            self.event_id_two,
            'state changed',
            'CREATE_IN_PROGRESS',
            'CREATE_COMPLETE',
            '2013-12-05T14:14:30Z',
            '2013-12-05T14:14:30Z',
        ]
        for r in required:
            self.assertRegexpMatches(event_list_text, r)
    def test_event_show(self):
        # event-show: GET one event and verify every property/value pair
        # is rendered.
        self._script_keystone_client()
        resp_dict = {"event":
                     {"event_time": "2013-12-05T14:14:30Z",
                      "id": self.event_id_one,
                      "links": [{"href": "http://heat.example.com:8004/foo",
                                 "rel": "self"},
                                {"href": "http://heat.example.com:8004/foo2",
                                 "rel": "resource"},
                                {"href": "http://heat.example.com:8004/foo3",
                                 "rel": "stack"}],
                      "logical_resource_id": "aResource",
                      "physical_resource_id": None,
                      "resource_name": "aResource",
                      "resource_properties": {"admin_user": "im_powerful",
                                              "availability_zone": "nova"},
                      "resource_status": "CREATE_IN_PROGRESS",
                      "resource_status_reason": "state changed",
                      "resource_type": "OS::Nova::Server"
                      }}
        resp = fakes.FakeHTTPResponse(
            200,
            'OK',
            {'content-type': 'application/json'},
            jsonutils.dumps(resp_dict))
        stack_id = 'teststack/1'
        resource_name = 'testresource/1'
        http.HTTPClient.json_request(
            'GET', '/stacks/%s/resources/%s/events/%s' %
            (
                parse.quote(stack_id, ''),
                parse.quote(strutils.safe_encode(
                    resource_name), ''),
                parse.quote(self.event_id_one, '')
            )).AndReturn((resp, resp_dict))
        self.m.ReplayAll()
        event_list_text = self.shell('event-show {0} {1} {2}'.format(
                                     stack_id, resource_name,
                                     self.event_id_one))
        required = [
            'Property',
            'Value',
            'event_time',
            '2013-12-05T14:14:30Z',
            'id',
            self.event_id_one,
            'links',
            'http://heat.example.com:8004/foo[0-9]',
            'logical_resource_id',
            'physical_resource_id',
            'resource_name',
            'aResource',
            'resource_properties',
            'admin_user',
            'availability_zone',
            'resource_status',
            'CREATE_IN_PROGRESS',
            'resource_status_reason',
            'state changed',
            'resource_type',
            'OS::Nova::Server',
        ]
        for r in required:
            self.assertRegexpMatches(event_list_text, r)
class ShellTestResources(ShellBase):
    """Tests for resource-list, resource-show and resource-signal."""
    def setUp(self):
        super(ShellTestResources, self).setUp()
        self._set_fake_env()
    # Patch os.environ to avoid required auth info.
    def _set_fake_env(self):
        fake_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        self.set_fake_env(fake_env)
    def _script_keystone_client(self):
        fakes.script_keystone_client()
    def _test_resource_list(self, with_resource_name):
        # Shared body for the two resource-list tests; older APIs may omit
        # 'resource_name', in which case 'logical_resource_id' is shown.
        self._script_keystone_client()
        resp_dict = {"resources": [
                     {"links": [{"href": "http://heat.example.com:8004/foo",
                                 "rel": "self"},
                                {"href": "http://heat.example.com:8004/foo2",
                                 "rel": "resource"}],
                      "logical_resource_id": "aLogicalResource",
                      "physical_resource_id":
                      "43b68bae-ed5d-4aed-a99f-0b3d39c2418a",
                      "resource_status": "CREATE_COMPLETE",
                      "resource_status_reason": "state changed",
                      "resource_type": "OS::Nova::Server",
                      "updated_time": "2014-01-06T16:14:26Z"}]}
        if with_resource_name:
            resp_dict["resources"][0]["resource_name"] = "aResource"
        resp = fakes.FakeHTTPResponse(
            200,
            'OK',
            {'content-type': 'application/json'},
            jsonutils.dumps(resp_dict))
        stack_id = 'teststack/1'
        http.HTTPClient.json_request(
            'GET', '/stacks/%s/resources' % (
                stack_id)).AndReturn((resp, resp_dict))
        self.m.ReplayAll()
        resource_list_text = self.shell('resource-list {0}'.format(stack_id))
        required = [
            'resource_type',
            'resource_status',
            'updated_time',
            'OS::Nova::Server',
            'CREATE_COMPLETE',
            '2014-01-06T16:14:26Z'
        ]
        if with_resource_name:
            required.append('resource_name')
            required.append('aResource')
        else:
            required.append('logical_resource_id')
            required.append("aLogicalResource")
        for r in required:
            self.assertRegexpMatches(resource_list_text, r)
    def test_resource_list(self):
        self._test_resource_list(True)
    def test_resource_list_no_resource_name(self):
        self._test_resource_list(False)
    def test_resource_list_empty(self):
        # An empty resource list still renders the (empty) table frame.
        self._script_keystone_client()
        resp_dict = {"resources": []}
        resp = fakes.FakeHTTPResponse(
            200,
            'OK',
            {'content-type': 'application/json'},
            jsonutils.dumps(resp_dict))
        stack_id = 'teststack/1'
        http.HTTPClient.json_request(
            'GET', '/stacks/%s/resources' % (
                stack_id)).AndReturn((resp, resp_dict))
        self.m.ReplayAll()
        resource_list_text = self.shell('resource-list {0}'.format(stack_id))
        self.assertEqual('''\
+---------------+---------------+-----------------+--------------+
| resource_name | resource_type | resource_status | updated_time |
+---------------+---------------+-----------------+--------------+
+---------------+---------------+-----------------+--------------+
''', resource_list_text)
    def test_resource_show(self):
        # resource-show: GET one resource and verify all fields render.
        self._script_keystone_client()
        resp_dict = {"resource":
                     {"description": "",
                      "links": [{"href": "http://heat.example.com:8004/foo",
                                 "rel": "self"},
                                {"href": "http://heat.example.com:8004/foo2",
                                 "rel": "resource"}],
                      "logical_resource_id": "aResource",
                      "physical_resource_id":
                      "43b68bae-ed5d-4aed-a99f-0b3d39c2418a",
                      "required_by": [],
                      "resource_name": "aResource",
                      "resource_status": "CREATE_COMPLETE",
                      "resource_status_reason": "state changed",
                      "resource_type": "OS::Nova::Server",
                      "updated_time": "2014-01-06T16:14:26Z"}}
        resp = fakes.FakeHTTPResponse(
            200,
            'OK',
            {'content-type': 'application/json'},
            jsonutils.dumps(resp_dict))
        stack_id = 'teststack/1'
        resource_name = 'aResource'
        http.HTTPClient.json_request(
            'GET', '/stacks/%s/resources/%s' %
            (
                parse.quote(stack_id, ''),
                parse.quote(strutils.safe_encode(
                    resource_name), '')
            )).AndReturn((resp, resp_dict))
        self.m.ReplayAll()
        resource_show_text = self.shell('resource-show {0} {1}'.format(
                                        stack_id, resource_name))
        required = [
            'description',
            'links',
            'http://heat.example.com:8004/foo[0-9]',
            'logical_resource_id',
            'aResource',
            'physical_resource_id',
            '43b68bae-ed5d-4aed-a99f-0b3d39c2418a',
            'required_by',
            'resource_name',
            'aResource',
            'resource_status',
            'CREATE_COMPLETE',
            'resource_status_reason',
            'state changed',
            'resource_type',
            'OS::Nova::Server',
            'updated_time',
            '2014-01-06T16:14:26Z',
        ]
        for r in required:
            self.assertRegexpMatches(resource_show_text, r)
    def test_resource_signal(self):
        # resource-signal with inline JSON data (-D): POSTed as parsed dict.
        self._script_keystone_client()
        resp = fakes.FakeHTTPResponse(
            200,
            'OK',
            {},
            '')
        stack_id = 'teststack/1'
        resource_name = 'aResource'
        http.HTTPClient.json_request(
            'POST', '/stacks/%s/resources/%s/signal' %
            (
                parse.quote(stack_id, ''),
                parse.quote(strutils.safe_encode(
                    resource_name), '')
            ),
            data={'message': 'Content'}).AndReturn((resp, ''))
        self.m.ReplayAll()
        text = self.shell(
            'resource-signal {0} {1} -D {{"message":"Content"}}'.format(
                stack_id, resource_name))
        self.assertEqual("", text)
    def test_resource_signal_no_data(self):
        # Without -D/-f the signal POST carries data=None.
        self._script_keystone_client()
        resp = fakes.FakeHTTPResponse(
            200,
            'OK',
            {},
            '')
        stack_id = 'teststack/1'
        resource_name = 'aResource'
        http.HTTPClient.json_request(
            'POST', '/stacks/%s/resources/%s/signal' %
            (
                parse.quote(stack_id, ''),
                parse.quote(strutils.safe_encode(
                    resource_name), '')
            ), data=None).AndReturn((resp, ''))
        self.m.ReplayAll()
        text = self.shell(
            'resource-signal {0} {1}'.format(stack_id, resource_name))
        self.assertEqual("", text)
    def test_resource_signal_no_json(self):
        # Malformed JSON in -D is rejected client-side (no HTTP recorded).
        self._script_keystone_client()
        stack_id = 'teststack/1'
        resource_name = 'aResource'
        self.m.ReplayAll()
        error = self.assertRaises(
            exc.CommandError, self.shell,
            'resource-signal {0} {1} -D [2'.format(
                stack_id, resource_name))
        self.assertIn('Data should be in JSON format', str(error))
    def test_resource_signal_no_dict(self):
        # Valid JSON that is not an object is also rejected.
        self._script_keystone_client()
        stack_id = 'teststack/1'
        resource_name = 'aResource'
        self.m.ReplayAll()
        error = self.assertRaises(
            exc.CommandError, self.shell,
            'resource-signal {0} {1} -D "message"'.format(
                stack_id, resource_name))
        self.assertEqual('Data should be a JSON dict', str(error))
    def test_resource_signal_both_data(self):
        # -D and -f are mutually exclusive.
        self._script_keystone_client()
        stack_id = 'teststack/1'
        resource_name = 'aResource'
        self.m.ReplayAll()
        error = self.assertRaises(
            exc.CommandError, self.shell,
            'resource-signal {0} {1} -D "message" -f foo'.format(
                stack_id, resource_name))
        self.assertEqual('Can only specify one of data and data-file',
                         str(error))
    def test_resource_signal_data_file(self):
        # Signal data read from a file via -f is POSTed as parsed JSON.
        self._script_keystone_client()
        resp = fakes.FakeHTTPResponse(
            200,
            'OK',
            {},
            '')
        stack_id = 'teststack/1'
        resource_name = 'aResource'
        http.HTTPClient.json_request(
            'POST', '/stacks/%s/resources/%s/signal' %
            (
                parse.quote(stack_id, ''),
                parse.quote(strutils.safe_encode(
                    resource_name), '')
            ),
            data={'message': 'Content'}).AndReturn((resp, ''))
        self.m.ReplayAll()
        with tempfile.NamedTemporaryFile() as data_file:
            data_file.write(b'{"message":"Content"}')
            data_file.flush()
            text = self.shell(
                'resource-signal {0} {1} -f {2}'.format(
                    stack_id, resource_name, data_file.name))
            self.assertEqual("", text)
class ShellTestBuildInfo(ShellBase):
    """build-info command test with username/password auth."""
    def setUp(self):
        super(ShellTestBuildInfo, self).setUp()
        self._set_fake_env()
    def _set_fake_env(self):
        '''Patch os.environ to avoid required auth info.'''
        fake_env = {
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where',
        }
        self.set_fake_env(fake_env)
    def test_build_info(self):
        # GET /build_info and verify api/engine revisions are rendered.
        fakes.script_keystone_client()
        resp_dict = {
            'build_info': {
                'api': {'revision': 'api_revision'},
                'engine': {'revision': 'engine_revision'}
            }
        }
        resp_string = jsonutils.dumps(resp_dict)
        headers = {'content-type': 'application/json'}
        http_resp = fakes.FakeHTTPResponse(200, 'OK', headers, resp_string)
        response = (http_resp, resp_dict)
        http.HTTPClient.json_request('GET', '/build_info').AndReturn(response)
        self.m.ReplayAll()
        build_info_text = self.shell('build-info')
        required = [
            'api',
            'engine',
            'revision',
            'api_revision',
            'engine_revision',
        ]
        for r in required:
            self.assertRegexpMatches(build_info_text, r)
class ShellTestToken(ShellTestUserPass):
    """Rerun all ShellTestUserPass tests with token-based auth."""
    def setUp(self):
        self.token = 'a_token'
        super(ShellTestToken, self).setUp()
    def _set_fake_env(self):
        fake_env = {
            'OS_AUTH_TOKEN': self.token,
            'OS_TENANT_ID': 'tenant_id',
            'OS_AUTH_URL': 'http://no.where',
            # Note we also set username/password, because create/update
            # pass them even if we have a token to support storing credentials
            # Hopefully at some point we can remove this and move to only
            # storing trust id's in heat-engine instead..
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password'
        }
        self.set_fake_env(fake_env)
    def _script_keystone_client(self):
        # Keystone fake must hand back the pre-set token.
        fakes.script_keystone_client(token=self.token)
class ShellTestStandaloneToken(ShellTestUserPass):
    """Rerun all ShellTestUserPass tests in standalone mode.

    Standalone mode means --os-no-client-auth with a token and an
    explicit Heat endpoint, bypassing keystone entirely.
    """
    def setUp(self):
        self.token = 'a_token'
        super(ShellTestStandaloneToken, self).setUp()
    def _set_fake_env(self):
        fake_env = {
            'OS_AUTH_TOKEN': self.token,
            'OS_NO_CLIENT_AUTH': 'True',
            'HEAT_URL': 'http://no.where',
            # Note we also set username/password, because create/update
            # pass them even if we have a token to support storing credentials
            # Hopefully at some point we can remove this and move to only
            # storing trust id's in heat-engine instead..
            'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password'
        }
        self.set_fake_env(fake_env)
    def _script_keystone_client(self):
        # Standalone mode shouldn't need any keystoneclient stubbing.
        pass
    def test_bad_template_file(self):
        # Both invalid JSON and JSON-with-Python-literals must produce a
        # template-parse error before any HTTP request is made.
        failed_msg = 'Error parsing template '
        with tempfile.NamedTemporaryFile() as bad_json_file:
            bad_json_file.write(b"{foo:}")
            bad_json_file.flush()
            self.shell_error("stack-create ts -f %s" % bad_json_file.name,
                             failed_msg)
        with tempfile.NamedTemporaryFile() as bad_json_file:
            bad_json_file.write(b'{"foo": None}')
            bad_json_file.flush()
            self.shell_error("stack-create ts -f %s" % bad_json_file.name,
                             failed_msg)
| apache-2.0 |
pythonchelle/opencomparison | apps/accounts/urls.py | 1 | 1313 | from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
from registration.views import activate
from registration.views import register
from accounts.forms import RegistrationForm
# django-registration URL wiring: register/activate flows backed by the
# project's DjangoPackagesRegistrationBackend, plus static "complete" and
# "closed" pages rendered with direct_to_template.
urlpatterns = patterns('',
    url(r'^register/$',
        register,
        {'backend': "accounts.backends.DjangoPackagesRegistrationBackend",
            'form_class': RegistrationForm},
        name='registration_register'),
    url(r'^activate/complete/$',
        direct_to_template,
        {'template': 'registration/activation_complete.html'},
        name='registration_activation_complete'),
    url(r'^activate/(?P<activation_key>\w+)/$',
        activate,
        {'backend': "accounts.backends.DjangoPackagesRegistrationBackend"},
        name='registration_activate'),
    url(r'^register/complete/$',
        direct_to_template,
        {'template': 'registration/registration_complete.html'},
        name='registration_complete'),
    url(r'^register/closed/$',
        direct_to_template,
        {'template': 'registration/registration_closed.html'},
        name='registration_disallowed'),
) | mit |
Benrflanders/Pytris | pyglet/pyglet/graphics/__init__.py | 22 | 26048 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Low-level graphics rendering.
This module provides an efficient low-level abstraction over OpenGL. It gives
very good performance for rendering OpenGL primitives; far better than the
typical immediate-mode usage and, on modern graphics cards, better than using
display lists in many cases. The module is used internally by other areas of
pyglet.
See the Programming Guide for details on how to use this graphics API.
Batches and groups
==================
Without even needing to understand the details on how to draw primitives with
the graphics API, developers can make use of `Batch` and `Group`
objects to improve performance of sprite and text rendering.
The `Sprite`, `Label` and `TextLayout` classes all accept a ``batch`` and
``group`` parameter in their constructors. A batch manages a set of objects
that will be drawn all at once, and a group describes the manner in which an
object is drawn.
The following example creates a batch, adds two sprites to the batch, and then
draws the entire batch::
batch = pyglet.graphics.Batch()
car = pyglet.sprite.Sprite(car_image, batch=batch)
boat = pyglet.sprite.Sprite(boat_image, batch=batch)
    def on_draw():
batch.draw()
Drawing a complete batch is much faster than drawing the items in the batch
individually, especially when those items belong to a common group.
Groups describe the OpenGL state required for an item. This is for the most
part managed by the sprite and text classes, however you can also use groups
to ensure items are drawn in a particular order. For example, the following
example adds a background sprite which is guaranteed to be drawn before the
car and the boat::
batch = pyglet.graphics.Batch()
background = pyglet.graphics.OrderedGroup(0)
foreground = pyglet.graphics.OrderedGroup(1)
background = pyglet.sprite.Sprite(background_image,
batch=batch, group=background)
car = pyglet.sprite.Sprite(car_image, batch=batch, group=foreground)
boat = pyglet.sprite.Sprite(boat_image, batch=batch, group=foreground)
    def on_draw():
batch.draw()
It's preferable to manage sprites and text objects within as few batches as
possible. If the drawing of sprites or text objects need to be interleaved
with other drawing that does not use the graphics API, multiple batches will
be required.
Data item parameters
====================
Many of the functions and methods in this module accept any number of ``data``
parameters as their final parameters. In the documentation these are notated
as ``*data`` in the formal parameter list.
A data parameter describes a vertex attribute format and an optional sequence
to initialise that attribute. Examples of common attribute formats are:
``"v3f"``
Vertex position, specified as three floats.
``"c4B"``
Vertex color, specified as four unsigned bytes.
``"t2f"``
Texture coordinate, specified as two floats.
See `pyglet.graphics.vertexattribute` for the complete syntax of the vertex
format string.
When no initial data is to be given, the data item is just the format string.
For example, the following creates a 2 element vertex list with position and
color attributes::
vertex_list = pyglet.graphics.vertex_list(2, 'v2f', 'c4B')
When initial data is required, wrap the format string and the initial data in
a tuple, for example::
vertex_list = pyglet.graphics.vertex_list(2,
('v2f', (0.0, 1.0, 1.0, 0.0)),
('c4B', (255, 255, 255, 255) * 2))
Drawing modes
=============
Methods in this module that accept a ``mode`` parameter will accept any value
in the OpenGL drawing mode enumeration: ``GL_POINTS``, ``GL_LINE_STRIP``,
``GL_LINE_LOOP``, ``GL_LINES``, ``GL_TRIANGLE_STRIP``, ``GL_TRIANGLE_FAN``,
``GL_TRIANGLES``, ``GL_QUAD_STRIP``, ``GL_QUADS``, and ``GL_POLYGON``.
::
pyglet.graphics.draw(1, GL_POINTS, ('v2i',(10,20)))
However, because of the way the graphics API renders multiple primitives with
shared state, ``GL_POLYGON``, ``GL_LINE_LOOP`` and ``GL_TRIANGLE_FAN`` cannot
be used --- the results are undefined.
When using ``GL_LINE_STRIP``, ``GL_TRIANGLE_STRIP`` or ``GL_QUAD_STRIP`` care
must be taken to insert degenerate vertices at the beginning and end of each
vertex list. For example, given the vertex list::
A, B, C, D
the correct vertex list to provide the vertex list is::
A, A, B, C, D, D
Alternatively, the ``NV_primitive_restart`` extension can be used if it is
present. This also permits use of ``GL_POLYGON``, ``GL_LINE_LOOP`` and
``GL_TRIANGLE_FAN``. Unfortunately the extension is not provided by older
video drivers, and requires indexed vertex lists.
:since: pyglet 1.1
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.gl import *
from pyglet import gl
from pyglet.graphics import vertexbuffer, vertexattribute, vertexdomain
_debug_graphics_batch = pyglet.options['debug_graphics_batch']
def draw(size, mode, *data):
    '''Draw a primitive immediately.

    :Parameters:
        `size` : int
            Number of vertices given
        `mode` : gl primitive type
            OpenGL drawing mode, e.g. ``GL_TRIANGLES``,
            avoiding quotes.
        `data` : data items
            Attribute formats and data.  See the module summary for
            details.

    '''
    # Save client array state so the attribute pointers set below don't
    # leak into the caller's GL state.
    glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)

    buffers = []
    for format, array in data:
        attribute = vertexattribute.create_attribute(format)
        assert size == len(array) // attribute.count, \
            'Data for %s is incorrect length' % format

        # Client-side (non-VBO) buffer; one per attribute.
        buffer = vertexbuffer.create_mappable_buffer(
            size * attribute.stride, vbo=False)

        attribute.set_region(buffer, 0, size, array)
        attribute.enable()
        attribute.set_pointer(buffer.ptr)
        # Keep a reference so the buffer memory stays alive until the
        # glDrawArrays call actually reads it.
        buffers.append(buffer)

    glDrawArrays(mode, 0, size)
    glFlush()

    glPopClientAttrib()
def draw_indexed(size, mode, indices, *data):
    '''Draw a primitive with indexed vertices immediately.

    :Parameters:
        `size` : int
            Number of vertices given
        `mode` : int
            OpenGL drawing mode, e.g. ``GL_TRIANGLES``
        `indices` : sequence of int
            Sequence of integers giving indices into the vertex list.
        `data` : data items
            Attribute formats and data.  See the module summary for details.

    '''
    # Save client array state so the attribute pointers set below don't
    # leak into the caller's GL state.
    glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)

    buffers = []
    for format, array in data:
        attribute = vertexattribute.create_attribute(format)
        assert size == len(array) // attribute.count, \
            'Data for %s is incorrect length' % format

        buffer = vertexbuffer.create_mappable_buffer(
            size * attribute.stride, vbo=False)

        attribute.set_region(buffer, 0, size, array)
        attribute.enable()
        attribute.set_pointer(buffer.ptr)
        # Keep a reference so the buffer memory stays alive until the
        # glDrawElements call actually reads it.
        buffers.append(buffer)

    # Pick the narrowest index type that can address `size` vertices.
    if size <= 0xff:
        index_type = GL_UNSIGNED_BYTE
        index_c_type = ctypes.c_ubyte
    elif size <= 0xffff:
        index_type = GL_UNSIGNED_SHORT
        index_c_type = ctypes.c_ushort
    else:
        index_type = GL_UNSIGNED_INT
        index_c_type = ctypes.c_uint

    index_array = (index_c_type * len(indices))(*indices)
    glDrawElements(mode, len(indices), index_type, index_array)
    glFlush()

    glPopClientAttrib()
def _parse_data(data):
'''Given a list of data items, returns (formats, initial_arrays).'''
assert data, 'No attribute formats given'
# Return tuple (formats, initial_arrays).
formats = []
initial_arrays = []
for i, format in enumerate(data):
if isinstance(format, tuple):
format, array = format
initial_arrays.append((i, array))
formats.append(format)
formats = tuple(formats)
return formats, initial_arrays
def _get_default_batch():
    '''Return the lazily-created Batch shared by the current object space.'''
    space = gl.current_context.object_space
    # Cache one Batch per shared object space on first use.
    if not hasattr(space, 'pyglet_graphics_default_batch'):
        space.pyglet_graphics_default_batch = Batch()
    return space.pyglet_graphics_default_batch
def vertex_list(count, *data):
    '''Create a `VertexList` not associated with a batch, group or mode.

    :Parameters:
        `count` : int
            The number of vertices in the list.
        `data` : data items
            Attribute formats and initial data for the vertex list.  See
            the module summary for details.

    :rtype: `VertexList`
    '''
    # mode=0 because the default batch is never drawn; vertex lists
    # returned from this function are drawn directly by the app.
    batch = _get_default_batch()
    return batch.add(count, 0, None, *data)
def vertex_list_indexed(count, indices, *data):
    '''Create an `IndexedVertexList` not associated with a batch, group or mode.

    :Parameters:
        `count` : int
            The number of vertices in the list.
        `indices` : sequence
            Sequence of integers giving indices into the vertex list.
        `data` : data items
            Attribute formats and initial data for the vertex list.  See
            the module summary for details.

    :rtype: `IndexedVertexList`
    '''
    # mode=0 because the default batch is never drawn; vertex lists
    # returned from this function are drawn directly by the app.
    batch = _get_default_batch()
    return batch.add_indexed(count, 0, None, indices, *data)
class Batch(object):
    '''Manage a collection of vertex lists for batched rendering.

    Vertex lists are added to a `Batch` using the `add` and `add_indexed`
    methods. An optional group can be specified along with the vertex list,
    which gives the OpenGL state required for its rendering. Vertex lists
    with shared mode and group are allocated into adjacent areas of memory and
    sent to the graphics card in a single operation.

    Call `VertexList.delete` to remove a vertex list from the batch.
    '''
    def __init__(self):
        '''Create a graphics batch.'''
        # Mapping to find domain.
        # group -> (attributes, mode, indexed) -> domain
        self.group_map = {}
        # Mapping of group to list of children.
        self.group_children = {}
        # List of top-level groups
        self.top_groups = []
        # Flat list of zero-argument callables executed by draw(); rebuilt
        # lazily whenever _draw_list_dirty is set.
        self._draw_list = []
        self._draw_list_dirty = False
    def invalidate(self):
        '''Force the batch to update the draw list.

        This method can be used to force the batch to re-compute the draw list
        when the ordering of groups has changed.

        :since: pyglet 1.2
        '''
        self._draw_list_dirty = True
    def add(self, count, mode, group, *data):
        '''Add a vertex list to the batch.

        :Parameters:
            `count` : int
                The number of vertices in the list.
            `mode` : int
                OpenGL drawing mode enumeration; for example, one of
                ``GL_POINTS``, ``GL_LINES``, ``GL_TRIANGLES``, etc.
                See the module summary for additional information.
            `group` : `Group`
                Group of the vertex list, or ``None`` if no group is required.
            `data` : data items
                Attribute formats and initial data for the vertex list. See
                the module summary for details.

        :rtype: `VertexList`
        '''
        formats, initial_arrays = _parse_data(data)
        domain = self._get_domain(False, mode, group, formats)
        # Create vertex list and initialize
        vlist = domain.create(count)
        for i, array in initial_arrays:
            vlist._set_attribute_data(i, array)
        return vlist
    def add_indexed(self, count, mode, group, indices, *data):
        '''Add an indexed vertex list to the batch.

        :Parameters:
            `count` : int
                The number of vertices in the list.
            `mode` : int
                OpenGL drawing mode enumeration; for example, one of
                ``GL_POINTS``, ``GL_LINES``, ``GL_TRIANGLES``, etc.
                See the module summary for additional information.
            `group` : `Group`
                Group of the vertex list, or ``None`` if no group is required.
            `indices` : sequence
                Sequence of integers giving indices into the vertex list.
            `data` : data items
                Attribute formats and initial data for the vertex list. See
                the module summary for details.

        :rtype: `IndexedVertexList`
        '''
        formats, initial_arrays = _parse_data(data)
        domain = self._get_domain(True, mode, group, formats)
        # Create vertex list and initialize
        vlist = domain.create(count, len(indices))
        start = vlist.start
        # Indices are given relative to this vertex list, but the domain's
        # index buffer is shared, so offset them by the list's start.
        # NOTE(review): under Python 3 ``map`` returns a lazy iterator --
        # confirm _set_index_data accepts one.  The Python 2 ``print``
        # statements in _dump_draw_list suggest this file targets Python 2,
        # where map returns a list.
        vlist._set_index_data(map(lambda i: i + start, indices))
        for i, array in initial_arrays:
            vlist._set_attribute_data(i, array)
        return vlist
    def migrate(self, vertex_list, mode, group, batch):
        '''Migrate a vertex list to another batch and/or group.

        `vertex_list` and `mode` together identify the vertex list to migrate.
        `group` and `batch` are new owners of the vertex list after migration.

        The results are undefined if `mode` is not correct or if `vertex_list`
        does not belong to this batch (they are not checked and will not
        necessarily throw an exception immediately).

        `batch` can remain unchanged if only a group change is desired.

        :Parameters:
            `vertex_list` : `VertexList`
                A vertex list currently belonging to this batch.
            `mode` : int
                The current GL drawing mode of the vertex list.
            `group` : `Group`
                The new group to migrate to.
            `batch` : `Batch`
                The batch to migrate to (or the current batch).
        '''
        # ``__formats`` is name-mangled to ``_Batch__formats``; it was stored
        # on the domain inside _get_domain, which is also in this class, so
        # the mangled names agree.
        formats = vertex_list.domain.__formats
        domain = batch._get_domain(False, mode, group, formats)
        vertex_list.migrate(domain)
    def _get_domain(self, indexed, mode, group, formats):
        # Find (or lazily create) the vertex domain for this combination of
        # formats, drawing mode and indexed-ness within the given group.
        if group is None:
            # ``None`` is replaced by the shared null_group singleton so it
            # can be used as a dictionary key.
            group = null_group
        # Batch group
        if group not in self.group_map:
            self._add_group(group)
        domain_map = self.group_map[group]
        # Find domain given formats, indices and mode
        key = (formats, mode, indexed)
        try:
            domain = domain_map[key]
        except KeyError:
            # Create domain
            if indexed:
                domain = vertexdomain.create_indexed_domain(*formats)
            else:
                domain = vertexdomain.create_domain(*formats)
            domain.__formats = formats
            domain_map[key] = domain
            self._draw_list_dirty = True
        return domain
    def _add_group(self, group):
        # Register a group with the batch, recursively registering any
        # missing ancestors and linking the group into the tree.
        self.group_map[group] = {}
        if group.parent is None:
            self.top_groups.append(group)
        else:
            if group.parent not in self.group_map:
                self._add_group(group.parent)
            if group.parent not in self.group_children:
                self.group_children[group.parent] = []
            self.group_children[group.parent].append(group)
        self._draw_list_dirty = True
    def _update_draw_list(self):
        '''Visit group tree in preorder and create a list of bound methods
        to call.
        '''
        def visit(group):
            draw_list = []
            # Draw domains using this group
            domain_map = self.group_map[group]
            # Iterate over a copy: empty domains are deleted mid-traversal.
            for (formats, mode, indexed), domain in list(domain_map.items()):
                # Remove unused domains from batch
                if domain._is_empty():
                    del domain_map[(formats, mode, indexed)]
                    continue
                # The immediately-invoked outer lambda binds domain/mode by
                # value, avoiding the late-binding closure pitfall in loops.
                draw_list.append(
                    (lambda d, m: lambda: d.draw(m))(domain, mode))
            # Sort and visit child groups of this group
            children = self.group_children.get(group)
            if children:
                children.sort()
                for child in list(children):
                    draw_list.extend(visit(child))
            if children or domain_map:
                return [group.set_state] + draw_list + [group.unset_state]
            else:
                # Remove unused group from batch
                del self.group_map[group]
                if group.parent:
                    self.group_children[group.parent].remove(group)
                try:
                    del self.group_children[group]
                except KeyError:
                    pass
                try:
                    self.top_groups.remove(group)
                except ValueError:
                    pass
                return []
        self._draw_list = []
        self.top_groups.sort()
        for group in list(self.top_groups):
            self._draw_list.extend(visit(group))
        self._draw_list_dirty = False
        if _debug_graphics_batch:
            self._dump_draw_list()
    def _dump_draw_list(self):
        # Debug helper: print the group/domain tree with allocated regions.
        # NOTE(review): uses Python 2 ``print`` statements (SyntaxError under
        # Python 3), inconsistent with the ``list(...)`` py3-compat wrappers
        # used elsewhere in this class; only reached when
        # _debug_graphics_batch is enabled.
        def dump(group, indent=''):
            print indent, 'Begin group', group
            domain_map = self.group_map[group]
            for _, domain in domain_map.items():
                print indent, '  ', domain
                for start, size in zip(*domain.allocator.get_allocated_regions()):
                    print indent, '    ', 'Region %d size %d:' % (start, size)
                    for key, attribute in domain.attribute_names.items():
                        print indent, '      ',
                        try:
                            region = attribute.get_region(attribute.buffer,
                                                          start, size)
                            print key, region.array[:]
                        except:
                            print key, '(unmappable)'
            for child in self.group_children.get(group, ()):
                dump(child, indent + '  ')
            print indent, 'End group', group
        print 'Draw list for %r:' % self
        for group in self.top_groups:
            dump(group)
    def draw(self):
        '''Draw the batch.
        '''
        # Rebuild the flat draw list lazily, only when something changed.
        if self._draw_list_dirty:
            self._update_draw_list()
        for func in self._draw_list:
            func()
    def draw_subset(self, vertex_lists):
        '''Draw only some vertex lists in the batch.

        The use of this method is highly discouraged, as it is quite
        inefficient.  Usually an application can be redesigned so that batches
        can always be drawn in their entirety, using `draw`.

        The given vertex lists must belong to this batch; behaviour is
        undefined if this condition is not met.

        :Parameters:
            `vertex_lists` : sequence of `VertexList` or `IndexedVertexList`
                Vertex lists to draw.
        '''
        # Horrendously inefficient.
        def visit(group):
            group.set_state()
            # Draw domains using this group
            domain_map = self.group_map[group]
            for (_, mode, _), domain in domain_map.items():
                for alist in vertex_lists:
                    if alist.domain is domain:
                        alist.draw(mode)
            # Sort and visit child groups of this group
            children = self.group_children.get(group)
            if children:
                children.sort()
                for child in children:
                    visit(child)
            group.unset_state()
        self.top_groups.sort()
        for group in self.top_groups:
            visit(group)
class Group(object):
    '''Group of common OpenGL state.

    Before a vertex list is rendered, its group's OpenGL state is set; as
    are that state's ancestors' states.  This can be defined arbitrarily on
    subclasses; the default state change has no effect, and groups vertex
    lists only in the order in which they are drawn.
    '''
    def __init__(self, parent=None):
        '''Create a group.

        :Parameters:
            `parent` : `Group`
                Group to contain this group; its state will be set before
                this state's.
        '''
        self.parent = parent

    def __lt__(self, other):
        # Arbitrary but deterministic ordering so heterogeneous groups can
        # be sorted within a batch's draw list.
        return hash(self) < hash(other)

    def set_state(self):
        '''Apply the OpenGL state change.

        The default implementation does nothing.'''
        pass

    def unset_state(self):
        '''Repeal the OpenGL state change.

        The default implementation does nothing.'''
        pass

    def set_state_recursive(self):
        '''Set this group and its ancestry.

        Call this method if you are using a group in isolation: the
        parent groups will be called in top-down order, with this class's
        `set` being called last.
        '''
        if self.parent is not None:
            self.parent.set_state_recursive()
        self.set_state()

    def unset_state_recursive(self):
        '''Unset this group and its ancestry.

        The inverse of `set_state_recursive`.
        '''
        self.unset_state()
        if self.parent is not None:
            self.parent.unset_state_recursive()
class NullGroup(Group):
    '''Group used in place of ``None`` when vertex lists are added to a
    batch without an explicit group.

    Its state setters are the inherited no-ops, so it has no effect on
    rendering; it exists only so every vertex list has a real group.
    '''
    pass
# Shared module-level singleton; Batch._get_domain substitutes this when a
# vertex list is added with group=None.
#: The default group.
#:
#: :type: `Group`
null_group = NullGroup()
class TextureGroup(Group):
    '''A group that enables and binds a texture.

    Texture groups compare equal when their textures' targets and names
    (ids) are equal and their parents compare equal.
    '''
    # Don't use this, create your own group classes that are more specific.
    # This is just an example.
    def __init__(self, texture, parent=None):
        '''Create a texture group.

        :Parameters:
            `texture` : `Texture`
                Texture to bind.
            `parent` : `Group`
                Parent group.
        '''
        super(TextureGroup, self).__init__(parent)
        self.texture = texture

    def set_state(self):
        target = self.texture.target
        glEnable(target)
        glBindTexture(target, self.texture.id)

    def unset_state(self):
        glDisable(self.texture.target)

    def __hash__(self):
        return hash((self.texture.target, self.texture.id, self.parent))

    def __eq__(self, other):
        # Exact-class comparison: subclasses never compare equal.
        if self.__class__ is not other.__class__:
            return False
        return (self.texture.target == other.texture.target and
                self.texture.id == other.texture.id and
                self.parent == other.parent)

    def __repr__(self):
        return '%s(id=%d)' % (self.__class__.__name__, self.texture.id)
class OrderedGroup(Group):
    '''A group with partial order.

    Ordered groups with a common parent are rendered in ascending order of
    their ``order`` field.  This is a useful way to render multiple layers
    of a scene within a single batch.
    '''
    # This can be useful as a top-level group, or as a superclass for other
    # groups that need to be ordered.
    #
    # As a top-level group it's useful because graphics can be composited
    # in a known order even if they don't know about each other or share
    # any known group.
    def __init__(self, order, parent=None):
        '''Create an ordered group.

        :Parameters:
            `order` : int
                Order of this group.
            `parent` : `Group`
                Parent of this group.
        '''
        super(OrderedGroup, self).__init__(parent)
        self.order = order

    def __lt__(self, other):
        # Compare numerically against other OrderedGroups; otherwise fall
        # back to the base class's arbitrary hash-based ordering.
        if isinstance(other, OrderedGroup):
            return self.order < other.order
        return super(OrderedGroup, self).__lt__(other)

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.order == other.order and self.parent == other.parent

    def __hash__(self):
        return hash((self.order, self.parent))

    def __repr__(self):
        return '%s(%d)' % (self.__class__.__name__, self.order)
| mit |
mnahm5/django-estore | Lib/site-packages/boto/regioninfo.py | 128 | 6214 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import boto
from boto.compat import json
from boto.exception import BotoClientError
def load_endpoint_json(path):
    """
    Read a JSON endpoints file from disk and return the parsed data.

    :param path: The path to the JSON file
    :type path: string

    :returns: The loaded data
    """
    with open(path, 'r') as fp:
        return json.load(fp)
def merge_endpoints(defaults, additions):
    """
    Deep-merge additional endpoint data into an existing endpoint mapping.

    A plain ``defaults.update(additions)`` would clobber whole services,
    so each service's region mapping is updated individually: regions
    present in both inputs take the value from ``additions`` while the
    rest are preserved.  ``defaults`` is modified in place and returned.

    :param defaults: The existing endpoints data
    :type defaults: dict

    :param additions: The additional endpoints data
    :type additions: dict

    :returns: The modified endpoints data
    :rtype: dict
    """
    for service, region_info in additions.items():
        # setdefault gives us the existing per-service dict (or a fresh
        # one), which we then update with the new regions.
        defaults.setdefault(service, {}).update(region_info)
    return defaults
def load_regions():
    """
    Build the region/endpoint information from the JSON data files.

    The bundled ``boto/endpoints.json`` file is always loaded first.
    Users may override/extend it by pointing either the ``BOTO_ENDPOINTS``
    environment variable or the ``endpoints_path`` config option at an
    absolute path to their own JSON file; when present, that file is
    additively merged on top of the defaults.

    :returns: The endpoints data
    :rtype: dict
    """
    endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
    # The environment variable takes precedence over the config option.
    user_path = os.environ.get('BOTO_ENDPOINTS')
    if not user_path:
        user_path = boto.config.get('Boto', 'endpoints_path')
    if user_path:
        endpoints = merge_endpoints(endpoints, load_endpoint_json(user_path))
    return endpoints
def get_regions(service_name, region_cls=None, connection_cls=None):
    """
    Given a service name (like ``ec2``), returns a list of ``RegionInfo``
    objects for that service.

    This leverages the ``endpoints.json`` file (+ optional user overrides)
    to configure/construct all the objects.

    :param service_name: The name of the service to construct the
        ``RegionInfo`` objects for. Ex: ``ec2``, ``s3``, ``sns``, etc.
    :type service_name: string

    :param region_cls: (Optional) The class to use when constructing. By
        default, this is ``RegionInfo``.
    :type region_cls: class

    :param connection_cls: (Optional) The connection class for the
        ``RegionInfo`` object. Providing this allows the ``connect`` method
        on the ``RegionInfo`` to work. Default is ``None`` (no connection).
    :type connection_cls: class

    :returns: A list of configured ``RegionInfo`` objects
    :rtype: list
    """
    endpoints = load_regions()
    if service_name not in endpoints:
        raise BotoClientError(
            "Service '%s' not found in endpoints." % service_name
        )
    if region_cls is None:
        region_cls = RegionInfo
    # One RegionInfo per known region of the requested service.
    return [
        region_cls(name=region_name, endpoint=endpoint,
                   connection_cls=connection_cls)
        for region_name, endpoint in endpoints.get(service_name, {}).items()
    ]
class RegionInfo(object):
    """
    Represents an AWS Region
    """
    # SAX element names mapped onto the attribute names used here.
    _XML_ATTR_MAP = {'regionName': 'name', 'regionEndpoint': 'endpoint'}

    def __init__(self, connection=None, name=None, endpoint=None,
                 connection_cls=None):
        self.connection = connection
        self.name = name
        self.endpoint = endpoint
        self.connection_cls = connection_cls

    def __repr__(self):
        return 'RegionInfo:%s' % self.name

    def startElement(self, name, attrs, connection):
        # SAX-style parse hook; region elements have no nested structure.
        return None

    def endElement(self, name, value, connection):
        # Known element names map to our attribute names; anything else is
        # stored verbatim under the element name.
        setattr(self, self._XML_ATTR_MAP.get(name, name), value)

    def connect(self, **kw_params):
        """
        Connect to this Region's endpoint. Returns an connection
        object pointing to the endpoint associated with this region.
        You may pass any of the arguments accepted by the connection
        class's constructor as keyword arguments and they will be
        passed along to the connection object.

        :rtype: Connection object
        :return: The connection to this regions endpoint
        """
        if not self.connection_cls:
            return None
        return self.connection_cls(region=self, **kw_params)
| mit |
colinmcd94/kickdata | lib/bs4/oldbs4/tests/test_builder_registry.py | 2 | 4954 | """Tests of the builder registry."""
import unittest
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry as registry,
HTMLParserTreeBuilder,
LXMLTreeBuilderForXML,
LXMLTreeBuilder,
TreeBuilderRegistry,
)
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError:
HTML5LIB_PRESENT = False
class BuiltInRegistryTest(unittest.TestCase):
    """Test the built-in registry with the default builders registered."""
    def test_combination(self):
        # Feature-pair lookups resolve to the expected concrete builders.
        self.assertEqual(registry.lookup('fast', 'html'),
                         LXMLTreeBuilder)
        self.assertEqual(registry.lookup('permissive', 'xml'),
                         LXMLTreeBuilderForXML)
        self.assertEqual(registry.lookup('strict', 'html'),
                         HTMLParserTreeBuilder)
        if HTML5LIB_PRESENT:
            # html5lib is optional; only assert on it when importable.
            self.assertEqual(registry.lookup('permissive', 'html'),
                             HTML5TreeBuilder)
    def test_lookup_by_markup_type(self):
        # With only a markup type, html5lib (if installed) wins for HTML;
        # lxml otherwise.  XML always resolves to lxml's XML builder.
        if HTML5LIB_PRESENT:
            self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
        else:
            self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
        self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
    def test_named_library(self):
        # Builders can also be looked up by the parser library they wrap.
        self.assertEqual(registry.lookup('lxml', 'xml'),
                         LXMLTreeBuilderForXML)
        self.assertEqual(registry.lookup('lxml', 'html'),
                         LXMLTreeBuilder)
        if HTML5LIB_PRESENT:
            self.assertEqual(registry.lookup('html5lib'),
                             HTML5TreeBuilder)
        self.assertEqual(registry.lookup('html.parser'),
                         HTMLParserTreeBuilder)
    def test_beautifulsoup_constructor_does_lookup(self):
        # You can pass in a string.
        BeautifulSoup("", features="html")
        # Or a list of strings.
        BeautifulSoup("", features=["html", "fast"])
        # You'll get an exception if BS can't find an appropriate
        # builder.
        self.assertRaises(ValueError, BeautifulSoup,
                          "", features="no-such-feature")
class RegistryTest(unittest.TestCase):
    """Test the TreeBuilderRegistry class in general."""
    def setUp(self):
        # Fresh, empty registry for every test.
        self.registry = TreeBuilderRegistry()
    def builder_for_features(self, *feature_list):
        # Dynamically create and register a trivial builder class that
        # advertises exactly the given features.  Registration order
        # matters: later registrations win ties.
        cls = type('Builder_' + '_'.join(feature_list),
                   (object,), {'features' : feature_list})
        self.registry.register(cls)
        return cls
    def test_register_with_no_features(self):
        builder = self.builder_for_features()
        # Since the builder advertises no features, you can't find it
        # by looking up features.
        self.assertEqual(self.registry.lookup('foo'), None)
        # But you can find it by doing a lookup with no features, if
        # this happens to be the only registered builder.
        self.assertEqual(self.registry.lookup(), builder)
    def test_register_with_features_makes_lookup_succeed(self):
        builder = self.builder_for_features('foo', 'bar')
        self.assertEqual(self.registry.lookup('foo'), builder)
        self.assertEqual(self.registry.lookup('bar'), builder)
    def test_lookup_fails_when_no_builder_implements_feature(self):
        builder = self.builder_for_features('foo', 'bar')
        self.assertEqual(self.registry.lookup('baz'), None)
    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
        builder1 = self.builder_for_features('foo')
        builder2 = self.builder_for_features('bar')
        self.assertEqual(self.registry.lookup(), builder2)
    def test_lookup_fails_when_no_tree_builders_registered(self):
        self.assertEqual(self.registry.lookup(), None)
    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
        has_one = self.builder_for_features('foo')
        has_the_other = self.builder_for_features('bar')
        has_both_early = self.builder_for_features('foo', 'bar', 'baz')
        has_both_late = self.builder_for_features('foo', 'bar', 'quux')
        lacks_one = self.builder_for_features('bar')
        # NOTE(review): this rebinds ``has_the_other``.  Only the side
        # effect matters -- registering another 'foo'-only builder after
        # the 'foo'+'bar' builders -- the local names are otherwise unused.
        has_the_other = self.builder_for_features('foo')
        # There are two builders featuring 'foo' and 'bar', but
        # the one that also features 'quux' was registered later.
        self.assertEqual(self.registry.lookup('foo', 'bar'),
                         has_both_late)
        # There is only one builder featuring 'foo', 'bar', and 'baz'.
        self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'),
                         has_both_early)
    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
        builder1 = self.builder_for_features('foo', 'bar')
        builder2 = self.builder_for_features('foo', 'baz')
        self.assertEqual(self.registry.lookup('bar', 'baz'), None)
| apache-2.0 |
zetalog/linux | drivers/comedi/drivers/ni_routing/tools/csv_collection.py | 548 | 1272 | # SPDX-License-Identifier: GPL-2.0+
# vim: ts=2:sw=2:et:tw=80:nowrap
import os, csv, glob
class CSVCollection(dict):
  """Dictionary representation of a collection of CSV sheets.

  Each file matched by ``pattern`` becomes one entry, keyed by the file's
  base name (up to the first '.').  The value is a mapping from each row's
  source signal (the ``Sources / Destinations`` column) to a dict of
  ``{destination column: cell value}`` for that row's usable cells.
  Commented rows/columns (leading '#') and empty cells are skipped, and
  sources whose row has no usable cells are dropped entirely.
  """
  delimiter = ';'
  quotechar = '"'
  source_column_name = 'Sources / Destinations'

  def __init__(self, pattern, skip_commented_lines=True, strip_lines=True):
    """Load every CSV file matching the glob ``pattern``.

    :param pattern: glob pattern of CSV files to read
    :param skip_commented_lines: ignore rows/columns beginning with '#'
    :param strip_lines: strip surrounding whitespace from cell values
    """
    super(CSVCollection, self).__init__()
    self.pattern = pattern
    # When comment skipping is off, use a marker no cell will start with.
    comment = '#' if skip_commented_lines else 'blahblahblah'
    if strip_lines:
      clean = lambda s: s.strip()
    else:
      clean = lambda s: s
    source = self.source_column_name
    for fname in glob.glob(pattern):
      sheet_name = os.path.basename(fname).partition('.')[0]
      with open(fname) as fh:
        reader = csv.DictReader(fh, delimiter=self.delimiter,
                                quotechar=self.quotechar)
        sheet = {}
        for row in reader:
          if row[source][:1] in ['', comment]:
            continue  # blank or commented source row
          sheet[row[source]] = {
            col: clean(cell) for col, cell in row.items()
            if col != source and col[:1] not in ['', comment]
            and clean(cell)[:1] not in ['', comment]
          }
        # Drop sources whose row contained no usable cells.
        self[sheet_name] = {src: cells for src, cells in sheet.items()
                            if cells}
| gpl-2.0 |
TimBuckley/effective_django | django/contrib/gis/geos/linestring.py | 105 | 5813 | from django.contrib.gis.geos.base import numpy
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos import prototypes as capi
from django.utils.six.moves import xrange
class LineString(GEOSGeometry):
    # GEOS constructor used to create the underlying geometry pointer.
    _init_func = capi.create_linestring
    # Minimum number of points for a valid line string.
    _minlength = 2
    #### Python 'magic' routines ####
    def __init__(self, *args, **kwargs):
        """
        Initializes on the given sequence -- may take lists, tuples, NumPy arrays
        of X,Y pairs, or Point objects.  If Point objects are used, ownership is
        _not_ transferred to the LineString object.

        Examples:
         ls = LineString((1, 1), (2, 2))
         ls = LineString([(1, 1), (2, 2)])
         ls = LineString(array([(1, 1), (2, 2)]))
         ls = LineString(Point(1, 1), Point(2, 2))
        """
        # If only one argument provided, set the coords array appropriately
        if len(args) == 1:
            coords = args[0]
        else:
            coords = args
        if isinstance(coords, (tuple, list)):
            # Getting the number of coords and the number of dimensions -- which
            # must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
            ncoords = len(coords)
            if coords:
                ndim = len(coords[0])
            else:
                raise TypeError('Cannot initialize on empty sequence.')
            self._checkdim(ndim)
            # Incrementing through each of the coordinates and verifying
            # (starting from 1; index 0 already determined ndim above).
            for i in xrange(1, ncoords):
                if not isinstance(coords[i], (tuple, list, Point)):
                    raise TypeError('each coordinate should be a sequence (list or tuple)')
                if len(coords[i]) != ndim:
                    raise TypeError('Dimension mismatch.')
            numpy_coords = False
        elif numpy and isinstance(coords, numpy.ndarray):
            shape = coords.shape # Using numpy's shape.
            if len(shape) != 2:
                raise TypeError('Too many dimensions.')
            self._checkdim(shape[1])
            ncoords = shape[0]
            ndim = shape[1]
            numpy_coords = True
        else:
            raise TypeError('Invalid initialization input for LineStrings.')
        # Creating a coordinate sequence object because it is easier to
        # set the points using GEOSCoordSeq.__setitem__().
        cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3))
        for i in xrange(ncoords):
            if numpy_coords:
                cs[i] = coords[i, :]
            elif isinstance(coords[i], Point):
                cs[i] = coords[i].tuple
            else:
                cs[i] = coords[i]
        # If SRID was passed in with the keyword arguments
        srid = kwargs.get('srid', None)
        # Calling the base geometry initialization with the returned pointer
        # from the function.  Ownership of the coordinate sequence passes to
        # the new GEOS geometry.
        super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
    def __iter__(self):
        "Allows iteration over this LineString."
        for i in xrange(len(self)):
            yield self[i]
    def __len__(self):
        "Returns the number of points in this LineString."
        return len(self._cs)
    def _get_single_external(self, index):
        # Point access is delegated to the underlying coordinate sequence.
        return self._cs[index]
    _get_single_internal = _get_single_external
    def _set_list(self, length, items):
        # Replace all coordinates at once by building a fresh coordinate
        # sequence and swapping in a newly created GEOS geometry.
        ndim = self._cs.dims
        hasz = self._cs.hasz # I don't understand why these are different
        # create a new coordinate sequence and populate accordingly
        cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
        for i, c in enumerate(items):
            cs[i] = c
        ptr = self._init_func(cs.ptr)
        if ptr:
            # Free the old geometry before adopting the new pointer.
            capi.destroy_geom(self.ptr)
            self.ptr = ptr
            self._post_init(self.srid)
        else:
            # can this happen?
            raise GEOSException('Geometry resulting from slice deletion was invalid.')
    def _set_single(self, index, value):
        self._checkindex(index)
        self._cs[index] = value
    def _checkdim(self, dim):
        # Only 2D (x, y) and 3D (x, y, z) coordinates are supported.
        if dim not in (2, 3):
            raise TypeError('Dimension mismatch.')
    #### Sequence Properties ####
    @property
    def tuple(self):
        "Returns a tuple version of the geometry from the coordinate sequence."
        return self._cs.tuple
    coords = tuple
    def _listarr(self, func):
        """
        Internal routine that returns a sequence (list) corresponding with
        the given function.  Will return a numpy array if possible.
        """
        lst = [func(i) for i in xrange(len(self))]
        if numpy:
            return numpy.array(lst) # ARRRR!
        else:
            return lst
    @property
    def array(self):
        "Returns a numpy array for the LineString."
        return self._listarr(self._cs.__getitem__)
    @property
    def merged(self):
        "Returns the line merge of this LineString."
        return self._topology(capi.geos_linemerge(self.ptr))
    @property
    def x(self):
        "Returns a list or numpy array of the X variable."
        return self._listarr(self._cs.getX)
    @property
    def y(self):
        "Returns a list or numpy array of the Y variable."
        return self._listarr(self._cs.getY)
    @property
    def z(self):
        "Returns a list or numpy array of the Z variable."
        if not self.hasz:
            return None
        else:
            return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
    """
    A LinearRing is a closed LineString, used as the exterior boundary or
    the holes of a Polygon.  A valid ring needs at least four points (with
    the first point equal to the last).
    """
    # Fix: LineString defines ``_minlength`` (all lower case); the old
    # camel-case ``_minLength`` spelling here did not override it, so rings
    # presumably fell back to LineString's minimum of 2 points -- confirm
    # against GEOSGeometry's length validation.  Both spellings are defined
    # so any external code reading the old name keeps working.
    _minlength = 4
    _minLength = 4
    _init_func = capi.create_linearring
| bsd-3-clause |
battlecat/Spirit | spirit/category/admin/views.py | 11 | 1660 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.utils.translation import ugettext as _
from ...core.utils.decorators import administrator_required
from ..models import Category
from .forms import CategoryForm
User = get_user_model()
@administrator_required
def index(request):
    """Render the admin index page listing all public top-level categories."""
    top_level = Category.objects.filter(parent=None, is_private=False)
    return render(request, 'spirit/category/admin/index.html',
                  {'categories': top_level, })
@administrator_required
def create(request):
    """Create a new category.

    GET renders an empty form; a valid POST saves the category and
    redirects to the admin index.  An invalid POST re-renders the bound
    form with its errors.
    """
    if request.method != 'POST':
        form = CategoryForm()
    else:
        form = CategoryForm(data=request.POST)
        if form.is_valid():
            form.save()
            return redirect(reverse("spirit:admin:category:index"))
    return render(request, 'spirit/category/admin/create.html',
                  {'form': form, })
@administrator_required
def update(request, category_id):
    """Edit an existing category.

    The category is looked up by primary key (404 when missing).  A valid
    POST saves the changes, flashes a confirmation message and redirects
    to the admin index; otherwise the (possibly bound) form is rendered.
    """
    category = get_object_or_404(Category, pk=category_id)
    if request.method != 'POST':
        form = CategoryForm(instance=category)
    else:
        form = CategoryForm(data=request.POST, instance=category)
        if form.is_valid():
            form.save()
            messages.info(request, _("The category has been updated!"))
            return redirect(reverse("spirit:admin:category:index"))
    return render(request, 'spirit/category/admin/update.html',
                  {'form': form, })
| mit |
kawasaki2013/python-for-android-x86 | python3-alpha/python3-src/Lib/encodings/shift_jis_2004.py | 816 | 1059 | #
# shift_jis_2004.py: Python Unicode Codec for SHIFT_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jis_2004')
class Codec(codecs.Codec):
    # Stateless codec: encode/decode come directly from the C codec object.
    encode = codec.encode
    decode = codec.decode
# The incremental forms delegate to the multibyte codec machinery, which
# buffers partial multi-byte sequences between calls.
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
# Stream wrappers combine the stateless Codec with the multibyte stream
# machinery for file-like reading/writing.
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Return the CodecInfo entry used to register this codec with the
    codecs registry under the name 'shift_jis_2004'."""
    return codecs.CodecInfo(
        name='shift_jis_2004',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
kose-y/pylearn2 | pylearn2/scripts/dbm/show_samples.py | 37 | 8032 | #!/usr/bin/env python
"""
Usage: python show_samples <path_to_a_saved_DBM.pkl>
Displays a batch of data from the DBM's training set.
Then interactively allows the user to run Gibbs steps
starting from that seed data to see how the DBM's MCMC
sampling changes the data.
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import numpy as np
import sys
import time
from pylearn2.config import yaml_parse
from pylearn2.expr.basic import is_binary
from pylearn2.gui.patch_viewer import PatchViewer
from pylearn2.utils import serial
from theano import function
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.compat.six.moves import input, xrange
def init_viewer(dataset, rows, cols):
    """
    Build a PatchViewer sized for a rows x cols grid of dataset patches.

    If the dataset can map patches back from design space
    (``mapback_for_viewer``), each sample gets a second column so the
    mapped-back patch can be shown beside the original.

    Parameters
    ----------
    dataset: pylearn2 dataset
    rows: int
    cols: int
    """
    n_samples = rows * cols
    vis_batch = dataset.get_batch_topo(n_samples)
    batch_size, patch_rows, patch_cols, channels = vis_batch.shape
    assert batch_size == n_samples
    mapback = hasattr(dataset, 'mapback_for_viewer')
    return PatchViewer((rows, cols * (1 + mapback)),
                       (patch_rows, patch_cols),
                       is_color=(channels == 3))
def get_mapped_batch(dataset, design_batch):
    """
    Map a batch back through the dataset's viewer transform and return it
    as a topological batch.

    Parameters
    ----------
    dataset: pylearn2 dataset
    design_batch: numpy array
        Either a design matrix (2D) or a topological batch, which is
        converted to design space first.
    """
    if design_batch.ndim != 2:
        design_batch = dataset.get_design_matrix(design_batch)
    mapped_design = dataset.mapback_for_viewer(design_batch)
    return dataset.get_topological_view(mapped_design)
def update_viewer(dataset, pv, vis_batch, rows, cols):
    """
    Fill the patch viewer with a new batch of visible samples.

    Each sample is adjusted for display; when the dataset supports
    ``mapback_for_viewer``, the mapped-back patch is added right after the
    original so they appear side by side.

    Parameters
    ----------
    dataset: pylearn2 dataset
    pv: PatchViewer
    vis_batch: numpy array
    rows: int
    cols: int
    """
    mapback = hasattr(dataset, 'mapback_for_viewer')
    display_batch = dataset.adjust_for_viewer(vis_batch)
    if display_batch.ndim == 2:
        display_batch = dataset.get_topological_view(display_batch)
    if mapback:
        mapped_batch = get_mapped_batch(dataset, vis_batch)
    for row in xrange(rows):
        base = cols * row
        for col in xrange(cols):
            idx = base + col
            pv.add_patch(display_batch[idx, :, :, :], rescale=False)
            if mapback:
                pv.add_patch(mapped_batch[idx, :, :, :], rescale=False)
def validate_all_samples(model, layer_to_state):
    """
    Validate samples

    Sanity-checks the sampling state of every layer: each state must
    validate against its layer's state space, and layer-specific binary /
    one-hot invariants are asserted.

    Parameters
    ----------
    model: pylearn2 DBM model
    layer_to_state: dict
    """
    # Run some checks on the samples, this should help catch any bugs
    layers = [model.visible_layer] + model.hidden_layers
    def check_batch_size(l):
        # NOTE(review): this helper is never called anywhere in this
        # function, and it references a name ``m`` that is not defined in
        # the visible scope -- calling it would raise NameError.  Looks
        # like dead code from an earlier revision; confirm before removal.
        if isinstance(l, (list, tuple)):
            map(check_batch_size, l)
        else:
            assert l.get_value().shape[0] == m
    for layer in layers:
        state = layer_to_state[layer]
        space = layer.get_total_state_space()
        space.validate(state)
        # Class-name string matching stands in for isinstance checks, so
        # the optional layer classes need not be imported here.
        if 'DenseMaxPool' in str(type(layer)):
            # Pool and hidden states must agree and be strictly binary.
            p, h = state
            p = p.get_value()
            h = h.get_value()
            assert np.all(p == h)
            assert is_binary(p)
        if 'BinaryVisLayer' in str(type(layer)):
            v = state.get_value()
            assert is_binary(v)
        if 'Softmax' in str(type(layer)):
            # One-hot check: binary entries summing to exactly 1 per row.
            y = state.get_value()
            assert is_binary(y)
            s = y.sum(axis=1)
            assert np.all(s == 1)
        if 'Ising' in str(type(layer)):
            # Ising states live in {-1, +1}; rescale to {0, 1} to test.
            s = state.get_value()
            assert is_binary((s + 1.) / 2.)
def get_sample_func(model, layer_to_state, x):
    """
    Compile and return the Theano function that advances the Gibbs chain.

    Parameters
    ----------
    model: pylearn2 model
    layer_to_state: dict
        Maps each layer to the shared variables holding its sampling state.
    x: int
        When positive, first compile and run `x` Gibbs steps with the
        visible layer clamped to the seed data.

    Returns
    -------
    callable
        Theano function performing one unclamped sampling update.
    """
    rng = MRG_RandomStreams(2012 + 9 + 18)
    if x > 0:
        # Burn in with the visible layer clamped to the seed data.
        clamped_updates = model.get_sampling_updates(
            layer_to_state,
            rng,
            layer_to_clamp={model.visible_layer: True},
            num_steps=x)
        start = time.time()
        clamped_func = function([], updates=clamped_updates)
        print('Clamped sampling function compilation took',
              time.time() - start)
        clamped_func()
    # Now compile the full sampling update
    full_updates = model.get_sampling_updates(layer_to_state, rng)
    assert layer_to_state[model.visible_layer] in full_updates
    start = time.time()
    sample_func = function([], updates=full_updates)
    print('Sampling function compilation took', time.time() - start)
    return sample_func
def load_model(model_path, m):
    """
    Deserialize a pickled pylearn2 model and fix its batch size.

    Parameters
    ----------
    model_path: str
        Path of the model to load.
    m: int
        Size of the batch.

    Returns
    -------
    The loaded model, configured for batches of size `m`.
    """
    print('Loading model...')
    loaded = serial.load(model_path)
    loaded.set_batch_size(m)
    return loaded
def show_samples(m, model_path):
    """
    Show samples given a DBM model.

    Runs an interactive loop: seeds the Gibbs chain from dataset data,
    optionally clamps the visible layer for a user-chosen number of steps,
    then repeatedly samples and re-renders until the user quits.

    Parameters
    ----------
    m: int
        rows * cols
    model_path: str
        Path of the model.

    NOTE(review): this function reads `rows` and `cols` as module globals
    (bound only in the ``__main__`` block), so it raises NameError if the
    module is imported and called directly -- confirm before reusing.
    """
    model = load_model(model_path, m)
    print('Loading data (used for setting up visualization '
          'and seeding gibbs chain) ...')
    # Rebuild the training dataset from the YAML stored inside the model.
    dataset_yaml_src = model.dataset_yaml_src
    dataset = yaml_parse.load(dataset_yaml_src)
    pv = init_viewer(dataset, rows, cols)
    if hasattr(model.visible_layer, 'beta'):
        beta = model.visible_layer.beta.get_value()
        print('beta: ', (beta.min(), beta.mean(), beta.max()))
    print('showing seed data...')
    vis_batch = dataset.get_batch_topo(m)
    update_viewer(dataset, pv, vis_batch, rows, cols)
    pv.show()
    print('How many Gibbs steps should I run with the seed data clamped?'
          '(negative = ignore seed data)')
    x = int(input())
    # Make shared variables representing the sampling state of the model
    layer_to_state = model.make_layer_to_state(m)
    # Seed the sampling with the data batch
    vis_sample = layer_to_state[model.visible_layer]
    validate_all_samples(model, layer_to_state)
    if x >= 0:
        # Copy the seed batch into the visible state, converting to a design
        # matrix when the visible state is not topological (4-D).
        if vis_sample.ndim == 4:
            vis_sample.set_value(vis_batch)
        else:
            design_matrix = dataset.get_design_matrix(vis_batch)
            vis_sample.set_value(design_matrix)
    validate_all_samples(model, layer_to_state)
    sample_func = get_sample_func(model, layer_to_state, x)
    # Interactive loop: prompt, step the chain, redraw.
    while True:
        print('Displaying samples. '
              'How many steps to take next? (q to quit, ENTER=1)')
        # Inner loop re-prompts until the input parses.
        while True:
            x = input()
            if x == 'q':
                quit()
            if x == '':
                x = 1
                break
            else:
                try:
                    x = int(x)
                    break
                except ValueError:
                    print('Invalid input, try again')
        for i in xrange(x):
            print(i)
            sample_func()
        validate_all_samples(model, layer_to_state)
        vis_batch = vis_sample.get_value()
        update_viewer(dataset, pv, vis_batch, rows, cols)
        pv.show()
        # For classifiers, also print the sampled labels in grid order.
        if 'Softmax' in str(type(model.hidden_layers[-1])):
            state = layer_to_state[model.hidden_layers[-1]]
            value = state.get_value()
            y = np.argmax(value, axis=1)
            assert y.ndim == 1
            for i in xrange(0, y.shape[0], cols):
                print(y[i:i+cols])
if __name__ == '__main__':
    # Grid geometry; `rows` and `cols` are read as module globals by
    # show_samples(), so they must stay bound at module level.
    rows = 10
    cols = 10
    m = rows * cols
    # Usage: show_samples.py <model_path>
    _, model_path = sys.argv
    show_samples(m, model_path)
| bsd-3-clause |
ismail-s/warehouse | tests/unit/test_config.py | 1 | 15964 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pretend
import pytest
import zope.interface
from pyramid import renderers
from warehouse import config
from warehouse.utils.wsgi import ProxyFixer, VhmRootRemover
class TestCSPTween:
    """Tests for the Content-Security-Policy tween factory."""

    def test_csp_policy(self):
        resp = pretend.stub(headers={})
        wrapped = pretend.call_recorder(lambda request: resp)
        csp_settings = {
            "csp": {
                "default-src": ["*"],
                "style-src": ["'self'", "example.net"],
            },
        }
        registry = pretend.stub(settings=csp_settings)
        tween = config.content_security_policy_tween_factory(wrapped, registry)
        req = pretend.stub(path="/project/foobar/")
        # The CSP header is assembled from the registry settings.
        assert tween(req) is resp
        assert resp.headers == {
            "Content-Security-Policy":
                "default-src *; style-src 'self' example.net",
        }

    def test_csp_policy_debug_disables(self):
        resp = pretend.stub(headers={})
        wrapped = pretend.call_recorder(lambda request: resp)
        csp_settings = {
            "csp": {
                "default-src": ["*"],
                "style-src": ["'self'", "example.net"],
            },
        }
        registry = pretend.stub(settings=csp_settings)
        tween = config.content_security_policy_tween_factory(wrapped, registry)
        req = pretend.stub(path="/_debug_toolbar/foo/")
        # Debug-toolbar responses must not carry a CSP header.
        assert tween(req) is resp
        assert resp.headers == {}
class TestRequireHTTPSTween:
    """Tests for the tween that rejects plain-HTTP :action requests."""

    def test_noops_when_disabled(self):
        inner = pretend.stub()
        registry = pretend.stub(
            settings=pretend.stub(
                get=pretend.call_recorder(lambda k, v: False),
            ),
        )
        # Disabled enforcement means the factory hands back the handler as-is.
        assert config.require_https_tween_factory(inner, registry) is inner
        assert registry.settings.get.calls == [
            pretend.call("enforce_https", True),
        ]

    @pytest.mark.parametrize(
        ("params", "scheme"),
        [
            ({}, "https"),
            ({":action": "thing"}, "https"),
            ({}, "http"),
        ],
    )
    def test_allows_through(self, params, scheme):
        req = pretend.stub(params=params, scheme=scheme)
        resp = pretend.stub()
        inner = pretend.call_recorder(lambda request: resp)
        registry = pretend.stub(settings=pretend.stub(get=lambda k, v: True))
        tween = config.require_https_tween_factory(inner, registry)
        # HTTPS requests, and HTTP requests without :action, pass through.
        assert tween(req) is resp
        assert inner.calls == [pretend.call(req)]

    @pytest.mark.parametrize(
        ("params", "scheme"),
        [
            ({":action": "thing"}, "http"),
        ],
    )
    def test_rejects(self, params, scheme):
        req = pretend.stub(params=params, scheme=scheme)
        registry = pretend.stub(settings=pretend.stub(get=lambda k, v: True))
        tween = config.require_https_tween_factory(pretend.stub(), registry)
        resp = tween(req)
        # Plain-HTTP :action requests get the Fastly-flavored 403.
        assert resp.status == "403 SSL is required"
        assert resp.headers["X-Fastly-Error"] == "803"
        assert resp.content_type == "text/plain"
        assert resp.body == b"SSL is required."
@pytest.mark.parametrize(
    ("path", "expected"),
    [
        ("/foo/bar/", True),
        ("/static/wat/", False),
        ("/_debug_toolbar/thing/", False),
    ],
)
def test_activate_hook(path, expected):
    """Static assets and the debug toolbar should not activate the hook."""
    req = pretend.stub(path=path)
    assert config.activate_hook(req) == expected
@pytest.mark.parametrize(
    ("environ", "name", "envvar", "coercer", "default", "expected"),
    [
        ({}, "test.foo", "TEST_FOO", None, None, {}),
        (
            {"TEST_FOO": "bar"}, "test.foo", "TEST_FOO", None, None,
            {"test.foo": "bar"},
        ),
        (
            {"TEST_INT": "1"}, "test.int", "TEST_INT", int, None,
            {"test.int": 1},
        ),
        ({}, "test.foo", "TEST_FOO", None, "lol", {"test.foo": "lol"}),
        (
            {"TEST_FOO": "bar"}, "test.foo", "TEST_FOO", None, "lol",
            {"test.foo": "bar"},
        ),
    ],
)
def test_maybe_set(monkeypatch, environ, name, envvar, coercer, default,
                   expected):
    """maybe_set should read the env var, coerce it, or fall back."""
    for var, val in environ.items():
        monkeypatch.setenv(var, val)
    settings = {}
    config.maybe_set(settings, name, envvar, coercer=coercer, default=default)
    assert settings == expected
@pytest.mark.parametrize(
    ("environ", "base", "name", "envvar", "expected"),
    [
        ({}, "test", "foo", "TEST_FOO", {}),
        ({"TEST_FOO": "bar"}, "test", "foo", "TEST_FOO", {"test.foo": "bar"}),
        (
            {"TEST_FOO": "bar thing=other"}, "test", "foo", "TEST_FOO",
            {"test.foo": "bar", "test.thing": "other"},
        ),
        (
            {"TEST_FOO": "bar thing=other wat=\"one two\""},
            "test", "foo", "TEST_FOO",
            {"test.foo": "bar", "test.thing": "other", "test.wat": "one two"},
        ),
    ],
)
def test_maybe_set_compound(monkeypatch, environ, base, name, envvar,
                            expected):
    """Compound env values should be split into multiple settings keys."""
    for var, val in environ.items():
        monkeypatch.setenv(var, val)
    settings = {}
    config.maybe_set_compound(settings, base, name, envvar)
    assert settings == expected
@pytest.mark.parametrize("factory", [None, pretend.stub()])
def test_find_service_factory(monkeypatch, factory):
    """A missing adapter lookup raises ValueError; otherwise it is returned."""
    iface = pretend.stub()
    fake_provided_by = pretend.call_recorder(lambda context: iface)
    monkeypatch.setattr(zope.interface, "providedBy", fake_provided_by)
    adapters = pretend.stub(
        lookup=pretend.call_recorder(lambda *a, **kw: factory),
    )
    subject = pretend.stub(registry=pretend.stub(adapters=adapters))
    if factory is not None:
        assert config.find_service_factory(subject) is factory
    else:
        with pytest.raises(ValueError):
            config.find_service_factory(subject)
@pytest.mark.parametrize(
    ("settings", "environment", "other_settings"),
    [
        (None, config.Environment.production, {}),
        ({}, config.Environment.production, {}),
        (
            {"my settings": "the settings value"},
            config.Environment.production,
            {},
        ),
        (None, config.Environment.development, {}),
        ({}, config.Environment.development, {}),
        (
            {"my settings": "the settings value"},
            config.Environment.development,
            {},
        ),
        (None, config.Environment.production, {"warehouse.theme": "my_theme"}),
    ],
)
def test_configure(monkeypatch, settings, environment, other_settings):
    """End-to-end check of config.configure(): every Configurator call it
    makes is recorded through stubs and asserted afterwards, across
    production/development environments and optional theme settings."""
    # Stub the JSON and XML-RPC renderer classes so their construction and
    # registration can be asserted.
    json_renderer_obj = pretend.stub()
    json_renderer_cls = pretend.call_recorder(lambda **kw: json_renderer_obj)
    monkeypatch.setattr(renderers, "JSON", json_renderer_cls)
    xmlrpc_renderer_obj = pretend.stub()
    xmlrpc_renderer_cls = pretend.call_recorder(
        lambda **kw: xmlrpc_renderer_obj
    )
    monkeypatch.setattr(config, "XMLRPCRenderer", xmlrpc_renderer_cls)
    # configure() detects the environment from this variable.
    if environment == config.Environment.development:
        monkeypatch.setenv("WAREHOUSE_ENV", "development")
    # Minimal stand-in for the Pyramid registry configure() will populate.
    class FakeRegistry(dict):
        def __init__(self):
            self.settings = {
                "warehouse.token": "insecure token",
                "warehouse.env": environment,
                "camo.url": "http://camo.example.com/",
                "pyramid.reload_assets": False,
                "dirs.packages": "/srv/data/pypi/packages/",
            }
    configurator_settings = other_settings.copy()
    # Record every Configurator interaction so each can be asserted below.
    configurator_obj = pretend.stub(
        registry=FakeRegistry(),
        include=pretend.call_recorder(lambda include: None),
        add_directive=pretend.call_recorder(lambda name, fn: None),
        add_wsgi_middleware=pretend.call_recorder(lambda m, *a, **kw: None),
        add_renderer=pretend.call_recorder(lambda name, renderer: None),
        add_request_method=pretend.call_recorder(lambda fn: None),
        add_jinja2_renderer=pretend.call_recorder(lambda renderer: None),
        add_jinja2_search_path=pretend.call_recorder(lambda path, name: None),
        get_settings=lambda: configurator_settings,
        add_settings=pretend.call_recorder(
            lambda d: configurator_settings.update(d)
        ),
        add_tween=pretend.call_recorder(lambda tween_factory: None),
        add_static_view=pretend.call_recorder(lambda name, path, **kw: None),
        scan=pretend.call_recorder(lambda ignore: None),
    )
    configurator_cls = pretend.call_recorder(lambda settings: configurator_obj)
    monkeypatch.setattr(config, "Configurator", configurator_cls)
    cachebuster_obj = pretend.stub()
    cachebuster_cls = pretend.call_recorder(lambda p, reload: cachebuster_obj)
    monkeypatch.setattr(config, "ManifestCacheBuster", cachebuster_cls)
    transaction_manager = pretend.stub()
    transaction = pretend.stub(
        TransactionManager=pretend.call_recorder(lambda: transaction_manager),
    )
    monkeypatch.setattr(config, "transaction", transaction)
    # Invoke the code under test.
    result = config.configure(settings=settings)
    # Settings configure() must always derive before building the Configurator.
    expected_settings = {
        "warehouse.env": environment,
        "warehouse.commit": None,
        "site.name": "Warehouse",
    }
    # Development mode additionally enables reloading and the debug toolbar.
    if environment == config.Environment.development:
        expected_settings.update({
            "enforce_https": False,
            "pyramid.reload_templates": True,
            "pyramid.reload_assets": True,
            "pyramid.prevent_http_cache": True,
            "debugtoolbar.hosts": ["0.0.0.0/0"],
            "debugtoolbar.panels": [
                "pyramid_debugtoolbar.panels.versions.VersionDebugPanel",
                "pyramid_debugtoolbar.panels.settings.SettingsDebugPanel",
                "pyramid_debugtoolbar.panels.headers.HeaderDebugPanel",
                (
                    "pyramid_debugtoolbar.panels.request_vars."
                    "RequestVarsDebugPanel"
                ),
                "pyramid_debugtoolbar.panels.renderings.RenderingsDebugPanel",
                "pyramid_debugtoolbar.panels.logger.LoggingPanel",
                (
                    "pyramid_debugtoolbar.panels.performance."
                    "PerformanceDebugPanel"
                ),
                "pyramid_debugtoolbar.panels.routes.RoutesDebugPanel",
                "pyramid_debugtoolbar.panels.sqla.SQLADebugPanel",
                "pyramid_debugtoolbar.panels.tweens.TweensDebugPanel",
                (
                    "pyramid_debugtoolbar.panels.introspection."
                    "IntrospectionDebugPanel"
                ),
            ],
        })
    if settings is not None:
        expected_settings.update(settings)
    assert configurator_cls.calls == [pretend.call(settings=expected_settings)]
    assert result is configurator_obj
    assert configurator_obj.add_wsgi_middleware.calls == [
        pretend.call(ProxyFixer, token="insecure token"),
        pretend.call(VhmRootRemover),
    ]
    # Includes: optional debugtoolbar first, the core packages in order,
    # then the optional theme package last.
    assert configurator_obj.include.calls == (
        [
            pretend.call(x) for x in [
                (
                    "pyramid_debugtoolbar"
                    if environment == config.Environment.development else None
                ),
            ]
            if x is not None
        ]
        +
        [
            pretend.call(".logging"),
            pretend.call("pyramid_jinja2"),
            pretend.call("pyramid_tm"),
            pretend.call("pyramid_services"),
            pretend.call("pyramid_rpc.xmlrpc"),
            pretend.call(".legacy.action_routing"),
            pretend.call(".i18n"),
            pretend.call(".db"),
            pretend.call(".search"),
            pretend.call(".aws"),
            pretend.call(".celery"),
            pretend.call(".sessions"),
            pretend.call(".cache.http"),
            pretend.call(".cache.origin"),
            pretend.call(".csrf"),
            pretend.call(".accounts"),
            pretend.call(".packaging"),
            pretend.call(".redirects"),
            pretend.call(".routes"),
            pretend.call(".raven"),
        ]
        +
        [
            pretend.call(x) for x in [
                configurator_settings.get("warehouse.theme"),
            ]
            if x
        ]
    )
    assert configurator_obj.add_jinja2_renderer.calls == [
        pretend.call(".html"),
        pretend.call(".txt"),
        pretend.call(".xml"),
    ]
    assert configurator_obj.add_jinja2_search_path.calls == [
        pretend.call("warehouse:templates", name=".html"),
        pretend.call("warehouse:templates", name=".txt"),
        pretend.call("warehouse:templates", name=".xml"),
    ]
    assert configurator_obj.add_settings.calls == [
        pretend.call({"jinja2.newstyle": True}),
        pretend.call({
            "tm.attempts": 3,
            "tm.manager_hook": mock.ANY,
            "tm.activate_hook": config.activate_hook,
            "tm.annotate_user": False,
        }),
        pretend.call({
            "csp": {
                "connect-src": ["'self'"],
                "default-src": ["'none'"],
                "font-src": ["'self'", "fonts.gstatic.com"],
                "frame-ancestors": ["'none'"],
                "img-src": [
                    "'self'",
                    "http://camo.example.com/",
                    "https://secure.gravatar.com",
                ],
                "referrer": ["origin-when-cross-origin"],
                "reflected-xss": ["block"],
                "report-uri": [None],
                "script-src": ["'self'"],
                "style-src": ["'self'", "fonts.googleapis.com"],
            },
        }),
    ]
    # The tm.manager_hook was matched with mock.ANY above; verify it actually
    # returns the stubbed transaction manager.
    add_settings_dict = configurator_obj.add_settings.calls[1].args[0]
    assert add_settings_dict["tm.manager_hook"](pretend.stub()) is \
        transaction_manager
    assert configurator_obj.add_directive.calls == [
        pretend.call("find_service_factory", config.find_service_factory),
    ]
    assert configurator_obj.add_request_method.calls == [
        pretend.call(config.find_service_factory),
    ]
    assert configurator_obj.add_tween.calls == [
        pretend.call("warehouse.config.content_security_policy_tween_factory"),
        pretend.call("warehouse.config.require_https_tween_factory"),
    ]
    assert configurator_obj.add_static_view.calls == [
        pretend.call(
            name="static",
            path="warehouse:static/dist/",
            cache_max_age=0,
            cachebust=cachebuster_obj,
        ),
        pretend.call(name="locales", path="warehouse:locales/"),
    ]
    assert cachebuster_cls.calls == [
        pretend.call("warehouse:static/dist/manifest.json", reload=False),
    ]
    assert configurator_obj.scan.calls == [
        pretend.call(ignore=["warehouse.migrations.env", "warehouse.wsgi"]),
    ]
    assert configurator_obj.add_renderer.calls == [
        pretend.call("json", json_renderer_obj),
        pretend.call("xmlrpc", xmlrpc_renderer_obj),
    ]
    assert json_renderer_cls.calls == [
        pretend.call(sort_keys=True, separators=(",", ":")),
    ]
    assert xmlrpc_renderer_cls.calls == [pretend.call(allow_none=True)]
| apache-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_toolkits/axisartist/angle_helper.py | 6 | 14365 | from __future__ import print_function
from math import floor
import numpy as np
import math
A = np.array  # terse alias used when building the step/limit tables below
from mpl_toolkits.axisartist.grid_finder import ExtremeFinderSimple
def select_step_degree(dv):
    """
    Choose a "nice" tick step for an interval of ``dv`` degrees.

    Returns the step expressed in the chosen unit together with the factor
    that converts degrees into that unit (1 for degrees, 60 for
    arc-minutes, 3600 for arc-seconds).
    """
    # Thresholds and matching steps for whole-degree ticks.
    degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
    degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
    degree_factors = [1.] * len(degree_steps_)
    # The same threshold/step table serves both minutes and seconds.
    minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
    minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
    minute_limits_ = np.asarray(minsec_limits_) * (1. / 60.)
    minute_factors = [60.] * len(minute_limits_)
    second_limits_ = np.asarray(minsec_limits_) * (1. / 3600.)
    second_factors = [3600.] * len(second_limits_)
    # Stack seconds, minutes and degrees into one ascending lookup table.
    degree_limits = np.concatenate([second_limits_,
                                    minute_limits_,
                                    degree_limits_])
    degree_steps = np.concatenate([minsec_steps_,
                                   minsec_steps_,
                                   degree_steps_])
    degree_factors = np.concatenate([second_factors,
                                     minute_factors,
                                     degree_factors])
    idx = degree_limits.searchsorted(dv)
    return degree_steps[idx], degree_factors[idx]
def select_step_hour(dv):
    """
    Choose a "nice" tick step for an interval of ``dv`` hours.

    Returns the step in the chosen unit together with the factor that
    converts hours into that unit (1 for hours, 60 for minutes, 3600 for
    seconds).
    """
    # Thresholds and matching steps for whole-hour ticks.
    hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
    hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24]
    hour_factors = [1.] * len(hour_steps_)
    # One table serves both the minute and the second scales.
    minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
    minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
    minute_limits_ = np.asarray(minsec_limits_) * (1. / 60.)
    minute_factors = [60.] * len(minute_limits_)
    second_limits_ = np.asarray(minsec_limits_) * (1. / 3600.)
    second_factors = [3600.] * len(second_limits_)
    # Stack seconds, minutes and hours into one ascending lookup table.
    hour_limits = np.concatenate([second_limits_,
                                  minute_limits_,
                                  hour_limits_])
    hour_steps = np.concatenate([minsec_steps_,
                                 minsec_steps_,
                                 hour_steps_])
    hour_factors = np.concatenate([second_factors,
                                   minute_factors,
                                   hour_factors])
    idx = hour_limits.searchsorted(dv)
    return hour_steps[idx], hour_factors[idx]
def select_step_sub(dv):
    """
    Choose a 1/2/5-style step for sub-arcsecond (or sub-degree) intervals.

    Returns the step and the scale factor; ticks are then placed at
    multiples of ``step / factor``.
    """
    # Power-of-ten scale one decade below dv's order of magnitude.
    scale = 10. ** (int(math.log10(dv)) - 1.)
    factor = 1. / scale
    if dv <= 1.5 * scale:
        return 1, factor
    if dv <= 3. * scale:
        return 2, factor
    if dv <= 7. * scale:
        return 5, factor
    # Interval exceeds 7x the scale: drop one more decade, restart at 1.
    return 1, 0.1 * factor
def select_step(v1, v2, nv, hour=False, include_last=True,
                threshold_factor=3600.):
    """
    Pick grid levels for the interval [v1, v2] aiming for about `nv` ticks.

    Returns (levels, n, factor): tick positions already multiplied by
    `factor`, the count of valid levels, and the unit factor chosen by the
    step-selection helpers.
    """
    # Work with an ascending interval.
    if v1 > v2:
        v1, v2 = v2, v1
    dv = float(v2 - v1) / nv
    if hour:
        _select_step = select_step_hour
        cycle = 24.
    else:
        _select_step = select_step_degree
        cycle = 360.
    # for degree
    if dv > 1./threshold_factor:
        #print "degree"
        step, factor = _select_step(dv)
    else:
        # Below the threshold, fall back to the generic decimal stepper
        # operating in threshold units (e.g. arc-seconds).
        step, factor = select_step_sub(dv*threshold_factor)
        #print "feac", step, factor
        factor = factor * threshold_factor
    # NOTE(review): `fstep` is computed but never used below -- confirm
    # whether it is intentional before removing.
    f1, f2, fstep = v1*factor, v2*factor, step/factor
    # Tick positions as integer multiples of `step` spanning [f1, f2].
    levs = np.arange(math.floor(f1/step), math.ceil(f2/step)+0.5,
                     1, dtype="i") * step
    # n : number of valid levels. If there is a cycle, e.g., [0, 90, 180,
    # 270, 360], the grid line needs to be extended from 0 to 360, so
    # we need to return the whole array. However, the last level (360)
    # needs to be ignored often. In this case, so we return n=4.
    n = len(levs)
    # we need to check the range of values
    # for example, -90 to 90, 0 to 360,
    if factor == 1. and (levs[-1] >= levs[0]+cycle): # check for cycle
        nv = int(cycle / step)
        if include_last:
            levs = levs[0] + np.arange(0, nv+1, 1) * step
        else:
            levs = levs[0] + np.arange(0, nv, 1) * step
        n = len(levs)
    return np.array(levs), n, factor
def select_step24(v1, v2, nv, include_last=True, threshold_factor=3600):
    """
    Hour-based variant of `select_step`: inputs are degrees, which are
    converted to hours (15 degrees per hour) and back for the result.
    """
    hours1, hours2 = v1 / 15., v2 / 15.
    levs, n, factor = select_step(hours1, hours2, nv, hour=True,
                                  include_last=include_last,
                                  threshold_factor=threshold_factor)
    return levs * 15., n, factor
def select_step360(v1, v2, nv, include_last=True, threshold_factor=3600):
    """Degree-based wrapper around `select_step` (``hour=False``)."""
    return select_step(v1, v2, nv, hour=False,
                       include_last=include_last,
                       threshold_factor=threshold_factor)
class LocatorBase(object):
    """
    Common base for the tick locators. Stores the requested number of
    grid divisions as ``den`` and also exposes it as ``nbins`` to match
    the matplotlib locator API.
    """
    def __init__(self, den, include_last=True):
        self.den = den
        self._include_last = include_last

    def _get_nbins(self):
        return self.den

    def _set_nbins(self, v):
        self.den = v

    # `nbins` mirrors `den` for API compatibility with other locators.
    nbins = property(_get_nbins, _set_nbins)

    def set_params(self, **kwargs):
        """Accept an `nbins` keyword; any other key is an error."""
        if "nbins" in kwargs:
            self.den = int(kwargs.pop("nbins"))
        if kwargs:
            unknown = ", ".join(str(k) for k in kwargs.keys())
            raise ValueError("Following keys are not processed: %s" % unknown)
class LocatorHMS(LocatorBase):
    """Locator producing hour/minute/second ticks."""
    def __call__(self, v1, v2):
        return select_step24(v1, v2, self.den,
                             include_last=self._include_last)
class LocatorHM(LocatorBase):
    """Locator producing hour/minute ticks (no seconds)."""
    def __call__(self, v1, v2):
        return select_step24(v1, v2, self.den,
                             include_last=self._include_last,
                             threshold_factor=60)
class LocatorH(LocatorBase):
    """Locator producing whole-hour ticks only."""
    def __call__(self, v1, v2):
        return select_step24(v1, v2, self.den,
                             include_last=self._include_last,
                             threshold_factor=1)
class LocatorDMS(LocatorBase):
    """Locator producing degree/minute/second ticks."""
    def __call__(self, v1, v2):
        return select_step360(v1, v2, self.den,
                              include_last=self._include_last)
class LocatorDM(LocatorBase):
    """Locator producing degree/minute ticks (no seconds)."""
    def __call__(self, v1, v2):
        return select_step360(v1, v2, self.den,
                              include_last=self._include_last,
                              threshold_factor=60)
class LocatorD(LocatorBase):
    """Locator producing whole-degree ticks only."""
    def __call__(self, v1, v2):
        return select_step360(v1, v2, self.den,
                              include_last=self._include_last,
                              threshold_factor=1)
class FormatterDMS(object):
    """
    Tick formatter producing LaTeX degree/minute/second labels.

    ``__call__`` receives tick values that have already been multiplied by
    *factor* (1 -> degrees, 60 -> minutes, 3600 -> seconds; an extra power
    of ten signals that many fractional digits) and returns one LaTeX label
    string per value.
    """
    deg_mark = "^{\circ}"
    min_mark = "^{\prime}"
    sec_mark = "^{\prime\prime}"
    fmt_d = "$%d"+deg_mark+"$"
    fmt_ds = r"$%d.\!\!"+deg_mark+"%s$"
    # %s is for the sign
    fmt_d_m = r"$%s%d"+deg_mark+"\,%02d"+min_mark+"$"
    fmt_d_ms = r"$%s%d"+deg_mark+"\,%02d.\mkern-4mu"+min_mark+"%s$"
    fmt_d_m_partial = "$%s%d"+deg_mark+"\,%02d"+min_mark+"\,"
    fmt_s_partial = "%02d"+sec_mark+"$"
    fmt_ss_partial = "%02d.\!\!"+sec_mark+"%s$"

    def _get_number_fraction(self, factor):
        """
        Split *factor* into a base unit (1, 60 or 3600) and the number of
        fractional digits implied by any extra power of ten, e.g.
        600 -> (60, 1): minutes with one fractional digit.
        Returns (base_factor, number_fraction) with number_fraction None
        when the factor is a plain unit.
        """
        ## check for fractional numbers
        number_fraction = None
        # check for 60
        for threshold in [1, 60, 3600]:
            if factor <= threshold:
                break
            d = factor // threshold
            int_log_d = int(floor(math.log10(d)))
            if 10**int_log_d == d and d != 1:
                number_fraction = int_log_d
                factor = factor // 10**int_log_d
                return factor, number_fraction
        return factor, number_fraction

    def __call__(self, direction, factor, values):
        """Format *values* (already scaled by *factor*) as LaTeX strings."""
        if len(values) == 0:
            return []
        #ss = [[-1, 1][v>0] for v in values] #not py24 compliant
        values = np.asarray(values)
        ss = np.where(values > 0, 1, -1)
        # A sign string is emitted only for strictly-negative values.
        sign_map = {(-1, True): "-"}
        signs = [sign_map.get((s, v != 0), "") for s, v in zip(ss, values)]
        factor, number_fraction = self._get_number_fraction(factor)
        values = np.abs(values)
        if number_fraction is not None:
            # Peel off the fractional digits and pre-format them.
            values, frac_part = divmod(values, 10**number_fraction)
            frac_fmt = "%%0%dd" % (number_fraction,)
            frac_str = [frac_fmt % (f1,) for f1 in frac_part]
        if factor == 1:
            if number_fraction is None:
                return [self.fmt_d % (s*int(v),) for (s, v) in zip(ss, values)]
            else:
                return [self.fmt_ds % (s*int(v), f1) for (s, v, f1) in
                        zip(ss, values, frac_str)]
        elif factor == 60:
            deg_part, min_part = divmod(values, 60)
            if number_fraction is None:
                return [self.fmt_d_m % (s1, d1, m1)
                        for s1, d1, m1 in zip(signs, deg_part, min_part)]
            else:
                return [self.fmt_d_ms % (s, d1, m1, f1)
                        for s, d1, m1, f1 in zip(signs, deg_part, min_part,
                                                 frac_str)]
        elif factor == 3600:
            # Process descending (negative-ending) tick runs in ascending
            # order so the shared-prefix suppression below works, then
            # restore the original order at the end.
            if ss[-1] == -1:
                inverse_order = True
                values = values[::-1]
                # BUGFIX: this used to assign to a misspelled `sings`,
                # leaving the sign list unreversed for descending inputs.
                signs = signs[::-1]
            else:
                inverse_order = False
            l_hm_old = ""
            r = []
            deg_part, min_part_ = divmod(values, 3600)
            min_part, sec_part = divmod(min_part_, 60)
            if number_fraction is None:
                sec_str = [self.fmt_s_partial % (s1,) for s1 in sec_part]
            else:
                sec_str = [self.fmt_ss_partial % (s1, f1)
                           for s1, f1 in zip(sec_part, frac_str)]
            for s, d1, m1, s1 in zip(signs, deg_part, min_part, sec_str):
                # Repeat the degree/minute prefix only when it changes.
                l_hm = self.fmt_d_m_partial % (s, d1, m1)
                if l_hm != l_hm_old:
                    l_hm_old = l_hm
                    l = l_hm + s1
                else:
                    l = "$" + s1
                r.append(l)
            if inverse_order:
                return r[::-1]
            else:
                return r
        else:  # factor > 3600.
            return [r"$%s^{\circ}$" % (str(v),) for v in ss*values]
class FormatterHMS(FormatterDMS):
    """
    Hour/minute/second variant of FormatterDMS: swaps the LaTeX unit
    marks and converts degree values to hours (15 degrees per hour)
    before delegating to the base formatter.
    """
    deg_mark = "^\mathrm{h}"
    min_mark = "^\mathrm{m}"
    sec_mark = "^\mathrm{s}"
    fmt_d = "$%d"+deg_mark+"$"
    fmt_ds = r"$%d.\!\!"+deg_mark+"%s$"
    # %s is for the sign
    fmt_d_m = r"$%s%d"+deg_mark+"\,%02d"+min_mark+"$"
    fmt_d_ms = r"$%s%d"+deg_mark+"\,%02d.\!\!"+min_mark+"%s$"
    fmt_d_m_partial = "$%s%d"+deg_mark+"\,%02d"+min_mark+"\,"
    fmt_s_partial = "%02d"+sec_mark+"$"
    fmt_ss_partial = "%02d.\!\!"+sec_mark+"%s$"
    def __call__(self, direction, factor, values): # hour
        return FormatterDMS.__call__(self, direction, factor, np.asarray(values)/15.)
class ExtremeFinderCycle(ExtremeFinderSimple):
    """
    When there is a cycle, e.g., longitude goes from 0-360.

    Samples the transform on an nx-by-ny grid over the image rectangle,
    unwraps cyclic jumps, and clips the extremes to the configured
    cycle lengths and min/max bounds.
    """
    def __init__(self,
                 nx, ny,
                 lon_cycle = 360.,
                 lat_cycle = None,
                 lon_minmax = None,
                 lat_minmax = (-90, 90)
                 ):
        #self.transfrom_xy = transform_xy
        #self.inv_transfrom_xy = inv_transform_xy
        self.nx, self.ny = nx, ny
        self.lon_cycle, self.lat_cycle = lon_cycle, lat_cycle
        self.lon_minmax = lon_minmax
        self.lat_minmax = lat_minmax
    def __call__(self, transform_xy, x1, y1, x2, y2):
        """
        get extreme values.
        x1, y1, x2, y2 in image coordinates (0-based)
        nx, ny : number of divisions in each axis
        """
        # Sample the transform on a regular grid covering the rectangle.
        x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
        x, y = np.meshgrid(x_, y_)
        lon, lat = transform_xy(np.ravel(x), np.ravel(y))
        # Iron out jumps, but the algorithm should be improved.
        # This is just a naive approach and may fail for some cases.
        # NOTE(review): the unwrap uses fixed 360/180 constants even when a
        # custom lat_cycle is configured -- confirm whether it should use
        # self.lat_cycle instead.
        if self.lon_cycle is not None:
            lon0 = np.nanmin(lon)
            lon -= 360. * ((lon - lon0) > 180.)
        if self.lat_cycle is not None:
            lat0 = np.nanmin(lat)
            lat -= 360. * ((lat - lat0) > 180.)
        lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
        lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)
        lon_min, lon_max, lat_min, lat_max = \
            self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)
        return lon_min, lon_max, lat_min, lat_max
    def _adjust_extremes(self, lon_min, lon_max, lat_min, lat_max):
        """Pad the extremes, then clip to the cycle lengths and bounds."""
        lon_min, lon_max, lat_min, lat_max = \
            self._add_pad(lon_min, lon_max, lat_min, lat_max)
        # check cycle: never span more than one full cycle.
        if self.lon_cycle:
            lon_max = min(lon_max, lon_min + self.lon_cycle)
        if self.lat_cycle:
            lat_max = min(lat_max, lat_min + self.lat_cycle)
        # Clip against the absolute coordinate bounds, when given.
        if self.lon_minmax is not None:
            min0 = self.lon_minmax[0]
            lon_min = max(min0, lon_min)
            max0 = self.lon_minmax[1]
            lon_max = min(max0, lon_max)
        if self.lat_minmax is not None:
            min0 = self.lat_minmax[0]
            lat_min = max(min0, lat_min)
            max0 = self.lat_minmax[1]
            lat_max = min(max0, lat_max)
        return lon_min, lon_max, lat_min, lat_max
if __name__ == "__main__":
    # Ad-hoc manual smoke tests: print step selection and formatting results
    # for visual inspection (no assertions).
    #test2()
    #print select_step360(21.2, 33.3, 5)
    #print select_step360(20+21.2/60., 21+33.3/60., 5)
    #print select_step360(20.5+21.2/3600., 20.5+33.3/3600., 5)
    # test threshold factor
    print(select_step360(20.5+11.2/3600., 20.5+53.3/3600., 5,
                         threshold_factor=60))
    print(select_step360(20.5+11.2/3600., 20.5+53.3/3600., 5,
                         threshold_factor=1))
    fmt = FormatterDMS()
    #print fmt("left", 60, [0, -30, -60])
    print(fmt("left", 600, [12301, 12302, 12303]))
    print(select_step360(20.5+21.2/3600., 20.5+21.4/3600., 5))
    print(fmt("left", 36000, [738210, 738215, 738220]))
    print(fmt("left", 360000, [7382120, 7382125, 7382130]))
    print(fmt("left", 1., [45, 46, 47]))
    print(fmt("left", 10., [452, 453, 454]))
    # Disabled extra checks kept for reference.
    if 0:
        print(select_step360(20+21.2/60., 21+33.3/60., 5))
        print(select_step360(20.5+21.2/3600., 20.5+33.3/3600., 5))
        print(select_step360(20+21.2/60., 20+53.3/60., 5))
        ###
        levs, n, factor = select_step360(20.5+21.2/3600., 20.5+27.25/3600., 5)
        levs = levs * 0.1
        fmt = FormatterDMS()
        #print fmt("left", 60, [0, -30, -60])
        print(fmt("left", factor, levs))
    print(select_step(-180, 180, 10, hour=False))
    print(select_step(-12, 12, 10, hour=True))
    fmt = FormatterDMS()
    #print fmt("left", 60, [0, -30, -60])
    print(fmt("left", 3600, [0, -30, -60]))
| mit |
gangadharkadam/johnfrappe | frappe/core/doctype/customize_form/test_customize_form.py | 18 | 6744 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, unittest, json
from frappe.test_runner import make_test_records_for_doctype
test_dependencies = ["Custom Field", "Property Setter"]  # fixtures the test runner must create before these tests
class TestCustomizeForm(unittest.TestCase):
def insert_custom_field(self):
frappe.get_doc({
"doctype": "Custom Field",
"dt": "User",
"label": "Test Custom Field",
"description": "A Custom Field for Testing",
"fieldtype": "Select",
"in_list_view": 1,
"options": "\nCustom 1\nCustom 2\nCustom 3",
"default": "Custom 3"
}).insert()
def setUp(self):
self.insert_custom_field()
frappe.db.commit()
frappe.clear_cache(doctype="User")
def tearDown(self):
frappe.delete_doc("Custom Field", "User-test_custom_field")
frappe.db.commit()
frappe.clear_cache(doctype="User")
def get_customize_form(self, doctype=None):
d = frappe.get_doc("Customize Form")
if doctype:
d.doc_type = doctype
d.run_method("fetch_to_customize")
return d
def test_fetch_to_customize(self):
d = self.get_customize_form()
self.assertEquals(d.doc_type, None)
self.assertEquals(len(d.get("customize_form_fields")), 0)
d = self.get_customize_form("Event")
self.assertEquals(d.doc_type, "Event")
self.assertEquals(len(d.get("customize_form_fields")), 30)
d = self.get_customize_form("User")
self.assertEquals(d.doc_type, "User")
self.assertEquals(len(d.get("customize_form_fields")), 55)
self.assertEquals(d.get("customize_form_fields")[-1].fieldname, "test_custom_field")
self.assertEquals(d.get("customize_form_fields", {"fieldname": "location"})[0].in_list_view, 1)
return d
def test_save_customization_idx(self):
d = self.get_customize_form("User")
original_sequence = [df.fieldname for df in d.get("customize_form_fields")]
# move field to last
location_field = d.get("customize_form_fields", {"fieldname": "location"})[0]
d.get("customize_form_fields").remove(location_field)
d.append("customize_form_fields", location_field)
d.run_method("save_customization")
frappe.clear_cache(doctype=d.doc_type)
property_setter_name, _idx = frappe.db.get_value("Property Setter",
{"doc_type": d.doc_type, "property": "_idx"}, ("name", "value"))
self.assertTrue(_idx)
_idx = json.loads(_idx)
for i, df in enumerate(frappe.get_meta(d.doc_type).get("fields")):
self.assertEquals(_idx[i], df.fieldname)
frappe.delete_doc("Property Setter", property_setter_name)
frappe.clear_cache(doctype=d.doc_type)
for i, df in enumerate(frappe.get_meta(d.doc_type).get("fields")):
self.assertEquals(original_sequence[i], df.fieldname)
def test_save_customization_property(self):
d = self.get_customize_form("User")
self.assertEquals(frappe.db.get_value("Property Setter",
{"doc_type": "User", "property": "allow_copy"}, "value"), None)
d.allow_copy = 1
d.run_method("save_customization")
self.assertEquals(frappe.db.get_value("Property Setter",
{"doc_type": "User", "property": "allow_copy"}, "value"), '1')
d.allow_copy = 0
d.run_method("save_customization")
self.assertEquals(frappe.db.get_value("Property Setter",
{"doc_type": "User", "property": "allow_copy"}, "value"), None)
def test_save_customization_field_property(self):
d = self.get_customize_form("User")
self.assertEquals(frappe.db.get_value("Property Setter",
{"doc_type": "User", "property": "reqd", "field_name": "location"}, "value"), None)
location_field = d.get("customize_form_fields", {"fieldname": "location"})[0]
location_field.reqd = 1
d.run_method("save_customization")
self.assertEquals(frappe.db.get_value("Property Setter",
{"doc_type": "User", "property": "reqd", "field_name": "location"}, "value"), '1')
location_field = d.get("customize_form_fields", {"fieldname": "location"})[0]
location_field.reqd = 0
d.run_method("save_customization")
self.assertEquals(frappe.db.get_value("Property Setter",
{"doc_type": "User", "property": "reqd", "field_name": "location"}, "value"), '0')
def test_save_customization_custom_field_property(self):
d = self.get_customize_form("User")
self.assertEquals(frappe.db.get_value("Custom Field", "User-test_custom_field", "reqd"), 0)
custom_field = d.get("customize_form_fields", {"fieldname": "test_custom_field"})[0]
custom_field.reqd = 1
d.run_method("save_customization")
self.assertEquals(frappe.db.get_value("Custom Field", "User-test_custom_field", "reqd"), 1)
custom_field = d.get("customize_form_fields", {"is_custom_field": True})[0]
custom_field.reqd = 0
d.run_method("save_customization")
self.assertEquals(frappe.db.get_value("Custom Field", "User-test_custom_field", "reqd"), 0)
def test_save_customization_new_field(self):
    """Appending a row flagged __islocal creates a new Custom Field."""
    d = self.get_customize_form("User")
    d.append("customize_form_fields", {
        "label": "Test Add Custom Field Via Customize Form",
        "fieldtype": "Data",
        "__islocal": 1
    })
    d.run_method("save_customization")
    self.assertEqual(frappe.db.get_value("Custom Field",
        "User-test_add_custom_field_via_customize_form", "fieldtype"), "Data")
    # Clean up the field created above and confirm the deletion took effect.
    frappe.delete_doc("Custom Field", "User-test_add_custom_field_via_customize_form")
    self.assertEqual(frappe.db.get_value("Custom Field",
        "User-test_add_custom_field_via_customize_form"), None)
def test_save_customization_remove_field(self):
    """Removing a Custom Field row deletes the underlying Custom Field doc."""
    d = self.get_customize_form("User")
    custom_field = d.get("customize_form_fields", {"fieldname": "test_custom_field"})[0]
    d.get("customize_form_fields").remove(custom_field)
    d.run_method("save_customization")
    self.assertEqual(frappe.db.get_value("Custom Field", custom_field.name), None)
    # Restore the Custom Field fixtures consumed by this test.
    frappe.local.test_objects["Custom Field"] = []
    make_test_records_for_doctype("Custom Field")
def test_reset_to_defaults(self):
    """reset_to_defaults wipes customized field properties."""
    d = frappe.get_doc("Customize Form")
    d.doc_type = "User"
    d.run_method('reset_to_defaults')
    self.assertEqual(d.get("customize_form_fields", {"fieldname": "location"})[0].in_list_view, None)
    # Restore the Property Setter fixtures for subsequent tests.
    frappe.local.test_objects["Property Setter"] = []
    make_test_records_for_doctype("Property Setter")
def test_set_allow_on_submit(self):
    """allow_on_submit is honored for custom fields but ignored for standard ones."""
    d = self.get_customize_form("User")
    d.get("customize_form_fields", {"fieldname": "first_name"})[0].allow_on_submit = 1
    d.get("customize_form_fields", {"fieldname": "test_custom_field"})[0].allow_on_submit = 1
    d.run_method("save_customization")
    # Reload to see what was actually persisted.
    d = self.get_customize_form("User")
    # don't allow for standard fields
    self.assertEqual(d.get("customize_form_fields", {"fieldname": "first_name"})[0].allow_on_submit or 0, 0)
    # allow for custom field
    self.assertEqual(d.get("customize_form_fields", {"fieldname": "test_custom_field"})[0].allow_on_submit, 1)
| mit |
michalwerner/django-filebrowser-tinymce4 | tests/test_sites.py | 2 | 13320 | # coding: utf-8
from __future__ import with_statement
import os
import json
import shutil
from django.core.urlresolvers import reverse
try:
    from django.utils.six.moves.urllib.parse import urlencode
except ImportError:
    # Only a missing six shim should trigger the fallback; a bare except
    # would also swallow unrelated errors such as KeyboardInterrupt.
    from django.utils.http import urlencode
from mock import patch
from filebrowser.settings import VERSIONS, DEFAULT_PERMISSIONS
from filebrowser.base import FileObject
from filebrowser.sites import site
from tests import FilebrowserTestCase as TestCase
class BrowseViewTests(TestCase):
    """Tests for the filebrowser browse view."""

    def setUp(self):
        super(BrowseViewTests, self).setUp()
        self.url = reverse('filebrowser:fb_browse')
        self.client.login(username=self.user.username, password='password')

    def test_get(self):
        response = self.client.get(self.url)
        # assertEqual/assertIn report the mismatching values on failure,
        # unlike assertTrue(a == b) which only says "False is not true".
        self.assertEqual(response.status_code, 200)
        self.assertIn('filebrowser/index.html', [t.name for t in response.templates])
        # Check directory was set correctly in the context. If this fails, it may indicate
        # that two sites were instantiated with the same name.
        self.assertEqual(site.directory, response.context['filebrowser_site'].directory)

    def test_ckeditor_params_in_search_form(self):
        """
        The CKEditor GET params must be included in the search form as hidden
        inputs so they persist after searching.
        """
        response = self.client.get(self.url, {
            'pop': '3',
            'type': 'image',
            'CKEditor': 'id_body',
            'CKEditorFuncNum': '1',
        })
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<input type="hidden" name="pop" value="3" />')
        self.assertContains(response, '<input type="hidden" name="type" value="image" />')
        self.assertContains(response, '<input type="hidden" name="CKEditor" value="id_body" />')
        self.assertContains(response, '<input type="hidden" name="CKEditorFuncNum" value="1" />')
class CreateDirViewTests(TestCase):
    """Tests for the directory-creation view."""

    def setUp(self):
        super(CreateDirViewTests, self).setUp()
        self.url = reverse('filebrowser:fb_createdir')
        self.client.login(username=self.user.username, password='password')

    def test_post(self):
        self.assertFalse(site.storage.exists(self.CREATEFOLDER_PATH))
        response = self.client.post(self.url, {'name': self.F_CREATEFOLDER.path_relative_directory})
        # A successful creation redirects back to the browse view.
        self.assertEqual(response.status_code, 302)
        self.assertTrue(site.storage.exists(self.CREATEFOLDER_PATH))
class UploadViewTests(TestCase):
    """Tests for the upload page (the form, not the AJAX endpoint)."""

    def setUp(self):
        super(UploadViewTests, self).setUp()
        self.url = reverse('filebrowser:fb_upload')
        self.client.login(username=self.user.username, password='password')

    def test_get(self):
        response = self.client.get(self.url, {'name': self.F_CREATEFOLDER.path_relative_directory})
        self.assertEqual(response.status_code, 200)
        self.assertIn('filebrowser/upload.html', [t.name for t in response.templates])
class UploadFileViewTests(TestCase):
    """Tests for the AJAX upload endpoint (fb_do_upload)."""

    def setUp(self):
        super(UploadFileViewTests, self).setUp()
        self.url = reverse('filebrowser:fb_do_upload')
        self.url_bad_name = '?'.join([self.url, urlencode({'folder': self.F_SUBFOLDER.path_relative_directory, 'qqfile': 'TEST IMAGE 000.jpg'})])
        self.client.login(username=self.user.username, password='password')

    def test_post(self):
        uploaded_path = os.path.join(self.F_SUBFOLDER.path, 'testimage.jpg')
        self.assertFalse(site.storage.exists(uploaded_path))
        url = '?'.join([self.url, urlencode({'folder': self.F_SUBFOLDER.path_relative_directory})])
        with open(self.STATIC_IMG_PATH, "rb") as f:
            file_size = os.path.getsize(f.name)
            response = self.client.post(url, data={'qqfile': 'testimage.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check we get OK response
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data["filename"], "testimage.jpg")
        self.assertEqual(data["temp_filename"], None)
        # Check the file now exists
        self.testfile = FileObject(uploaded_path, site=site)
        self.assertTrue(site.storage.exists(uploaded_path))
        # Check the file has the correct size
        self.assertEqual(file_size, site.storage.size(uploaded_path))
        # Check permissions
        # TODO: break out into separate test
        if DEFAULT_PERMISSIONS is not None:
            permissions_default = oct(DEFAULT_PERMISSIONS)
            permissions_file = oct(os.stat(self.testfile.path_full).st_mode & 0o777)
            self.assertEqual(permissions_default, permissions_file)

    @patch('filebrowser.sites.UPLOAD_TEMPDIR', '_test/tempfolder')
    def test_do_temp_upload(self):
        """
        Test the temporary upload (used with the FileBrowseUploadField)
        TODO: This is undocumented.
        """
        uploaded_path = os.path.join(self.F_TEMPFOLDER.path, 'testimage.jpg')
        self.assertFalse(site.storage.exists(uploaded_path))
        # TODO: Why is folder required to be temp? Shouldn't it use tempfolder
        # regardless of what is specified?
        url = reverse('filebrowser:fb_do_upload')
        url = '?'.join([url, urlencode({'folder': self.F_TEMPFOLDER.path_relative_directory, 'qqfile': 'testimage.jpg', 'temporary': 'true'})])
        with open(self.STATIC_IMG_PATH, "rb") as f:
            file_size = os.path.getsize(f.name)
            response = self.client.post(url, data={'qqfile': 'testimage.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Check we get OK response
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data["filename"], "testimage.jpg")
        self.assertEqual(data["temp_filename"], os.path.join(self.F_TEMPFOLDER.path_relative_directory, "testimage.jpg"))
        # Check the file now exists
        self.testfile = FileObject(uploaded_path, site=site)
        self.assertTrue(site.storage.exists(uploaded_path))
        # Check the file has the correct size
        self.assertEqual(file_size, site.storage.size(uploaded_path))
        # Check permissions
        if DEFAULT_PERMISSIONS is not None:
            permissions_default = oct(DEFAULT_PERMISSIONS)
            permissions_file = oct(os.stat(self.testfile.path_full).st_mode & 0o777)
            self.assertEqual(permissions_default, permissions_file)

    @patch('filebrowser.sites.OVERWRITE_EXISTING', True)
    def test_overwrite_existing_true(self):
        shutil.copy(self.STATIC_IMG_PATH, self.SUBFOLDER_PATH)
        self.assertEqual(site.storage.listdir(self.F_SUBFOLDER), ([], [u'testimage.jpg']))
        url = '?'.join([self.url, urlencode({'folder': self.F_SUBFOLDER.path_relative_directory})])
        with open(self.STATIC_IMG_PATH, "rb") as f:
            self.client.post(url, data={'qqfile': 'testimage.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Still a single file: the upload replaced the existing one.
        self.assertEqual(site.storage.listdir(self.F_SUBFOLDER), ([], [u'testimage.jpg']))

    @patch('filebrowser.sites.OVERWRITE_EXISTING', False)
    def test_overwrite_existing_false(self):
        shutil.copy(self.STATIC_IMG_PATH, self.SUBFOLDER_PATH)
        self.assertEqual(site.storage.listdir(self.F_SUBFOLDER), ([], [u'testimage.jpg']))
        url = '?'.join([self.url, urlencode({'folder': self.F_SUBFOLDER.path_relative_directory})])
        with open(self.STATIC_IMG_PATH, "rb") as f:
            self.client.post(url, data={'qqfile': 'testimage.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Two files: the upload was stored under an alternative name.
        self.assertEqual(len(site.storage.listdir(self.F_SUBFOLDER)[1]), 2)

    @patch('filebrowser.utils.CONVERT_FILENAME', False)
    @patch('filebrowser.utils.NORMALIZE_FILENAME', False)
    def test_convert_false_normalize_false(self):
        with open(self.STATIC_IMG_BAD_NAME_PATH, "rb") as f:
            self.client.post(self.url_bad_name, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(site.storage.listdir(self.F_SUBFOLDER), ([], [u'TEST IMAGE 000.jpg']))

    @patch('filebrowser.utils.CONVERT_FILENAME', True)
    @patch('filebrowser.utils.NORMALIZE_FILENAME', False)
    def test_convert_true_normalize_false(self):
        with open(self.STATIC_IMG_BAD_NAME_PATH, "rb") as f:
            self.client.post(self.url_bad_name, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(site.storage.listdir(self.F_SUBFOLDER), ([], [u'test_image_000.jpg']))

    @patch('filebrowser.utils.CONVERT_FILENAME', False)
    @patch('filebrowser.utils.NORMALIZE_FILENAME', True)
    def test_convert_false_normalize_true(self):
        with open(self.STATIC_IMG_BAD_NAME_PATH, "rb") as f:
            self.client.post(self.url_bad_name, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(site.storage.listdir(self.F_SUBFOLDER), ([], [u'TEST IMAGE 000.jpg']))

    @patch('filebrowser.utils.CONVERT_FILENAME', True)
    @patch('filebrowser.utils.NORMALIZE_FILENAME', True)
    def test_convert_true_normalize_true(self):
        with open(self.STATIC_IMG_BAD_NAME_PATH, "rb") as f:
            self.client.post(self.url_bad_name, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(site.storage.listdir(self.F_SUBFOLDER), ([], [u'test_image_000.jpg']))
class DetailViewTests(TestCase):
    """Tests for the file detail view, including renaming and version handling."""

    def setUp(self):
        super(DetailViewTests, self).setUp()
        self.url = reverse('filebrowser:fb_detail')
        self.client.login(username=self.user.username, password='password')
        shutil.copy(self.STATIC_IMG_PATH, self.FOLDER_PATH)

    def test_get(self):
        """ Check the detail view and version generation. Check also renaming of files. """
        response = self.client.get(self.url, {'dir': self.F_IMAGE.dirname, 'filename': self.F_IMAGE.filename})
        self.assertEqual(response.status_code, 200)
        # At this moment all versions should be generated. Check that.
        pre_rename_versions = []
        for version_suffix in VERSIONS:
            path = self.F_IMAGE.version_path(version_suffix)
            pre_rename_versions.append(path)
            self.assertTrue(site.storage.exists(path))
        # Attempt renaming the file
        url = '?'.join([self.url, urlencode({'dir': self.F_IMAGE.dirname, 'filename': self.F_IMAGE.filename})])
        response = self.client.post(url, {'name': 'testpic.jpg'})
        # Check we get 302 response for renaming
        self.assertEqual(response.status_code, 302)
        # Check the file was renamed correctly:
        self.assertTrue(site.storage.exists(os.path.join(self.F_IMAGE.head, 'testpic.jpg')))
        # Store the renamed file
        self.F_IMAGE = FileObject(os.path.join(self.F_IMAGE.head, 'testpic.jpg'), site=site)
        # Check if all pre-rename versions were deleted:
        for path in pre_rename_versions:
            self.assertFalse(site.storage.exists(path))
        # Check if all post-rename versions were deleted (resp. not being generated):
        for version_suffix in VERSIONS:
            path = self.F_IMAGE.version_path(version_suffix)
            self.assertFalse(site.storage.exists(path))
class DeleteConfirmViewTests(TestCase):
    """Tests for the delete-confirmation page."""

    def setUp(self):
        super(DeleteConfirmViewTests, self).setUp()
        self.url = reverse('filebrowser:fb_delete_confirm')
        self.client.login(username=self.user.username, password='password')
        shutil.copy(self.STATIC_IMG_PATH, self.FOLDER_PATH)

    def test_get(self):
        """ Check that the delete view functions as expected. Does not check the deletion itself, that happens in test_delete(). """
        response = self.client.get(self.url, {'dir': self.F_IMAGE.dirname, 'filename': self.F_IMAGE.filename})
        self.assertEqual(response.status_code, 200)
        self.assertIn('filebrowser/delete_confirm.html', [t.name for t in response.templates])
class DeleteViewTests(TestCase):
    """Tests for the delete view (file plus all generated versions)."""

    def setUp(self):
        super(DeleteViewTests, self).setUp()
        self.url = reverse('filebrowser:fb_delete')
        self.client.login(username=self.user.username, password='password')
        shutil.copy(self.STATIC_IMG_PATH, self.FOLDER_PATH)

    def test_get(self):
        """
        Generate all versions for the uploaded file and attempt a deletion of that file.
        Finally, attempt a deletion of the tmp dir.
        """
        versions = []
        for version_suffix in VERSIONS:
            versions.append(self.F_IMAGE.version_generate(version_suffix))
        # Request the delete view
        response = self.client.get(self.url, {'dir': self.F_IMAGE.dirname, 'filename': self.F_IMAGE.filename})
        # Check we get 302 response for delete
        self.assertEqual(response.status_code, 302)
        # Check the file and its versions do not exist anymore
        self.assertFalse(site.storage.exists(self.F_IMAGE.path))
        for version in versions:
            self.assertFalse(site.storage.exists(version.path))
| bsd-3-clause |
av8ramit/tensorflow | tensorflow/contrib/boosted_trees/python/ops/split_handler_ops.py | 78 | 1122 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Split handler custom ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops import boosted_trees_ops_loader
# pylint: enable=unused-import
# pylint: disable=wildcard-import
from tensorflow.contrib.boosted_trees.python.ops.gen_split_handler_ops import *
# pylint: enable=wildcard-import
| apache-2.0 |
okwow123/djangol2 | example/env/lib/python2.7/site-packages/allauth/socialaccount/adapter.py | 6 | 7658 | from __future__ import absolute_import
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from . import app_settings
from ..account import app_settings as account_settings
from ..account.adapter import get_adapter as get_account_adapter
from ..account.app_settings import EmailVerificationMethod
from ..account.models import EmailAddress
from ..account.utils import user_email, user_field, user_username
from ..compat import is_authenticated, reverse
from ..utils import (
deserialize_instance,
email_address_exists,
import_attribute,
serialize_instance,
valid_email_or_none,
)
class DefaultSocialAccountAdapter(object):
    """Default adapter implementing the hooks allauth calls during social auth.

    Subclass this (and point the ``SOCIALACCOUNT_ADAPTER`` setting at the
    subclass) to customize individual steps without forking the package.
    """

    # User-facing messages; the '%s' placeholder receives the provider name.
    error_messages = {
        'email_taken':
        _("An account already exists with this e-mail address."
          " Please sign in to that account first, then connect"
          " your %s account.")
    }

    def __init__(self, request=None):
        # Request may be None when the adapter is used outside a view.
        self.request = request

    def pre_social_login(self, request, sociallogin):
        """
        Invoked just after a user successfully authenticates via a
        social provider, but before the login is actually processed
        (and before the pre_social_login signal is emitted).

        You can use this hook to intervene, e.g. abort the login by
        raising an ImmediateHttpResponse

        Why both an adapter hook and the signal? Intervening in
        e.g. the flow from within a signal handler is bad -- multiple
        handlers may be active and are executed in undetermined order.
        """
        pass

    def authentication_error(self,
                             request,
                             provider_id,
                             error=None,
                             exception=None,
                             extra_context=None):
        """
        Invoked when there is an error in the authentication cycle. In this
        case, pre_social_login will not be reached.

        You can use this hook to intervene, e.g. redirect to an
        educational flow by raising an ImmediateHttpResponse.
        """
        pass

    def new_user(self, request, sociallogin):
        """
        Instantiates a new User instance.
        """
        return get_account_adapter().new_user(request)

    def save_user(self, request, sociallogin, form=None):
        """
        Saves a newly signed up social login. In case of auto-signup,
        the signup form is not available.
        """
        u = sociallogin.user
        # Social-only accounts have no local password.
        u.set_unusable_password()
        if form:
            get_account_adapter().save_user(request, u, form)
        else:
            get_account_adapter().populate_username(request, u)
        sociallogin.save(request)
        return u

    def populate_user(self,
                      request,
                      sociallogin,
                      data):
        """
        Hook that can be used to further populate the user instance.

        For convenience, we populate several common fields.

        Note that the user instance being populated represents a
        suggested User instance that represents the social user that is
        in the process of being logged in.

        The User instance need not be completely valid and conflict
        free. For example, verifying whether or not the username
        already exists, is not a responsibility.
        """
        username = data.get('username')
        first_name = data.get('first_name')
        last_name = data.get('last_name')
        email = data.get('email')
        name = data.get('name')
        user = sociallogin.user
        user_username(user, username or '')
        user_email(user, valid_email_or_none(email) or '')
        # Fall back to splitting a combined "name" when first/last are absent.
        name_parts = (name or '').partition(' ')
        user_field(user, 'first_name', first_name or name_parts[0])
        user_field(user, 'last_name', last_name or name_parts[2])
        return user

    def get_connect_redirect_url(self, request, socialaccount):
        """
        Returns the default URL to redirect to after successfully
        connecting a social account.
        """
        assert is_authenticated(request.user)
        url = reverse('socialaccount_connections')
        return url

    def validate_disconnect(self, account, accounts):
        """
        Validate whether or not the socialaccount account can be
        safely disconnected.

        Raises ValidationError when removing the last social account would
        lock the user out (no usable password, or mandatory e-mail
        verification without a verified address).
        """
        if len(accounts) == 1:
            # No usable password would render the local account unusable
            if not account.user.has_usable_password():
                raise ValidationError(_("Your account has no password set"
                                        " up."))
            # No email address, no password reset
            if app_settings.EMAIL_VERIFICATION \
                    == EmailVerificationMethod.MANDATORY:
                if EmailAddress.objects.filter(user=account.user,
                                               verified=True).count() == 0:
                    raise ValidationError(_("Your account has no verified"
                                            " e-mail address."))

    def is_auto_signup_allowed(self, request, sociallogin):
        """Decide whether signup may proceed without showing the signup form."""
        # If email is specified, check for duplicate and if so, no auto signup.
        auto_signup = app_settings.AUTO_SIGNUP
        if auto_signup:
            email = user_email(sociallogin.user)
            # Let's check if auto_signup is really possible...
            if email:
                if account_settings.UNIQUE_EMAIL:
                    if email_address_exists(email):
                        # Oops, another user already has this address.
                        # We cannot simply connect this social account
                        # to the existing user. Reason is that the
                        # email address may not be verified, meaning,
                        # the user may be a hacker that has added your
                        # email address to their account in the hope
                        # that you fall in their trap. We cannot
                        # check on 'email_address.verified' either,
                        # because 'email_address' is not guaranteed to
                        # be verified.
                        auto_signup = False
                        # FIXME: We redirect to signup form -- user will
                        # see email address conflict only after posting
                        # whereas we detected it here already.
                elif app_settings.EMAIL_REQUIRED:
                    # Nope, email is required and we don't have it yet...
                    auto_signup = False
        return auto_signup

    def is_open_for_signup(self, request, sociallogin):
        """
        Checks whether or not the site is open for signups.

        Next to simply returning True/False you can also intervene the
        regular flow by raising an ImmediateHttpResponse
        """
        return get_account_adapter(request).is_open_for_signup(request)

    def get_signup_form_initial_data(self, sociallogin):
        """Initial signup-form data prefilled from the provider's user info."""
        user = sociallogin.user
        initial = {
            'email': user_email(user) or '',
            'username': user_username(user) or '',
            'first_name': user_field(user, 'first_name') or '',
            'last_name': user_field(user, 'last_name') or ''}
        return initial

    def deserialize_instance(self, model, data):
        """Rebuild a model instance from data produced by serialize_instance."""
        return deserialize_instance(model, data)

    def serialize_instance(self, instance):
        """Serialize a model instance into session-storable data."""
        return serialize_instance(instance)
def get_adapter(request=None):
    """Instantiate the adapter class configured via the ADAPTER setting."""
    adapter_cls = import_attribute(app_settings.ADAPTER)
    return adapter_cls(request)
| mit |
elijah513/django | django/utils/translation/__init__.py | 108 | 7111 | """
Internationalization support.
"""
from __future__ import unicode_literals
import re
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.encoding import force_text
from django.utils.functional import lazy
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
    """Warning category for problems with translator comments."""
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
    """
    The purpose of this class is to store the actual translation function upon
    receiving the first call to that function. After this is done, changes to
    USE_I18N will have no effect to which function is served upon request. If
    your tests rely on changing USE_I18N, you can delete all the functions
    from _trans.__dict__.

    Note that storing the function with setattr will have a noticeable
    performance effect, as access to the function goes the normal path,
    instead of using __getattr__.
    """

    def __getattr__(self, real_name):
        # Resolved lazily so importing this module never requires configured
        # settings; the chosen backend function is then cached on the
        # instance via setattr, so __getattr__ runs at most once per name.
        from django.conf import settings
        if settings.USE_I18N:
            from django.utils.translation import trans_real as trans
        else:
            from django.utils.translation import trans_null as trans
        setattr(self, real_name, getattr(trans, real_name))
        return getattr(trans, real_name)


# Module-level singleton used by all the public wrapper functions below.
_trans = Trans()

# The Trans class is no more needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
    """Mark *message* for translation without translating it now."""
    return _trans.gettext_noop(message)

ugettext_noop = gettext_noop


def gettext(message):
    """Translate *message* via the lazily selected backend."""
    return _trans.gettext(message)


def ngettext(singular, plural, number):
    """Translate, choosing the singular or plural variant based on *number*."""
    return _trans.ngettext(singular, plural, number)


def ugettext(message):
    """Like gettext(), returning a unicode string."""
    return _trans.ugettext(message)


def ungettext(singular, plural, number):
    """Like ngettext(), returning a unicode string."""
    return _trans.ungettext(singular, plural, number)


def pgettext(context, message):
    """Translate *message*, disambiguated by *context*."""
    return _trans.pgettext(context, message)


def npgettext(context, singular, plural, number):
    """Plural-aware translation disambiguated by *context*."""
    return _trans.npgettext(context, singular, plural, number)

# Lazy variants: evaluation is deferred until the result is actually used
# (e.g. rendered in a template), so the active language at use time applies.
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
def lazy_number(func, resultclass, number=None, **kwargs):
    """Build a lazy plural-aware translation proxy.

    If *number* is a concrete integer the plural form can be chosen up
    front and a plain lazy proxy is returned. Otherwise *number* is the
    name of a key to look up in the right-hand side of a later
    ``%``-interpolation, and the choice is deferred until then.
    """
    if isinstance(number, six.integer_types):
        kwargs['number'] = number
        proxy = lazy(func, resultclass)(**kwargs)
    else:
        class NumberAwareString(resultclass):
            def __mod__(self, rhs):
                # When interpolating a dict, the value under the *number*
                # key decides singular vs. plural.
                if isinstance(rhs, dict) and number:
                    try:
                        number_value = rhs[number]
                    except KeyError:
                        raise KeyError('Your dictionary lacks key \'%s\'. '
                            'Please provide it, because it is required to '
                            'determine whether string is singular or plural.'
                            % number)
                else:
                    number_value = rhs
                kwargs['number'] = number_value
                translated = func(**kwargs)
                try:
                    translated = translated % rhs
                except TypeError:
                    # String doesn't contain a placeholder for the number
                    pass
                return translated

        proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
    return proxy
def ngettext_lazy(singular, plural, number=None):
    """Lazy ngettext(); *number* may be an int or a key resolved at % time."""
    return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)


def ungettext_lazy(singular, plural, number=None):
    """Lazy ungettext(); *number* may be an int or a key resolved at % time."""
    return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)


def npgettext_lazy(context, singular, plural, number=None):
    """Lazy npgettext(); *number* may be an int or a key resolved at % time."""
    return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
def activate(language):
    """Select *language* as the active translation for the current thread."""
    return _trans.activate(language)


def deactivate():
    """Deselect the currently active translation for the current thread."""
    return _trans.deactivate()
class override(ContextDecorator):
    """Context manager/decorator that temporarily activates *language*.

    Passing ``language=None`` deactivates all translations inside the
    block. On exit the previous state is restored.
    """

    def __init__(self, language, deactivate=False):
        self.language = language
        # When True, leave translations deactivated on exit instead of
        # restoring the previous language.
        self.deactivate = deactivate

    def __enter__(self):
        self.old_language = get_language()
        if self.language is not None:
            activate(self.language)
        else:
            deactivate_all()

    def __exit__(self, exc_type, exc_value, traceback):
        if self.old_language is None:
            # No translation was active on entry (e.g. after
            # deactivate_all()); calling activate(None) would be invalid,
            # so restore the "all deactivated" state instead.
            deactivate_all()
        elif self.deactivate:
            deactivate()
        else:
            activate(self.old_language)
def get_language():
    """Return the currently selected language code."""
    return _trans.get_language()


def get_language_bidi():
    """Return True when the active language is written right-to-left."""
    return _trans.get_language_bidi()


def check_for_language(lang_code):
    """Return whether a translation for *lang_code* is available."""
    return _trans.check_for_language(lang_code)


def to_locale(language):
    """Convert a language code to a locale name (delegates to the backend)."""
    return _trans.to_locale(language)


def get_language_from_request(request, check_path=False):
    """Determine the language to use for *request*."""
    return _trans.get_language_from_request(request, check_path)


def get_language_from_path(path):
    """Extract a language code from a URL path, if present."""
    return _trans.get_language_from_path(path)


def templatize(src, origin=None):
    """Delegate template translation-string extraction to the backend."""
    return _trans.templatize(src, origin)


def deactivate_all():
    """Make the active translation a no-op (no language selected)."""
    return _trans.deactivate_all()
def _string_concat(*strings):
    """
    Lazy variant of string concatenation, needed for translations that are
    constructed from multiple parts.
    """
    return ''.join(force_text(s) for s in strings)

# Public lazy wrapper: the pieces are only joined (and thus translated)
# when the result is used.
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
    """Return the LANG_INFO entry for *lang_code*.

    Follows single-step 'fallback' references, and retries with the
    generic language (e.g. 'es' for 'es-ar') before raising KeyError.
    """
    # Imported here to avoid circular imports at module load time.
    from django.conf.locale import LANG_INFO
    try:
        lang_info = LANG_INFO[lang_code]
        if 'fallback' in lang_info and 'name' not in lang_info:
            info = get_language_info(lang_info['fallback'][0])
        else:
            info = lang_info
    except KeyError:
        if '-' not in lang_code:
            raise KeyError("Unknown language code %s." % lang_code)
        generic_lang_code = lang_code.split('-')[0]
        try:
            info = LANG_INFO[generic_lang_code]
        except KeyError:
            raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))

    if info:
        # Attach a lazily translated display name.
        info['name_translated'] = ugettext_lazy(info['name'])
    return info
# Matches any whitespace run that contains a newline. The raw string avoids
# the invalid '\s' escape sequence (a SyntaxWarning/error in modern Python).
trim_whitespace_re = re.compile(r'\s*\n\s*')


def trim_whitespace(s):
    """Return *s* stripped, with newline-spanning whitespace collapsed to one space."""
    return trim_whitespace_re.sub(' ', s.strip())
| bsd-3-clause |
piyush8311/ns3-arp | waf-tools/clang_compilation_database.py | 99 | 1830 | #!/usr/bin/env python
# encoding: utf-8
# Christoph Koke, 2013
"""
Writes the c and cpp compile commands into build/compile_commands.json
see http://clang.llvm.org/docs/JSONCompilationDatabase.html
Usage:
def configure(conf):
conf.load('compiler_cxx')
...
conf.load('clang_compilation_database')
"""
import sys, os, json, shlex, pipes
from waflib import Logs, TaskGen
from waflib.Tools import c, cxx
# shlex.quote is the supported API from Python 3.3 on; pipes.quote is the
# equivalent on older interpreters.
if sys.hexversion >= 0x3030000:
    quote = shlex.quote
else:
    quote = pipes.quote
@TaskGen.feature('*')
@TaskGen.after_method('process_use')
def collect_compilation_db_tasks(self):
    "Add a compilation database entry for compiled tasks"
    try:
        clang_db = self.bld.clang_compilation_database_tasks
    except AttributeError:
        # First task generator to run: create the shared list and schedule
        # the database to be written once the build finishes.
        clang_db = self.bld.clang_compilation_database_tasks = []
        self.bld.add_post_fun(write_compilation_database)

    for task in getattr(self, 'compiled_tasks', []):
        # Only C/C++ compile tasks belong in the compilation database.
        if isinstance(task, (c.c, cxx.cxx)):
            clang_db.append(task)
def write_compilation_database(ctx):
    """Write the clang compilation database as JSON."""
    database_file = ctx.bldnode.make_node('compile_commands.json')
    Logs.info("Build commands will be stored in %s" % database_file.path_from(ctx.path))
    try:
        root = json.load(database_file)
    except (IOError, ValueError):
        # Missing file (first build) or corrupt/empty JSON: start fresh
        # instead of aborting the whole build with a decode error.
        root = []
    # Index previous entries by file so re-built files replace their entry.
    clang_db = dict((x["file"], x) for x in root)
    for task in getattr(ctx, 'clang_compilation_database_tasks', []):
        try:
            cmd = task.last_cmd
        except AttributeError:
            # Task did not run in this build; keep any previous entry.
            continue
        directory = getattr(task, 'cwd', ctx.variant_dir)
        f_node = task.inputs[0]
        filename = os.path.relpath(f_node.abspath(), directory)
        cmd = " ".join(map(quote, cmd))
        entry = {
            "directory": directory,
            "command": cmd,
            "file": filename,
        }
        clang_db[filename] = entry
    root = list(clang_db.values())
    database_file.write(json.dumps(root, indent=2))
| gpl-2.0 |
grandquista/rethinkdb | test/common/http_support/werkzeug/local.py | 147 | 14094 | # -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from werkzeug.wsgi import ClosingIterator
from werkzeug._compat import PY2, implements_bool
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident depending on where it is.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
    """Forget all data *local* holds for the current context.

    This lets locals be used without a manager::

        >>> loc = Local()
        >>> loc.foo = 42
        >>> release_local(loc)
        >>> hasattr(loc, 'foo')
        False

    Works for :class:`Local` as well as :class:`LocalStack` objects.
    Proxies cannot be released this way; keep a reference to the
    underlying local object instead.

    .. versionadded:: 0.6.1
    """
    local.__release_local__()
class Local(object):
    """A namespace whose attributes are isolated per context.

    Data lives in ``__storage__``, keyed by the identifier returned from
    ``__ident_func__`` (the current greenlet or thread ident), so each
    context sees only its own attribute values.
    """

    __slots__ = ('__storage__', '__ident_func__')

    def __init__(self):
        object.__setattr__(self, '__storage__', {})
        object.__setattr__(self, '__ident_func__', get_ident)

    def __iter__(self):
        # Yields (context-ident, attribute-dict) pairs for every context.
        return iter(self.__storage__.items())

    def __call__(self, proxy):
        """Create a proxy for a name."""
        return LocalProxy(self, proxy)

    def __release_local__(self):
        # Drop everything stored for the current context, if anything.
        self.__storage__.pop(self.__ident_func__(), None)

    def __getattr__(self, name):
        namespace = self.__storage__.get(self.__ident_func__())
        if namespace is None or name not in namespace:
            raise AttributeError(name)
        return namespace[name]

    def __setattr__(self, name, value):
        storage = self.__storage__
        ident = self.__ident_func__()
        namespace = storage.get(ident)
        if namespace is None:
            storage[ident] = {name: value}
        else:
            namespace[name] = value

    def __delattr__(self, name):
        namespace = self.__storage__.get(self.__ident_func__())
        if namespace is None or name not in namespace:
            raise AttributeError(name)
        del namespace[name]
class LocalStack(object):
    """This class works similar to a :class:`Local` but keeps a stack
    of objects instead. This is best explained with an example::

        >>> ls = LocalStack()
        >>> ls.push(42)
        >>> ls.top
        42
        >>> ls.push(23)
        >>> ls.top
        23
        >>> ls.pop()
        23
        >>> ls.top
        42

    They can be force released by using a :class:`LocalManager` or with
    the :func:`release_local` function but the correct way is to pop the
    item from the stack after using. When the stack is empty it will
    no longer be bound to the current context (and as such released).

    By calling the stack without arguments it returns a proxy that resolves to
    the topmost item on the stack.

    .. versionadded:: 0.6.1
    """

    def __init__(self):
        # The per-context 'stack' attribute of this Local holds the list.
        self._local = Local()

    def __release_local__(self):
        self._local.__release_local__()

    def _get__ident_func__(self):
        return self._local.__ident_func__

    def _set__ident_func__(self, value):
        # Bypass Local.__setattr__ so the ident function is stored on the
        # slot, not inside the per-context storage.
        object.__setattr__(self._local, '__ident_func__', value)
    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
    del _get__ident_func__, _set__ident_func__

    def __call__(self):
        def _lookup():
            rv = self.top
            if rv is None:
                raise RuntimeError('object unbound')
            return rv
        return LocalProxy(_lookup)

    def push(self, obj):
        """Pushes a new item to the stack"""
        rv = getattr(self._local, 'stack', None)
        if rv is None:
            self._local.stack = rv = []
        rv.append(obj)
        return rv

    def pop(self):
        """Removes the topmost item from the stack, will return the
        old value or `None` if the stack was already empty.
        """
        stack = getattr(self._local, 'stack', None)
        if stack is None:
            return None
        elif len(stack) == 1:
            # Last item: release the whole context binding as documented.
            release_local(self._local)
            return stack[-1]
        else:
            return stack.pop()

    @property
    def top(self):
        """The topmost item on the stack. If the stack is empty,
        `None` is returned.
        """
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None
class LocalManager(object):
    """Local objects cannot manage themselves. For that you need a local
    manager.  You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`.  Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.
    The `ident_func` parameter can be added to override the default ident
    function for the wrapped locals.
    .. versionchanged:: 0.6.1
       Instead of a manager the :func:`release_local` function can be used
       as well.
    .. versionchanged:: 0.7
       `ident_func` was added.
    """
    def __init__(self, locals=None, ident_func=None):
        # Accept no locals, a single Local, or any iterable of locals.
        if locals is None:
            self.locals = []
        elif isinstance(locals, Local):
            self.locals = [locals]
        else:
            self.locals = list(locals)
        if ident_func is not None:
            self.ident_func = ident_func
            # Propagate the custom ident function to every wrapped local,
            # bypassing Local.__setattr__ (which writes to storage).
            for local in self.locals:
                object.__setattr__(local, '__ident_func__', ident_func)
        else:
            self.ident_func = get_ident
    def get_ident(self):
        """Return the context identifier the local objects use internally for
        this context.  You cannot override this method to change the behavior
        but use it to link other context local objects (such as SQLAlchemy's
        scoped sessions) to the Werkzeug locals.
        .. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
           will then be propagated to all the locals passed to the
           constructor.
        """
        return self.ident_func()
    def cleanup(self):
        """Manually clean up the data in the locals for this context.  Call
        this at the end of the request or use `make_middleware()`.
        """
        for local in self.locals:
            release_local(local)
    def make_middleware(self, app):
        """Wrap a WSGI application so that cleaning up happens after
        request end.
        """
        def application(environ, start_response):
            # ClosingIterator invokes self.cleanup once the response
            # iterable is exhausted or closed.
            return ClosingIterator(app(environ, start_response), self.cleanup)
        return application
    def middleware(self, func):
        """Like `make_middleware` but for decorating functions.
        Example usage::
            @manager.middleware
            def application(environ, start_response):
                ...
        The difference to `make_middleware` is that the function passed
        will have all the arguments copied from the inner application
        (name, docstring, module).
        """
        return update_wrapper(self.make_middleware(func), func)
    def __repr__(self):
        return '<%s storages: %d>' % (
            self.__class__.__name__,
            len(self.locals)
        )
@implements_bool
class LocalProxy(object):
    """Acts as a proxy for a werkzeug local.  Forwards all operations to
    a proxied object.  The only operations not supported for forwarding
    are right handed operands and any kind of assignment.
    Example usage::
        from werkzeug.local import Local
        l = Local()
        # these are proxies
        request = l('request')
        user = l('user')
        from werkzeug.local import LocalStack
        _response_local = LocalStack()
        # this is a proxy
        response = _response_local()
    Whenever something is bound to l.user / l.request the proxy objects
    will forward all operations.  If no object is bound a :exc:`RuntimeError`
    will be raised.
    To create proxies to :class:`Local` or :class:`LocalStack` objects,
    call the object as shown above.  If you want to have a proxy to an
    object looked up by a function, you can (as of Werkzeug 0.6.1) pass
    a function to the :class:`LocalProxy` constructor::
        session = LocalProxy(lambda: get_current_request().session)
    .. versionchanged:: 0.6.1
        The class can be instantiated with a callable as well now.
    """
    # '__local' is name-mangled to '_LocalProxy__local'; '__dict__' in
    # __slots__ lets the property below override instance-dict access.
    __slots__ = ('__local', '__dict__', '__name__')
    def __init__(self, local, name=None):
        # Use the mangled name explicitly so the assignment works even
        # though __setattr__ below forwards to the proxied object.
        object.__setattr__(self, '_LocalProxy__local', local)
        object.__setattr__(self, '__name__', name)
    def _get_current_object(self):
        """Return the current object.  This is useful if you want the real
        object behind the proxy at a time for performance reasons or because
        you want to pass the object into a different context.
        """
        # A plain callable (no __release_local__) is simply invoked;
        # otherwise look the bound name up on the Local/LocalStack.
        if not hasattr(self.__local, '__release_local__'):
            return self.__local()
        try:
            return getattr(self.__local, self.__name__)
        except AttributeError:
            raise RuntimeError('no object bound to %s' % self.__name__)
    @property
    def __dict__(self):
        try:
            return self._get_current_object().__dict__
        except RuntimeError:
            raise AttributeError('__dict__')
    def __repr__(self):
        try:
            obj = self._get_current_object()
        except RuntimeError:
            return '<%s unbound>' % self.__class__.__name__
        return repr(obj)
    def __bool__(self):
        try:
            return bool(self._get_current_object())
        except RuntimeError:
            # An unbound proxy is falsy rather than an error.
            return False
    def __unicode__(self):
        try:
            return unicode(self._get_current_object())
        except RuntimeError:
            return repr(self)
    def __dir__(self):
        try:
            return dir(self._get_current_object())
        except RuntimeError:
            return []
    def __getattr__(self, name):
        if name == '__members__':
            return dir(self._get_current_object())
        return getattr(self._get_current_object(), name)
    def __setitem__(self, key, value):
        self._get_current_object()[key] = value
    def __delitem__(self, key):
        del self._get_current_object()[key]
    # Slice dunders only exist on Python 2.
    if PY2:
        __getslice__ = lambda x, i, j: x._get_current_object()[i:j]
        def __setslice__(self, i, j, seq):
            self._get_current_object()[i:j] = seq
        def __delslice__(self, i, j):
            del self._get_current_object()[i:j]
    # Forward every remaining special method to the current object.
    # These must be defined on the class (not via __getattr__) because
    # the interpreter looks dunders up on the type, not the instance.
    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
    __str__ = lambda x: str(x._get_current_object())
    __lt__ = lambda x, o: x._get_current_object() < o
    __le__ = lambda x, o: x._get_current_object() <= o
    __eq__ = lambda x, o: x._get_current_object() == o
    __ne__ = lambda x, o: x._get_current_object() != o
    __gt__ = lambda x, o: x._get_current_object() > o
    __ge__ = lambda x, o: x._get_current_object() >= o
    __cmp__ = lambda x, o: cmp(x._get_current_object(), o)
    __hash__ = lambda x: hash(x._get_current_object())
    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
    __len__ = lambda x: len(x._get_current_object())
    __getitem__ = lambda x, i: x._get_current_object()[i]
    __iter__ = lambda x: iter(x._get_current_object())
    __contains__ = lambda x, i: i in x._get_current_object()
    __add__ = lambda x, o: x._get_current_object() + o
    __sub__ = lambda x, o: x._get_current_object() - o
    __mul__ = lambda x, o: x._get_current_object() * o
    __floordiv__ = lambda x, o: x._get_current_object() // o
    __mod__ = lambda x, o: x._get_current_object() % o
    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
    __pow__ = lambda x, o: x._get_current_object() ** o
    __lshift__ = lambda x, o: x._get_current_object() << o
    __rshift__ = lambda x, o: x._get_current_object() >> o
    __and__ = lambda x, o: x._get_current_object() & o
    __xor__ = lambda x, o: x._get_current_object() ^ o
    __or__ = lambda x, o: x._get_current_object() | o
    __div__ = lambda x, o: x._get_current_object().__div__(o)
    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
    __neg__ = lambda x: -(x._get_current_object())
    __pos__ = lambda x: +(x._get_current_object())
    __abs__ = lambda x: abs(x._get_current_object())
    __invert__ = lambda x: ~(x._get_current_object())
    __complex__ = lambda x: complex(x._get_current_object())
    __int__ = lambda x: int(x._get_current_object())
    __long__ = lambda x: long(x._get_current_object())
    __float__ = lambda x: float(x._get_current_object())
    __oct__ = lambda x: oct(x._get_current_object())
    __hex__ = lambda x: hex(x._get_current_object())
    __index__ = lambda x: x._get_current_object().__index__()
    __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
    __enter__ = lambda x: x._get_current_object().__enter__()
    __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
    __radd__ = lambda x, o: o + x._get_current_object()
    __rsub__ = lambda x, o: o - x._get_current_object()
    __rmul__ = lambda x, o: o * x._get_current_object()
    __rdiv__ = lambda x, o: o / x._get_current_object()
    if PY2:
        __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
    else:
        # On Python 3 plain '/' is already true division.
        __rtruediv__ = __rdiv__
    __rfloordiv__ = lambda x, o: o // x._get_current_object()
    __rmod__ = lambda x, o: o % x._get_current_object()
    __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
| apache-2.0 |
code-sauce/tensorflow | tensorflow/python/kernel_tests/concat_op_test.py | 27 | 27222 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConcatOpTest(test.TestCase):
  """Tests for array_ops.concat: forward output, shape inference, gradients."""
  def testHStack(self):
    # Concatenating along axis 0 stacks the inputs vertically.
    with self.test_session():
      p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
      p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
      c = array_ops.concat([p1, p2], 0)
      params = {
          p1: np.random.rand(4, 4).astype("f"),
          p2: np.random.rand(4, 4).astype("f")
      }
      result = c.eval(feed_dict=params)
      self.assertEqual(result.shape, c.get_shape())
      self.assertAllEqual(result[:4, :], params[p1])
      self.assertAllEqual(result[4:, :], params[p2])
  def testVStack(self):
    # Concatenating along axis 1 stacks the inputs side by side.
    with self.test_session():
      p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
      p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
      c = array_ops.concat([p1, p2], 1)
      params = {
          p1: np.random.rand(4, 4).astype("f"),
          p2: np.random.rand(4, 4).astype("f")
      }
      result = c.eval(feed_dict=params)
      self.assertEqual(result.shape, c.get_shape())
      self.assertAllEqual(result[:, :4], params[p1])
      self.assertAllEqual(result[:, 4:], params[p2])
  def testInt32GPU(self):
    with self.test_session(use_gpu=True):
      p1 = np.random.rand(2, 3).astype("i")
      p2 = np.random.rand(2, 3).astype("i")
      x1 = constant_op.constant(p1)
      x2 = constant_op.constant(p2)
      c = array_ops.concat([x1, x2], 0)
      result = c.eval()
      self.assertAllEqual(result[:2, :], p1)
      self.assertAllEqual(result[2:, :], p2)
  def testRefType(self):
    # Variables feed ref-typed tensors into concat.
    with self.test_session():
      p1 = np.random.rand(4, 4).astype("f")
      p2 = np.random.rand(4, 4).astype("f")
      v1 = variables.Variable(p1)
      v2 = variables.Variable(p2)
      c = array_ops.concat([v1, v2], 0)
      variables.global_variables_initializer().run()
      result = c.eval()
      self.assertEqual(result.shape, c.get_shape())
      self.assertAllEqual(result[:4, :], p1)
      self.assertAllEqual(result[4:, :], p2)
  def _testRandom(self, dtype, use_gpu=False):
    """Concat randomly-shaped rank-5 tensors of `dtype` and verify slices.

    bfloat16 inputs are fed as float32 and cast around the concat, since
    placeholders cannot be fed bfloat16 directly.
    """
    # Random dims of rank 5
    shape = np.random.randint(1, 5, size=5)
    # Random number of tensors, but always > 1.
    num_tensors = np.random.randint(2, 10)
    # Random dim to concat on
    concat_dim = np.random.randint(5)
    params = {}
    if dtype == dtypes.bfloat16:
      dtype_feed = dtypes.float32
    else:
      dtype_feed = dtype
    with self.test_session(use_gpu=use_gpu):
      p = []
      for i in np.arange(num_tensors):
        input_shape = shape
        input_shape[concat_dim] = np.random.randint(1, 5)
        placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
        p.append(placeholder)
        t = dtype_feed.as_numpy_dtype
        params[placeholder] = np.random.rand(*input_shape).astype(t)
      if dtype != dtype_feed:
        concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
      else:
        concat_inputs = p
      c = array_ops.concat(concat_inputs, concat_dim)
      if dtype != dtype_feed:
        c = math_ops.cast(c, dtype_feed)
      result = c.eval(feed_dict=params)
      self.assertEqual(result.shape, c.get_shape())
      cur_offset = 0
      for i in np.arange(num_tensors):
        # The index into the result is the ':' along all dimensions
        # except the concat_dim. slice(0, size) is used for ':', and
        # a list of slices is used to index into result.
        ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]
        ind[concat_dim] = slice(cur_offset,
                                cur_offset + params[p[i]].shape[concat_dim])
        cur_offset += params[p[i]].shape[concat_dim]
        if dtype == dtype_feed:
          self.assertAllEqual(result[ind], params[p[i]])
        else:
          # Casting through bfloat16 loses precision; compare loosely.
          self.assertAllClose(result[ind], params[p[i]], 0.01)
  def testRandom(self):
    self._testRandom(dtypes.float32)
    self._testRandom(dtypes.float32, use_gpu=True)
    self._testRandom(dtypes.int16)
    self._testRandom(dtypes.int32, use_gpu=True)
    self._testRandom(dtypes.bfloat16)
    self._testRandom(dtypes.bfloat16, use_gpu=True)
  def testInvalidConcatDimTypeAndShape(self):
    # Invalid values/axis combinations must raise at graph-build time.
    a = variables.Variable(constant_op.constant(1.0, shape=[1]))
    b = variables.Variable(constant_op.constant(2.0, shape=[1]))
    with self.assertRaises(ValueError):
      array_ops.concat(b, a)
    with self.assertRaises(TypeError):
      array_ops.concat(1, 4.2)
    with self.assertRaises(ValueError):
      array_ops.concat(1, a)
    with self.assertRaises(TypeError):
      array_ops.concat([a, b], a)
    with self.assertRaises(ValueError):
      array_ops.concat([a, b], [3])
    with self.assertRaises(ValueError):
      array_ops.concat([], 0)
    # An integer tensor for shape dim should throw no error.
    array_ops.concat(1, constant_op.constant(0, shape=[]))
    # A non-scalar tensor for shape should throw ValueError.
    with self.assertRaises(ValueError):
      array_ops.concat(1, constant_op.constant(0, shape=[1]))
  def _testGradientsSimple(self, use_gpu):
    """Check concat gradients along the middle axis of rank-3 tensors."""
    # Test both positive and negative concat axis.
    # -2 and 1 correspond to the same axis for 3-dimensional tensors.
    for axis in [-2, 1]:
      with self.test_session(use_gpu=use_gpu):
        inp = []
        inp_tensors = []
        for x in [1, 2, 6]:
          shape = [10, x, 2]
          t = np.random.rand(*shape).astype("f")
          inp.append(t)
          inp_tensors.append(
              constant_op.constant(
                  [float(y) for y in t.flatten()],
                  shape=shape,
                  dtype=dtypes.float32))
        c = array_ops.concat(inp_tensors, axis)
        output_shape = [10, 9, 2]
        grad_inp = np.random.rand(*output_shape).astype("f")
        grad_tensor = constant_op.constant(
            [float(x) for x in grad_inp.flatten()], shape=output_shape)
        grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
        # Concat of the per-input gradients must reproduce the upstream
        # gradient exactly (concat's gradient is a slice).
        concated_grad = array_ops.concat(grad, axis)
        result = concated_grad.eval()
        self.assertAllEqual(result, grad_inp)
  def testGradientsSimpleAll(self):
    self._testGradientsSimple(use_gpu=True)
    self._testGradientsSimple(use_gpu=False)
  def _testGradientsFirstDim(self, use_gpu):
    """Check concat gradients along axis 0 of rank-3 tensors."""
    with self.test_session(use_gpu=use_gpu):
      inp = []
      inp_tensors = []
      for x in [1, 2, 6]:
        shape = [x, 10, 2]
        t = np.random.rand(*shape).astype("f")
        inp.append(t)
        inp_tensors.append(
            constant_op.constant(
                [float(y) for y in t.flatten()],
                shape=shape,
                dtype=dtypes.float32))
      c = array_ops.concat(inp_tensors, 0)
      output_shape = [9, 10, 2]
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, 0)
      result = concated_grad.eval()
      self.assertAllEqual(result, grad_inp)
  def testGradientsFirstDimAll(self):
    self._testGradientsFirstDim(use_gpu=False)
    self._testGradientsFirstDim(use_gpu=True)
  def _testGradientsLastDim(self, use_gpu):
    """Check concat gradients along the last axis of rank-3 tensors."""
    # Test both positive and negative concat axis.
    # -1 and 2 correspond to the same axis for 3-dimensional tensors.
    for axis in [-1, 2]:
      with self.test_session(use_gpu=use_gpu):
        inp = []
        inp_tensors = []
        for x in [1, 2, 6]:
          shape = [10, 2, x]
          t = np.random.rand(*shape).astype("f")
          inp.append(t)
          inp_tensors.append(
              constant_op.constant(
                  [float(y) for y in t.flatten()],
                  shape=shape,
                  dtype=dtypes.float32))
        c = array_ops.concat(inp_tensors, 2)
        output_shape = [10, 2, 9]
        grad_inp = np.random.rand(*output_shape).astype("f")
        grad_tensor = constant_op.constant(
            [float(x) for x in grad_inp.flatten()], shape=output_shape)
        grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
        concated_grad = array_ops.concat(grad, axis)
        result = concated_grad.eval()
        self.assertAllEqual(result, grad_inp)
  def testGradientsLastDimAll(self):
    self._testGradientsLastDim(use_gpu=False)
    self._testGradientsLastDim(use_gpu=True)
  def _RunAndVerifyGradientsRandom(self, use_gpu):
    """Check concat gradients for random rank-5 shapes and a random axis."""
    # Random dims of rank 5
    input_shape = np.random.randint(1, 5, size=5)
    # Random number of tensors
    num_tensors = np.random.randint(12, 20)
    # Random dim to concat on
    concat_dim = np.random.randint(5)
    concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
    with self.test_session(use_gpu=use_gpu):
      inp = []
      inp_tensors = []
      for x in concat_dim_sizes:
        shape = input_shape
        shape[concat_dim] = x
        t = np.random.rand(*shape).astype("f")
        inp.append(t)
        inp_tensors.append(
            constant_op.constant(
                [float(y) for y in t.flatten()],
                shape=shape,
                dtype=dtypes.float32))
      c = array_ops.concat(inp_tensors, concat_dim)
      output_shape = input_shape
      output_shape[concat_dim] = concat_dim_sizes.sum()
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, concat_dim)
      result = concated_grad.eval()
      self.assertAllEqual(result, grad_inp)
  def testGradientsRandom(self):
    for _ in range(5):
      self._RunAndVerifyGradientsRandom(use_gpu=False)
      self._RunAndVerifyGradientsRandom(use_gpu=True)
  def testGradientWithUnknownInputDim(self):
    # Gradients must still flow when input shapes are fully unknown.
    with self.test_session(use_gpu=True):
      x = array_ops.placeholder(dtypes.float32)
      y = array_ops.placeholder(dtypes.float32)
      c = array_ops.concat([x, y], 2)
      output_shape = [10, 2, 9]
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(inp) for inp in grad_inp.flatten()], shape=output_shape)
      grad = gradients_impl.gradients([c], [x, y], [grad_tensor])
      concated_grad = array_ops.concat(grad, 2)
      params = {
          x: np.random.rand(10, 2, 3).astype("f"),
          y: np.random.rand(10, 2, 6).astype("f")
      }
      result = concated_grad.eval(feed_dict=params)
      self.assertAllEqual(result, grad_inp)
  def testShapeError(self):
    # Rank doesn't match.
    with self.assertRaises(ValueError):
      array_ops.concat(
          [constant_op.constant(10.0, shape=[4, 4, 4, 4]),
           constant_op.constant(20.0, shape=[4, 4, 4])
          ], 1)
    # Dimensions don't match in a non-concat dim.
    with self.assertRaises(ValueError):
      array_ops.concat(
          [constant_op.constant(10.0, shape=[1, 2, 1]),
           constant_op.constant(20.0, shape=[3, 2, 1])
          ], 1)
    # concat_dim out of range.
    with self.assertRaises(ValueError):
      array_ops.concat(
          [constant_op.constant(10.0, shape=[4, 4, 4]),
           constant_op.constant(20.0, shape=[4, 4, 4])
          ], 3)
    # concat_dim out of range
    with self.assertRaises(ValueError):
      array_ops.concat(
          [constant_op.constant(10.0, shape=[4, 4, 4]),
           constant_op.constant(20.0, shape=[4, 4, 4])
          ], -4)
  def testShapeWithUnknownConcatDim(self):
    # With an unknown axis only the rank can be inferred.
    p1 = array_ops.placeholder(dtypes.float32)
    c1 = constant_op.constant(10.0, shape=[4, 4, 4, 4])
    p2 = array_ops.placeholder(dtypes.float32)
    c2 = constant_op.constant(20.0, shape=[4, 4, 4, 4])
    dim = array_ops.placeholder(dtypes.int32)
    concat = array_ops.concat([p1, c1, p2, c2], dim)
    self.assertEqual(4, concat.get_shape().ndims)
    # All dimensions unknown.
    concat2 = array_ops.concat([p1, p2], dim)
    self.assertEqual(None, concat2.get_shape())
    # Rank doesn't match.
    c3 = constant_op.constant(30.0, shape=[4, 4, 4])
    with self.assertRaises(ValueError):
      array_ops.concat([p1, c1, p2, c3], dim)
  def testZeroSize(self):
    # Verify that concat doesn't crash and burn for zero size inputs
    np.random.seed(7)
    for use_gpu in False, True:
      with self.test_session(use_gpu=use_gpu) as sess:
        for shape0 in (), (2,):
          axis = len(shape0)
          for shape1 in (), (3,):
            for n0 in 0, 1, 2:
              for n1 in 0, 1, 2:
                x0 = np.random.randn(*(shape0 + (n0,) + shape1))
                x1 = np.random.randn(*(shape0 + (n1,) + shape1))
                correct = np.concatenate([x0, x1], axis=axis)
                # TODO(irving): Make tf.concat handle map, then drop list().
                xs = list(map(constant_op.constant, [x0, x1]))
                c = array_ops.concat(xs, axis)
                self.assertAllEqual(c.eval(), correct)
                # Check gradients
                dc = np.random.randn(*c.get_shape().as_list())
                dxs = sess.run(gradients_impl.gradients(c, xs, dc))
                self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
  def testTensorConcatDim0Grad(self):
    # Numeric gradient check for concat along axis 0.
    x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
    output_shape = [44, 7, 3]
    x_vals = [
        np.random.random_sample(x_shape).astype(np.float64)
        for x_shape in x_shapes
    ]
    with self.test_session():
      xs = [constant_op.constant(x_val) for x_val in x_vals]
      output = array_ops.concat(xs, 0)
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape)
    self.assertLess(err, 1e-11)
  def testTensorConcatDim1Grad(self):
    # Numeric gradient check for concat along axis 1.
    x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
    output_shape = [20, 11, 3]
    x_vals = [
        np.random.random_sample(x_shape).astype(np.float64)
        for x_shape in x_shapes
    ]
    with self.test_session():
      xs = [constant_op.constant(x_val) for x_val in x_vals]
      output = array_ops.concat(xs, 1)
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape)
    self.assertLess(err, 1e-11)
  def testIndexedSlicesConcatDim0Grad(self):
    # gather() after concat produces IndexedSlices gradients.
    x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
    output_shape = [4, 7, 3]
    x_vals = [
        np.random.random_sample(x_shape).astype(np.float64)
        for x_shape in x_shapes
    ]
    with self.test_session():
      xs = [constant_op.constant(x_val) for x_val in x_vals]
      x_concat = array_ops.concat(xs, 0)
      output = array_ops.gather(x_concat, [1, 2, 0, 5])
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape)
    self.assertLess(err, 1e-11)
  def testIndexedSlicesConcatDim1Grad(self):
    x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
    output_shape = [4, 11, 3]
    x_vals = [
        np.random.random_sample(x_shape).astype(np.float64)
        for x_shape in x_shapes
    ]
    with self.test_session():
      xs = [constant_op.constant(x_val) for x_val in x_vals]
      x_concat = array_ops.concat(xs, 1)
      output = array_ops.gather(x_concat, [1, 2, 0, 5])
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape)
    self.assertLess(err, 1e-11)
  def testIndexedSlicesConcatDim2Grad(self):
    x_shapes = [[20, 7, 3], [20, 7, 1], [20, 7, 2]]
    output_shape = [4, 7, 6]
    x_vals = [
        np.random.random_sample(x_shape).astype(np.float64)
        for x_shape in x_shapes
    ]
    with self.test_session():
      xs = [constant_op.constant(x_val) for x_val in x_vals]
      x_concat = array_ops.concat(xs, 2)
      output = array_ops.gather(x_concat, [1, 2, 0, 5])
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape)
    self.assertLess(err, 1e-11)
  def testIndexedSlicesConcatDim1Grad_UnknownInputDim(self):
    x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
    output_shape = [4, 11, 3]
    with self.test_session():
      x_1 = array_ops.placeholder(dtypes.float64)
      x_2 = array_ops.placeholder(dtypes.float64)
      x_3 = array_ops.placeholder(dtypes.float64)
      xs = [x_1, x_2, x_3]
      x_concat = array_ops.concat(xs, 1)
      output = array_ops.gather(x_concat, [1, 2, 0, 5])
      params = {
          x_1: np.random.random_sample(x_shapes[0]).astype(np.float64),
          x_2: np.random.random_sample(x_shapes[1]).astype(np.float64),
          x_3: np.random.random_sample(x_shapes[2]).astype(np.float64)
      }
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape,
                                                    extra_feed_dict=params)
    self.assertLess(err, 1e-11)
  def testConcatTuple(self):
    # A tuple of inputs must behave identically to a list of inputs.
    c1 = np.random.rand(4, 4)
    c2 = np.random.rand(4, 4)
    with self.test_session():
      concat_list_t = array_ops.concat([c1, c2], 0)
      concat_tuple_t = array_ops.concat((c1, c2), 0)
      self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())
  def testConcatNoScalars(self):
    with self.test_session():
      scalar = constant_op.constant(7)
      dim = array_ops.placeholder(dtypes.int32)
      with self.assertRaisesRegexp(
          ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
        array_ops.concat([scalar, scalar, scalar], dim)
  # important as gpu implementation could fail if
  # shared memory is not large for all the inputs
  def testConcatLargeNumberOfTensors(self):
    with self.test_session(use_gpu=True):
      for concat_dim in range(2):
        params = {}
        p = []
        shape = np.array([7, 13])
        if test.is_gpu_available():
          num_tensors = 10000
        else:
          num_tensors = 1000
        for i in np.arange(num_tensors):
          input_shape = shape
          placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
          p.append(placeholder)
          params[placeholder] = np.random.rand(*input_shape).astype(np.float32)
        concat_inputs = p
        c = array_ops.concat(concat_inputs, concat_dim)
        result = c.eval(feed_dict=params)
        self.assertEqual(result.shape, c.get_shape())
        cur_offset = 0
        for i in np.arange(num_tensors):
          # The index into the result is the ':' along all dimensions
          # except the concat_dim. slice(0, size) is used for ':', and
          # a list of slices is used to index into result.
          index = [slice(0, params[p[i]].shape[j]) for j in np.arange(2)]
          index[concat_dim] = slice(cur_offset,
                                    cur_offset + params[p[i]].shape[concat_dim])
          cur_offset += params[p[i]].shape[concat_dim]
          self.assertAllEqual(result[index], params[p[i]])
  def testConcatEmpty(self):
    with self.test_session(use_gpu=True):
      t1 = []
      t2 = []
      output = gen_array_ops._concat_v2([t1, t2], 0).eval()
      self.assertFalse(output)  # Checks that output is empty
  def testConcatInvalidAxis(self):
    with self.assertRaises(ValueError):
      with self.test_session(use_gpu=True):
        t1 = [1]
        t2 = [2]
        gen_array_ops._concat_v2([t1, t2], 1).eval()
  def testConcatNegativeAxis(self):
    with self.test_session(use_gpu=True):
      t1 = [[1, 2, 3], [4, 5, 6]]
      t2 = [[7, 8, 9], [10, 11, 12]]
      c = gen_array_ops._concat_v2([t1, t2], -2)
      self.assertEqual([4, 3], c.get_shape().as_list())
      output = c.eval()
      self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
                          output)
      c = gen_array_ops._concat_v2([t1, t2], -1)
      self.assertEqual([2, 6], c.get_shape().as_list())
      output = c.eval()
      self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
  def _testGradientsForAxis(
      self, inp_tensors, axis, output_shape, feed_dict=None):
    """Concat `inp_tensors` along `axis` and verify the gradient round-trips."""
    with self.test_session():
      c = array_ops.concat(inp_tensors, axis)
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, axis)
      result = concated_grad.eval(feed_dict=feed_dict)
      self.assertAllEqual(result, grad_inp)
  def _testIndexedSlicesGradientsForAxis(
      self, inp_tensors, axis, output_shape, gather_indexes, feed_dict=None):
    """Like _testGradientsForAxis but through gather, exercising IndexedSlices."""
    with self.test_session():
      c = array_ops.gather(
          array_ops.concat(inp_tensors, axis), gather_indexes)
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.gather(
          array_ops.concat(grad, axis), gather_indexes)
      result = concated_grad.eval(feed_dict=feed_dict)
      self.assertAllEqual(result, grad_inp)
  def testGradientsNegativeAxis(self):
    x1 = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    x2 = [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]
    inp_tensors = [constant_op.constant(x1, shape=(2, 3), dtype=dtypes.float32),
                   constant_op.constant(x2, shape=(2, 3), dtype=dtypes.float32)]
    # Test concat gradient with axis == -2
    self._testGradientsForAxis(inp_tensors, -2, output_shape=[4, 3])
    # Test concat gradient with unknown-shape tensors.
    x1_placeholder = array_ops.placeholder(dtypes.float32)
    x2_placeholder = array_ops.placeholder(dtypes.float32)
    inp_tensors_placeholders = [x1_placeholder, x2_placeholder]
    feed_dict = {x1_placeholder: x1, x2_placeholder: x2}
    self._testGradientsForAxis(
        inp_tensors_placeholders, -1, output_shape=[2, 6], feed_dict=feed_dict)
    # Test IndexedSlices concat gradient.
    self._testIndexedSlicesGradientsForAxis(
        inp_tensors, -2, output_shape=[2, 3], gather_indexes=[2, 0])
    # We don't support calculating IndexedSlices concat gradient for
    # negative indexes when rank is not known.
    with self.assertRaises(ValueError):
      self._testIndexedSlicesGradientsForAxis(
          inp_tensors_placeholders, -2, output_shape=[2, 3],
          gather_indexes=[2, 0], feed_dict=feed_dict)
class ConcatOffsetTest(test.TestCase):
  """Tests for the ConcatOffset op, which computes per-input start offsets."""
  def testBasic(self):
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu) as sess:
        cdim = constant_op.constant(1, dtypes.int32)
        s0 = constant_op.constant([2, 3, 5], dtypes.int32)
        s1 = constant_op.constant([2, 7, 5], dtypes.int32)
        s2 = constant_op.constant([2, 20, 5], dtypes.int32)
        off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
        ans = sess.run(off)
        # Offsets accumulate only along the concat dimension (axis 1).
        self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
  def testNotVector(self):
    # Shape inputs must be rank-1 vectors.
    with self.test_session() as sess:
      cdim = constant_op.constant(1, dtypes.int32)
      s0 = constant_op.constant([[2, 3, 5]], dtypes.int32)
      s1 = constant_op.constant([[2, 7, 5]], dtypes.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1])
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   r"should be a vector"):
        sess.run(off)
  def testConcatDimOutOfRange(self):
    with self.test_session() as sess:
      cdim = constant_op.constant(4, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([2, 7, 5], dtypes.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1])
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   r"Concat dim is out of range: 4 vs. 3"):
        sess.run(off)
  def testDimMismatch(self):
    # All shape vectors must have the same number of elements (same rank).
    with self.test_session() as sess:
      cdim = constant_op.constant(1, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([2, 7, 5, 10], dtypes.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1])
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   r"should contain 3 elem"):
        sess.run(off)
  def testSizeMismatch(self):
    # Non-concat dimensions must match across all inputs.
    with self.test_session() as sess:
      cdim = constant_op.constant(1, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([2, 7, 10], dtypes.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1])
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
          r"and doesn't match input 0 with shape \[2 3 5\]."):
        sess.run(off)
  def testNegativeDim(self):
    # Negative axes count from the end (-2 == axis 1 for rank 3).
    with self.test_session(use_gpu=True) as sess:
      cdim = constant_op.constant(-2, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([2, 7, 5], dtypes.int32)
      s2 = constant_op.constant([2, 20, 5], dtypes.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
      ans = sess.run(off)
      self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
      cdim = constant_op.constant(-3, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([1, 3, 5], dtypes.int32)
      s2 = constant_op.constant([3, 3, 5], dtypes.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
      ans = sess.run(off)
      self.assertAllEqual(ans, [[0, 0, 0], [2, 0, 0], [3, 0, 0]])
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
vitorio/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/gdata/tlslite/utils/Cryptlib_TripleDES.py | 359 | 1408 | """Cryptlib 3DES implementation."""
from cryptomath import *
from TripleDES import *
# The cryptlib backend is only registered when the cryptlib_py bindings
# were successfully imported (flag set by the cryptomath module).
if cryptlibpyLoaded:
    def new(key, mode, IV):
        # Factory matching the interface of the other TripleDES backends.
        return Cryptlib_TripleDES(key, mode, IV)
class Cryptlib_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "cryptlib")
self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_3DES)
cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC)
cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key))
cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key)
cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV)
def __del__(self):
cryptlib_py.cryptDestroyContext(self.context)
def encrypt(self, plaintext):
TripleDES.encrypt(self, plaintext)
bytes = stringToBytes(plaintext)
cryptlib_py.cryptEncrypt(self.context, bytes)
return bytesToString(bytes)
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
bytes = stringToBytes(ciphertext)
cryptlib_py.cryptDecrypt(self.context, bytes)
return bytesToString(bytes) | apache-2.0 |
Simage/shinken | test/test_regenerator.py | 14 | 7568 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
import time
from shinken_test import ShinkenTest, unittest
from shinken.objects import Service
from shinken.misc.regenerator import Regenerator
class TestRegenerator(ShinkenTest):
    # Verifies that a broker-side Regenerator, fed with the scheduler's broks,
    # rebuilds host/service objects whose state matches the originals.

    def setUp(self):
        self.setup_with_file('etc/shinken_regenerator.cfg')

    def look_for_same_values(self):
        # Compare every regenerated host/service against its scheduler
        # counterpart: state, state_type, impacts and source problems.
        # Look at Regenerator values
        print "Hosts:", self.rg.hosts.__dict__
        for h in self.rg.hosts:
            orig_h = self.sched.hosts.find_by_name(h.host_name)
            print h.state, orig_h.state
            # Look for same states
            self.assertEqual(orig_h.state, h.state)
            self.assertEqual(orig_h.state_type, h.state_type)
            # Look for same impacts
            for i in h.impacts:
                print "Got impact", i.get_name()
                same_impacts = i.get_name() in [j.get_name() for j in orig_h.impacts]
                self.assertTrue(same_impacts)
            # And look for same source problems
            for i in h.source_problems:
                print "Got source pb", i.get_name()
                same_pbs = i.get_name() in [j.get_name() for j in orig_h.source_problems]
                self.assertTrue(same_pbs)
        print "Services:", self.rg.services.__dict__
        for s in self.rg.services:
            orig_s = self.sched.services.find_srv_by_name_and_hostname(s.host.host_name, s.service_description)
            print s.state, orig_s.state
            self.assertEqual(orig_s.state, s.state)
            self.assertEqual(orig_s.state_type, s.state_type)
            # Look for same impacts too
            for i in s.impacts:
                print "Got impact", i.get_name()
                same_impacts = i.get_name() in [j.get_name() for j in orig_s.impacts]
                self.assertTrue(same_impacts)
            # And look for same source problems
            for i in s.source_problems:
                print "Got source pb", i.get_name()
                same_pbs = i.get_name() in [j.get_name() for j in orig_s.source_problems]
                self.assertTrue(same_pbs)
            # Look for same host
            self.assertEqual(orig_s.host.get_name(), s.host.get_name())

    def test_regenerator(self):
        #
        # Config is not correct because of a wrong relative path
        # in the main config file
        #
        # for h in self.sched.hosts:
        #     h.realm = h.realm.get_name()
        self.sched.conf.skip_initial_broks = False
        self.sched.brokers['Default-Broker'] = {'broks' : {}, 'has_full_broks' : False}
        self.sched.fill_initial_broks('Default-Broker')
        self.rg = Regenerator()

        # Got the initial creation ones
        ids = self.sched.broks.keys()
        ids.sort()
        t0 = time.time()
        for i in ids:
            b = self.sched.broks[i]
            print "Manage b", b.type
            b.prepare()
            self.rg.manage_brok(b)
        t1 = time.time()
        print 'First inc', t1 - t0, len(self.sched.broks)
        self.sched.broks.clear()

        self.look_for_same_values()

        print "Get the hosts and services"
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore the router
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        # Drive the scheduler into a hard DOWN/CRITICAL state, then replay
        # the resulting broks through the regenerator.
        self.scheduler_loop(3, [[host, 2, 'DOWN | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
        self.assertEqual('DOWN', host.state)
        self.assertEqual('HARD', host.state_type)

        ids = self.sched.broks.keys()
        ids.sort()
        t0 = time.time()
        for i in ids:
            b = self.sched.broks[i]
            print "Manage b", b.type
            b.prepare()
            self.rg.manage_brok(b)
        t1 = time.time()
        print 'Time', t1 - t0
        self.sched.broks.clear()

        self.look_for_same_values()

        print 'Time', t1 - t0
        b = svc.get_initial_status_brok()
        b.prepare()

        # Micro-benchmark: cost of rebuilding a Service from brok data.
        print "GO BENCH!"
        t0 = time.time()
        for i in xrange(1, 1000):
            b = svc.get_initial_status_brok()
            b.prepare()
            s = Service({})
            for (prop, value) in b.data.iteritems():
                setattr(s, prop, value)
        t1 = time.time()
        print "Bench end:", t1 - t0

        # Per-property pickling cost/size breakdown for full_status broks.
        times = {}
        sizes = {}
        import cPickle
        data = {}
        cls = svc.__class__
        start = time.time()
        for i in xrange(1, 10000):
            for prop, entry in svc.__class__.properties.items():
                # Is this property intended for brokking?
                if 'full_status' in entry.fill_brok:
                    data[prop] = svc.get_property_value_for_brok(prop, cls.properties)
                    if not prop in times:
                        times[prop] = 0
                        sizes[prop] = 0
                    t0 = time.time()
                    tmp = cPickle.dumps(data[prop], 0)
                    sizes[prop] += len(tmp)
                    times[prop] += time.time() - t0

        print "Times"
        for (k, v) in times.iteritems():
            print "\t%s: %s" % (k, v)
        print "\n\n"
        print "Sizes"
        for (k, v) in sizes.iteritems():
            print "\t%s: %s" % (k, v)
        print "\n"
        print "total time", time.time() - start

    def test_regenerator_load_from_scheduler(self):
        #
        # Config is not correct because of a wrong relative path
        # in the main config file
        #
        # for h in self.sched.hosts:
        #     h.realm = h.realm.get_name()
        # Seed the regenerator directly from scheduler state before replaying
        # the initial broks; results must still match the originals.
        self.rg = Regenerator()
        self.rg.load_from_scheduler(self.sched)

        self.sched.conf.skip_initial_broks = False
        self.sched.brokers['Default-Broker'] = {'broks' : {}, 'has_full_broks' : False}
        self.sched.fill_initial_broks('Default-Broker')

        # Got the initial creation ones
        ids = self.sched.broks.keys()
        ids.sort()
        t0 = time.time()
        for i in ids:
            b = self.sched.broks[i]
            print "Manage b", b.type
            b.prepare()
            self.rg.manage_brok(b)
        t1 = time.time()
        print 'First inc', t1 - t0, len(self.sched.broks)
        self.sched.broks.clear()

        self.look_for_same_values()
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
NickelMedia/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/download.py | 113 | 18403 | # Copyright (c) 2009, 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool import steps
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.config import urls
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.comments import bug_comment_from_commit_text
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
class Clean(AbstractSequencedCommand):
    name = "clean"
    help_text = "Clean the working copy"
    steps = [
        steps.DiscardLocalChanges,
    ]

    def _prepare_state(self, options, args, tool):
        # Always discard local changes, regardless of the CLI flags.
        options.force_clean = True
class Update(AbstractSequencedCommand):
    # Thin sequenced command used (also internally) to sync the checkout.
    name = "update"
    help_text = "Update working copy (used internally)"
    steps = [
        steps.DiscardLocalChanges,
        steps.Update,
    ]
class Build(AbstractSequencedCommand):
    name = "build"
    help_text = "Update working copy and build"
    steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.Build,
    ]

    def _prepare_state(self, options, args, tool):
        # Force the build step on, independent of a --build flag.
        options.build = True
class BuildAndTest(AbstractSequencedCommand):
    # Same as Build, plus a test run at the end.
    name = "build-and-test"
    help_text = "Update working copy, build, and run the tests"
    steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.Build,
        steps.RunTests,
    ]
class Land(AbstractSequencedCommand):
    name = "land"
    help_text = "Land the current working directory diff and updates the associated bug if any"
    argument_names = "[BUGID]"
    show_in_main_help = True
    steps = [
        steps.AddSvnMimetypeForPng,
        steps.UpdateChangeLogsWithReviewer,
        steps.ValidateReviewer,
        steps.ValidateChangeLogs, # We do this after UpdateChangeLogsWithReviewer to avoid not having to cache the diff twice.
        steps.Build,
        steps.RunTests,
        steps.Commit,
        steps.CloseBugForLandDiff,
    ]
    long_help = """land commits the current working copy diff (just as svn or git commit would).
land will NOT build and run the tests before committing, but you can use the --build option for that.
If a bug id is provided, or one can be found in the ChangeLog land will update the bug after committing."""

    def _prepare_state(self, options, args, tool):
        # Use the explicit BUGID argument when given; otherwise derive the
        # bug id from the ChangeLog entries in the current diff.
        changed_files = self._tool.scm().changed_files(options.git_commit)
        return {
            "changed_files": changed_files,
            "bug_id": (args and args[0]) or tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files),
        }
class LandCowhand(AbstractSequencedCommand):
    # Gender-blind term for cowboy, see: http://en.wiktionary.org/wiki/cowhand
    name = "land-cowhand"
    help_text = "Prepares a ChangeLog and lands the current working directory diff."
    steps = [
        steps.PrepareChangeLog,
        steps.EditChangeLog,
        steps.CheckStyle,
        steps.ConfirmDiff,
        steps.Build,
        steps.RunTests,
        steps.Commit,
        steps.CloseBugForLandDiff,
    ]

    def _prepare_state(self, options, args, tool):
        # The freshly generated ChangeLog would always trip the style
        # checker, so exclude changelog checks from the style step.
        options.check_style_filter = "-changelog"
class LandCowboy(LandCowhand):
    # Deprecated alias for LandCowhand, kept for backward compatibility.
    name = "land-cowboy"

    def _prepare_state(self, options, args, tool):
        _log.warning("land-cowboy is deprecated, use land-cowhand instead.")
        LandCowhand._prepare_state(self, options, args, tool)
class CheckStyleLocal(AbstractSequencedCommand):
    # Style-checks the local diff without touching the checkout.
    name = "check-style-local"
    help_text = "Run check-webkit-style on the current working directory diff"
    steps = [
        steps.CheckStyle,
    ]
class AbstractPatchProcessingCommand(Command):
    # Subclasses must implement the methods below. We don't declare them here
    # because we want to be able to implement them with mix-ins.
    #
    # pylint: disable=E1101
    # def _fetch_list_of_patches_to_process(self, options, args, tool):
    # def _prepare_to_process(self, options, args, tool):
    # def _process_patch(self, options, args, tool):

    @staticmethod
    def _collect_patches_by_bug(patches):
        # Group the flat patch list into {bug_id: [patches]} for reporting.
        bugs_to_patches = {}
        for patch in patches:
            bugs_to_patches[patch.bug_id()] = bugs_to_patches.get(patch.bug_id(), []) + [patch]
        return bugs_to_patches

    def execute(self, options, args, tool):
        # Template method: prepare, fetch the patch list, then process each.
        self._prepare_to_process(options, args, tool)
        patches = self._fetch_list_of_patches_to_process(options, args, tool)

        # It's nice to print out total statistics.
        bugs_to_patches = self._collect_patches_by_bug(patches)
        _log.info("Processing %s from %s." % (pluralize("patch", len(patches)), pluralize("bug", len(bugs_to_patches))))

        for patch in patches:
            self._process_patch(patch, options, args, tool)
class AbstractPatchSequencingCommand(AbstractPatchProcessingCommand):
    """Runs a one-time 'prepare' step sequence, then a 'main' sequence per patch."""
    prepare_steps = None
    main_steps = None

    def __init__(self):
        self._prepare_sequence = StepSequence(self.prepare_steps)
        self._main_sequence = StepSequence(self.main_steps)
        # Combined, de-duplicated option list from both sequences.
        # (The previous dead "options = []" initialization was removed.)
        options = sorted(set(self._prepare_sequence.options() + self._main_sequence.options()))
        AbstractPatchProcessingCommand.__init__(self, options)

    def _prepare_to_process(self, options, args, tool):
        try:
            self.state = self._prepare_state(options, args, tool)
        except ScriptError as e:  # "as" form works on Python 2.6+ and 3.x
            _log.error(e.message_with_output())
            self._exit(e.exit_code or 2)
        self._prepare_sequence.run_and_handle_errors(tool, options, self.state)

    def _process_patch(self, patch, options, args, tool):
        # Run the main sequence with a per-patch copy of the shared state.
        state = {}
        state.update(self.state or {})
        state["patch"] = patch
        self._main_sequence.run_and_handle_errors(tool, options, state)

    def _prepare_state(self, options, args, tool):
        # Subclasses may override to seed shared state; default is none.
        return None
class ProcessAttachmentsMixin(object):
    # Treats each positional argument as a Bugzilla attachment (patch) id.
    def _fetch_list_of_patches_to_process(self, options, args, tool):
        return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args)
class ProcessBugsMixin(object):
    # Treats each positional argument as a bug id; prefers reviewed patches,
    # falling back to all patches when none are reviewed.
    def _fetch_list_of_patches_to_process(self, options, args, tool):
        all_patches = []
        for bug_id in args:
            patches = tool.bugs.fetch_bug(bug_id).reviewed_patches()
            _log.info("%s found on bug %s." % (pluralize("reviewed patch", len(patches)), bug_id))
            all_patches += patches
        if not all_patches:
            _log.info("No reviewed patches found, looking for unreviewed patches.")
            for bug_id in args:
                patches = tool.bugs.fetch_bug(bug_id).patches()
                _log.info("%s found on bug %s." % (pluralize("patch", len(patches)), bug_id))
                all_patches += patches
        return all_patches
class ProcessURLsMixin(object):
    # Treats each positional argument as a bug or attachment URL.
    def _fetch_list_of_patches_to_process(self, options, args, tool):
        all_patches = []
        for url in args:
            bug_id = urls.parse_bug_id(url)
            if bug_id:
                patches = tool.bugs.fetch_bug(bug_id).patches()
                _log.info("%s found on bug %s." % (pluralize("patch", len(patches)), bug_id))
                all_patches += patches

            attachment_id = urls.parse_attachment_id(url)
            if attachment_id:
                # fetch_attachment() returns a single Attachment, not a list;
                # "+=" would try to iterate it. Append the object instead.
                all_patches.append(tool.bugs.fetch_attachment(attachment_id))

        return all_patches
class CheckStyle(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
    # Downloads each attachment, applies it, and style-checks the result.
    name = "check-style"
    help_text = "Run check-webkit-style on the specified attachments"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    main_steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.ApplyPatch,
        steps.CheckStyle,
    ]
class BuildAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
    # Applies each attachment on a clean, updated checkout and builds it.
    name = "build-attachment"
    help_text = "Apply and build patches from bugzilla"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    main_steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.ApplyPatch,
        steps.Build,
    ]
class BuildAndTestAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
    # Same as BuildAttachment, plus a test run after the build.
    name = "build-and-test-attachment"
    help_text = "Apply, build, and test patches from bugzilla"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    main_steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.ApplyPatch,
        steps.Build,
        steps.RunTests,
    ]
class AbstractPatchApplyingCommand(AbstractPatchSequencingCommand):
    # Base for commands that apply patches locally (optionally as commits).
    prepare_steps = [
        steps.EnsureLocalCommitIfNeeded,
        steps.CleanWorkingDirectory,
        steps.Update,
    ]
    main_steps = [
        steps.ApplyPatchWithLocalCommit,
    ]
    long_help = """Updates the working copy.
Downloads and applies the patches, creating local commits if necessary."""
class ApplyAttachment(AbstractPatchApplyingCommand, ProcessAttachmentsMixin):
    # Applies attachments given by id.
    name = "apply-attachment"
    help_text = "Apply an attachment to the local working directory"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    show_in_main_help = True
class ApplyFromBug(AbstractPatchApplyingCommand, ProcessBugsMixin):
    # Applies the (reviewed) patches attached to the given bugs.
    name = "apply-from-bug"
    help_text = "Apply reviewed patches from provided bugs to the local working directory"
    argument_names = "BUGID [BUGIDS]"
    show_in_main_help = True
class ApplyWatchList(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
    name = "apply-watchlist"
    help_text = "Applies the watchlist to the specified attachments"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    main_steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.ApplyPatch,
        steps.ApplyWatchList,
    ]
    # Fixed a stray fourth quote ('""""Applies') that leaked a literal '"'
    # into the user-visible help text.
    long_help = """Applies the watchlist to the specified attachments.
Downloads the attachment, applies it locally, runs the watchlist against it, and updates the bug with the result."""
class AbstractPatchLandingCommand(AbstractPatchSequencingCommand):
    # Shared step list for every "land-*" variant below.
    main_steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.ApplyPatch,
        steps.ValidateChangeLogs,
        steps.ValidateReviewer,
        steps.Build,
        steps.RunTests,
        steps.Commit,
        steps.ClosePatch,
        steps.CloseBug,
    ]
    long_help = """Checks to make sure builders are green.
Updates the working copy.
Applies the patch.
Builds.
Runs the layout tests.
Commits the patch.
Clears the flags on the patch.
Closes the bug if no patches are marked for review."""
class LandAttachment(AbstractPatchLandingCommand, ProcessAttachmentsMixin):
    # Lands patches given by attachment id.
    name = "land-attachment"
    help_text = "Land patches from bugzilla, optionally building and testing them first"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    show_in_main_help = True
class LandFromBug(AbstractPatchLandingCommand, ProcessBugsMixin):
    # Lands every (reviewed) patch on the given bugs.
    name = "land-from-bug"
    help_text = "Land all patches on the given bugs, optionally building and testing them first"
    argument_names = "BUGID [BUGIDS]"
    show_in_main_help = True
class LandFromURL(AbstractPatchLandingCommand, ProcessURLsMixin):
    # Lands patches referenced by bug or attachment URLs.
    name = "land-from-url"
    help_text = "Land all patches on the given URLs, optionally building and testing them first"
    argument_names = "URL [URLS]"
class ValidateChangelog(AbstractSequencedCommand):
    name = "validate-changelog"
    help_text = "Validate that the ChangeLogs and reviewers look reasonable"
    long_help = """Examines the current diff to see whether the ChangeLogs
and the reviewers listed in the ChangeLogs look reasonable.
"""
    steps = [
        steps.ValidateChangeLogs,
        steps.ValidateReviewer,
    ]
class AbstractRolloutPrepCommand(AbstractSequencedCommand):
    # Base for the rollout commands: parses revisions/reason and gathers
    # the bug info for the earliest broken revision.
    argument_names = "REVISION [REVISIONS] REASON"

    def _commit_info(self, revision):
        commit_info = self._tool.checkout().commit_info_for_revision(revision)
        if commit_info and commit_info.bug_id():
            # Note: Don't print a bug URL here because it will confuse the
            # SheriffBot because the SheriffBot just greps the output
            # of create-rollout for bug URLs. It should do better
            # parsing instead.
            _log.info("Preparing rollout for bug %s." % commit_info.bug_id())
        else:
            _log.info("Unable to parse bug number from diff.")
        return commit_info

    def _prepare_state(self, options, args, tool):
        # args[0] may contain several whitespace-separated revision numbers;
        # args[1] is the human-readable rollout reason.
        revision_list = []
        for revision in str(args[0]).split():
            if revision.isdigit():
                revision_list.append(int(revision))
            else:
                raise ScriptError(message="Invalid svn revision number: " + revision)
        revision_list.sort()

        # We use the earliest revision for the bug info
        earliest_revision = revision_list[0]
        state = {
            "revision": earliest_revision,
            "revision_list": revision_list,
            "reason": args[1],
        }
        commit_info = self._commit_info(earliest_revision)
        if commit_info:
            state["bug_id"] = commit_info.bug_id()
            cc_list = sorted([party.bugzilla_email()
                              for party in commit_info.responsible_parties()
                              if party.bugzilla_email()])
            # FIXME: We should used the list as the canonical representation.
            state["bug_cc"] = ",".join(cc_list)
        return state
class PrepareRollout(AbstractRolloutPrepCommand):
    # Prepares (but does not commit or post) a rollout in the working copy.
    name = "prepare-rollout"
    help_text = "Revert the given revision(s) in the working copy and prepare ChangeLogs with revert reason"
    long_help = """Updates the working copy.
Applies the inverse diff for the provided revision(s).
Creates an appropriate rollout ChangeLog, including a trac link and bug link.
"""
    steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.RevertRevision,
        steps.PrepareChangeLogForRevert,
    ]
class CreateRollout(AbstractRolloutPrepCommand):
    """Files a new tracking bug (blocking the original) and posts a rollout patch."""
    name = "create-rollout"
    help_text = "Creates a bug to track the broken SVN revision(s) and uploads a rollout patch."
    steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.RevertRevision,
        steps.CreateBug,
        steps.PrepareChangeLogForRevert,
        steps.PostDiffForRevert,
    ]

    def _prepare_state(self, options, args, tool):
        state = AbstractRolloutPrepCommand._prepare_state(self, options, args, tool)
        # Currently, state["bug_id"] points to the bug that caused the
        # regression. We want to create a new bug that blocks the old bug
        # so we move state["bug_id"] to state["bug_blocked"] and delete the
        # old state["bug_id"] so that steps.CreateBug will actually create
        # the new bug that we want (and subsequently store its bug id into
        # state["bug_id"])
        state["bug_blocked"] = state["bug_id"]
        del state["bug_id"]
        state["bug_title"] = "REGRESSION(r%s): %s" % (state["revision"], state["reason"])
        state["bug_description"] = "%s broke the build:\n%s" % (urls.view_revision_url(state["revision"]), state["reason"])
        # FIXME: If we had more context here, we could link to other open bugs
        # that mention the test that regressed.
        if options.parent_command == "sheriff-bot":
            # Typo fixed in the user-visible text: "case pain" -> "cause pain".
            state["bug_description"] += """
This is an automatic bug report generated by the sheriff-bot. If this bug
report was created because of a flaky test, please file a bug for the flaky
test (if we don't already have one on file) and dup this bug against that bug
so that we can track how often these flaky tests cause pain.

"Only you can prevent forest fires." -- Smokey the Bear
"""
        return state
class Rollout(AbstractRolloutPrepCommand):
    # Full interactive rollout: revert, edit, confirm, build, commit, reopen.
    name = "rollout"
    show_in_main_help = True
    help_text = "Revert the given revision(s) in the working copy and optionally commit the revert and re-open the original bug"
    long_help = """Updates the working copy.
Applies the inverse diff for the provided revision.
Creates an appropriate rollout ChangeLog, including a trac link and bug link.
Opens the generated ChangeLogs in $EDITOR.
Shows the prepared diff for confirmation.
Commits the revert and updates the bug (including re-opening the bug if necessary)."""
    steps = [
        steps.DiscardLocalChanges,
        steps.Update,
        steps.RevertRevision,
        steps.PrepareChangeLogForRevert,
        steps.EditChangeLog,
        steps.ConfirmDiff,
        steps.Build,
        steps.Commit,
        steps.ReopenBugAfterRollout,
    ]
| bsd-3-clause |
pwong-mapr/private-hue | desktop/core/ext-py/Django-1.4.5/django/core/paginator.py | 94 | 5058 | from math import ceil
class InvalidPage(Exception):
    # Base class for all pagination errors.
    pass
class PageNotAnInteger(InvalidPage):
    # Raised when the requested page number cannot be converted to int.
    pass
class EmptyPage(InvalidPage):
    # Raised when the requested page is out of range or has no results.
    pass
class Paginator(object):
    """Splits object_list into pages of per_page items.

    count and num_pages are computed lazily and cached; orphans lets the
    final page absorb a short trailing page.
    """

    def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True):
        self.object_list = object_list
        self.per_page = int(per_page)
        self.orphans = int(orphans)
        self.allow_empty_first_page = allow_empty_first_page
        # Lazily-computed caches for count / num_pages.
        self._num_pages = self._count = None

    def validate_number(self, number):
        "Validates the given 1-based page number."
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        if number > self.num_pages:
            # An out-of-range page 1 is tolerated when configured to allow
            # an empty first page; anything else is an error.
            if not (number == 1 and self.allow_empty_first_page):
                raise EmptyPage('That page contains no results')
        return number

    def page(self, number):
        "Returns a Page object for the given 1-based page number."
        number = self.validate_number(number)
        start = (number - 1) * self.per_page
        end = start + self.per_page
        # Let the last page swallow the orphans.
        if end + self.orphans >= self.count:
            end = self.count
        return Page(self.object_list[start:end], number, self)

    def _get_count(self):
        "Returns the total number of objects, across all pages."
        if self._count is None:
            try:
                self._count = self.object_list.count()
            except (AttributeError, TypeError):
                # AttributeError: object_list has no count() method.
                # TypeError: count() demands arguments (plain list/tuple).
                self._count = len(self.object_list)
        return self._count
    count = property(_get_count)

    def _get_num_pages(self):
        "Returns the total number of pages."
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                # Orphans reduce the effective item count, but there is
                # always at least one page when empty pages are allowed.
                hits = max(1, self.count - self.orphans)
                self._num_pages = int(ceil(hits / float(self.per_page)))
        return self._num_pages
    num_pages = property(_get_num_pages)

    def _get_page_range(self):
        """
        Returns a 1-based range of pages for iterating through within
        a template for loop.
        """
        return range(1, self.num_pages + 1)
    page_range = property(_get_page_range)
QuerySetPaginator = Paginator # For backwards-compatibility.
class Page(object):
    """A single page of results plus navigation/index helpers."""

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __len__(self):
        return len(self.object_list)

    def __getitem__(self, index):
        # Materialize first so a QuerySet is evaluated once, not per item.
        return list(self.object_list)[index]

    # The following four methods are sequence-protocol shims kept for
    # Python < 2.6 compatibility (collections.Sequence would provide them).
    def __iter__(self):
        index = 0
        while True:
            try:
                item = self[index]
            except IndexError:
                return
            yield item
            index += 1

    def __contains__(self, value):
        return any(item == value for item in self)

    def index(self, value):
        for position, item in enumerate(self):
            if item == value:
                return position
        raise ValueError

    def count(self, value):
        return sum([1 for item in self if item == value])
    # End of compatibility methods.

    def has_next(self):
        return self.number < self.paginator.num_pages

    def has_previous(self):
        return self.number > 1

    def has_other_pages(self):
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        return self.number + 1

    def previous_page_number(self):
        return self.number - 1

    def start_index(self):
        """
        Returns the 1-based index of the first object on this page,
        relative to total objects in the paginator.
        """
        # Special case, return zero if no items.
        if self.paginator.count == 0:
            return 0
        return self.paginator.per_page * (self.number - 1) + 1

    def end_index(self):
        """
        Returns the 1-based index of the last object on this page,
        relative to total objects found (hits).
        """
        # The last page may hold orphans, so report the true total there.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page
| apache-2.0 |
tsdmgz/ansible | lib/ansible/modules/network/ios/ios_interface.py | 19 | 14246 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_interface
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage Interface on Cisco IOS network devices
description:
- This module provides declarative management of Interfaces
on Cisco IOS network devices.
notes:
- Tested against IOS 15.6
options:
name:
description:
- Name of the Interface.
required: true
description:
description:
- Description of Interface.
enabled:
description:
- Interface link status.
speed:
description:
- Interface link speed.
mtu:
description:
- Maximum size of transmit packet.
duplex:
description:
- Interface link status
default: auto
choices: ['full', 'half', 'auto']
tx_rate:
description:
- Transmit rate in bits per second (bps).
rx_rate:
description:
- Receiver rate in bits per second (bps).
neighbors:
description:
- Check the operational state of given interface C(name) for LLDP neighbor.
- The following suboptions are available.
suboptions:
host:
description:
- "LLDP neighbor host for given interface C(name)."
port:
description:
- "LLDP neighbor port to which given interface C(name) is connected."
aggregate:
description: List of Interfaces definitions.
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state argument which are
I(state) with values C(up)/C(down), I(tx_rate) and I(rx_rate).
default: 10
state:
description:
- State of the Interface configuration, C(up) means present and
operationally up and C(down) means present and operationally C(down)
default: present
choices: ['present', 'absent', 'up', 'down']
"""
EXAMPLES = """
- name: configure interface
ios_interface:
name: GigabitEthernet0/2
description: test-interface
speed: 100
duplex: half
mtu: 512
- name: remove interface
ios_interface:
name: Loopback9
state: absent
- name: make interface up
ios_interface:
name: GigabitEthernet0/2
enabled: True
- name: make interface down
ios_interface:
name: GigabitEthernet0/2
enabled: False
- name: Check intent arguments
ios_interface:
name: GigabitEthernet0/2
state: up
tx_rate: ge(0)
rx_rate: le(0)
- name: Check neighbors intent arguments
ios_interface:
name: Gi0/0
neighbors:
- port: eth0
host: netdev
- name: Config + intent
ios_interface:
name: GigabitEthernet0/2
enabled: False
state: down
- name: Add interface using aggregate
ios_interface:
aggregate:
- { name: GigabitEthernet0/1, mtu: 256, description: test-interface-1 }
- { name: GigabitEthernet0/2, mtu: 516, description: test-interface-2 }
duplex: full
speed: 100
state: present
- name: Delete interface using aggregate
ios_interface:
aggregate:
- name: Loopback9
- name: Loopback10
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface GigabitEthernet0/2
- description test-interface
- duplex half
- mtu 512
"""
import re
from copy import deepcopy
from time import sleep
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.ios import get_config, load_config
from ansible.module_utils.ios import ios_argument_spec, check_args
from ansible.module_utils.netcfg import NetworkConfig
from ansible.module_utils.network_common import conditional, remove_default_spec
def validate_mtu(value, module):
    """Fail the module when *value* lies outside the 64-9600 byte range.

    A falsy value (None/0/'') means "not configured" and is accepted as-is.
    """
    if not value:
        return
    if not 64 <= int(value) <= 9600:
        module.fail_json(msg='mtu must be between 64 and 9600')
def validate_param_values(module, obj, param=None):
    """Run every matching validate_<key>() function over *param* values.

    *obj* supplies the keys to check; *param* defaults to module.params.
    Keys without a module-level validator are silently skipped.
    """
    if param is None:
        param = module.params
    for key in obj:
        check = globals().get('validate_%s' % key)
        if not callable(check):
            continue
        check(param.get(key), module)
def parse_shutdown(configobj, name):
    """Return True when interface *name* carries a 'shutdown' line.

    *configobj* maps 'interface <name>' to a config block whose .children
    are the stanza's lines. Replaces the manual True/False branch with
    bool() for the same result.
    """
    cfg = configobj['interface %s' % name]
    cfg = '\n'.join(cfg.children)
    return bool(re.search(r'shutdown', cfg, re.M))
def parse_config_argument(configobj, name, arg=None):
    """Return the value following *arg* in interface *name*'s stanza, or None."""
    block = configobj['interface %s' % name]
    body = '\n'.join(block.children)
    found = re.search(r'%s (.+)$' % arg, body, re.M)
    if found is None:
        return None
    return found.group(1)
def search_obj_in_list(name, lst):
    """Return the first dict in *lst* whose 'name' entry equals *name*,
    or None when no entry matches."""
    return next((entry for entry in lst if entry['name'] == name), None)
def add_command_to_interface(interface, cmd, commands):
    """Append *cmd* to *commands*, first emitting the ``interface ...``
    header line if it is not already present in the list."""
    needs_header = interface not in commands
    if needs_header:
        commands.append(interface)
    commands.append(cmd)
def map_config_to_obj(module):
    """Build the 'have' side of the diff: one dict per interface found in
    the device's running configuration."""
    config = get_config(module)
    configobj = NetworkConfig(indent=1, contents=config)
    names = re.findall(r'^interface (\S+)', config, re.M)
    if not names:
        return list()
    instances = list()
    # set() drops duplicate interface stanzas before parsing each section.
    for name in set(names):
        instances.append({
            'name': name,
            'description': parse_config_argument(configobj, name, 'description'),
            'speed': parse_config_argument(configobj, name, 'speed'),
            'duplex': parse_config_argument(configobj, name, 'duplex'),
            'mtu': parse_config_argument(configobj, name, 'mtu'),
            'disable': bool(parse_shutdown(configobj, name)),
            'state': 'present'
        })
    return instances
def map_params_to_obj(module):
    """Build the 'want' side of the diff from the module parameters.

    With ``aggregate`` set, each aggregate item is completed from the
    top-level parameters; otherwise a single entry is built from the
    top-level parameters directly.  In both cases the boolean 'enabled'
    parameter is folded into a 'disable' key.
    """
    obj = []
    aggregate = module.params.get('aggregate')
    if aggregate:
        for item in aggregate:
            # Fill unset per-item values from the top-level parameters.
            for key in item:
                if item.get(key) is None:
                    item[key] = module.params[key]
            validate_param_values(module, item, item)
            entry = item.copy()
            entry['disable'] = not entry['enabled']
            obj.append(entry)
    else:
        keys = ('name', 'description', 'speed', 'mtu', 'duplex', 'state',
                'delay', 'tx_rate', 'rx_rate', 'neighbors')
        params = dict((key, module.params[key]) for key in keys)
        # Validate before adding the synthetic 'disable' key, so only real
        # module parameters are passed through the validators.
        validate_param_values(module, params)
        params['disable'] = not module.params['enabled']
        obj.append(params)
    return obj
def map_obj_to_commands(updates):
    """Diff *updates* -- a ``(want, have)`` pair of interface dicts -- into
    the list of IOS configuration commands needed to converge the device.

    For an existing interface only changed attributes are emitted; for a
    new interface the full attribute set is emitted under its
    ``interface`` header.
    """
    commands = list()
    want, have = updates
    # Attributes that translate 1:1 into '<name> <value>' config lines.
    args = ('speed', 'description', 'duplex', 'mtu')
    for w in want:
        name = w['name']
        disable = w['disable']
        state = w['state']
        obj_in_have = search_obj_in_list(name, have)
        interface = 'interface ' + name
        if state == 'absent' and obj_in_have:
            commands.append('no ' + interface)
        elif state in ('present', 'up', 'down'):
            if obj_in_have:
                # Emit only the attributes that differ from the running config.
                for item in args:
                    candidate = w.get(item)
                    running = obj_in_have.get(item)
                    if candidate != running:
                        if candidate:
                            cmd = item + ' ' + str(candidate)
                            add_command_to_interface(interface, cmd, commands)
                if disable and not obj_in_have.get('disable', False):
                    add_command_to_interface(interface, 'shutdown', commands)
                elif not disable and obj_in_have.get('disable', False):
                    add_command_to_interface(interface, 'no shutdown', commands)
            else:
                commands.append(interface)
                for item in args:
                    value = w.get(item)
                    if value:
                        commands.append(item + ' ' + str(value))
                # BUGFIX: a newly configured interface that is requested
                # disabled must be shut down.  The previous code emitted
                # 'no shutdown' here, which *enables* the port -- the
                # opposite of the 'disable' intent and inconsistent with
                # the existing-interface branch above.
                if disable:
                    commands.append('shutdown')
    return commands
def check_declarative_intent_params(module, want, result):
    """Verify the declarative-intent parameters (state up/down, tx_rate,
    rx_rate, neighbors) against the live device and return the list of
    conditions that were not satisfied (empty list == all satisfied)."""
    failed_conditions = []
    # Cached output of 'show lldp neighbors detail'; fetched at most once.
    have_neighbors = None
    for w in want:
        want_state = w.get('state')
        want_tx_rate = w.get('tx_rate')
        want_rx_rate = w.get('rx_rate')
        want_neighbors = w.get('neighbors')
        # Nothing declarative requested for this interface -> skip it.
        if want_state not in ('up', 'down') and not want_tx_rate and not want_rx_rate and not want_neighbors:
            continue
        # Give the device time to settle after a configuration change.
        if result['changed']:
            sleep(w['delay'])
        command = 'show interfaces %s' % w['name']
        rc, out, err = exec_command(module, command)
        if rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
        if want_state in ('up', 'down'):
            # Parse e.g. 'line protocol is up' from the show output.
            match = re.search(r'%s (\w+)' % 'line protocol is', out, re.M)
            have_state = None
            if match:
                have_state = match.group(1)
            if have_state is None or not conditional(want_state, have_state.strip()):
                failed_conditions.append('state ' + 'eq(%s)' % want_state)
        if want_tx_rate:
            # 'output rate <n>' is the transmit rate reported by IOS.
            match = re.search(r'%s (\d+)' % 'output rate', out, re.M)
            have_tx_rate = None
            if match:
                have_tx_rate = match.group(1)
            if have_tx_rate is None or not conditional(want_tx_rate, have_tx_rate.strip(), cast=int):
                failed_conditions.append('tx_rate ' + want_tx_rate)
        if want_rx_rate:
            # 'input rate <n>' is the receive rate reported by IOS.
            match = re.search(r'%s (\d+)' % 'input rate', out, re.M)
            have_rx_rate = None
            if match:
                have_rx_rate = match.group(1)
            if have_rx_rate is None or not conditional(want_rx_rate, have_rx_rate.strip(), cast=int):
                failed_conditions.append('rx_rate ' + want_rx_rate)
        if want_neighbors:
            have_host = []
            have_port = []
            if have_neighbors is None:
                rc, have_neighbors, err = exec_command(module, 'show lldp neighbors detail')
                if rc != 0:
                    module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
            if have_neighbors:
                # Each LLDP record starts with 'Local Intf: <name>'; collect
                # the neighbor host/port lines of the record for this interface.
                lines = have_neighbors.strip().split('Local Intf: ')
                for line in lines:
                    field = line.split('\n')
                    if field[0].strip() == w['name']:
                        for item in field:
                            if item.startswith('System Name:'):
                                have_host.append(item.split(':')[1].strip())
                            if item.startswith('Port Description:'):
                                have_port.append(item.split(':')[1].strip())
            for item in want_neighbors:
                host = item.get('host')
                port = item.get('port')
                if host and host not in have_host:
                    failed_conditions.append('host ' + host)
                if port and port not in have_port:
                    failed_conditions.append('port ' + port)
    return failed_conditions
def main():
    """ main entry point for module execution
    """
    neighbors_spec = dict(
        host=dict(),
        port=dict()
    )
    # Parameters accepted both at top level and per aggregate item.
    element_spec = dict(
        name=dict(),
        description=dict(),
        speed=dict(),
        mtu=dict(),
        duplex=dict(choices=['full', 'half', 'auto']),
        enabled=dict(default=True, type='bool'),
        tx_rate=dict(),
        rx_rate=dict(),
        neighbors=dict(type='list', elements='dict', options=neighbors_spec),
        delay=dict(default=10, type='int'),
        state=dict(default='present',
                   choices=['present', 'absent', 'up', 'down'])
    )
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
    )
    argument_spec.update(element_spec)
    argument_spec.update(ios_argument_spec)
    # 'name' and 'aggregate' are alternatives: exactly one must be given.
    required_one_of = [['name', 'aggregate']]
    mutually_exclusive = [['name', 'aggregate']]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # Diff desired state (want) against running config (have).
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have))
    result['commands'] = commands
    if commands:
        # In check mode report the commands without pushing them.
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    # Verify declarative intent (state/rates/neighbors) after applying.
    failed_conditions = check_declarative_intent_params(module, want, result)
    if failed_conditions:
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jamesblunt/IbPy | ib/ext/Execution.py | 9 | 2355 | #!/usr/bin/env python
""" generated source for module Execution """
#
# Original file copyright original author(s).
# This file copyright Troy Melhase, troy@gci.net.
#
# WARNING: all changes to this file will be lost.
from ib.lib.overloading import overloaded
#
# * Execution.java
# *
#
# package: com.ib.client
class Execution(object):
    """Value object describing a single trade execution.

    Generated from the Java source com.ib.client.Execution; the m_ prefix
    and p_ parameter names mirror the original Java naming.
    """
    # Class-level defaults mirror the Java field initialisers.
    m_orderId = 0
    m_clientId = 0
    m_execId = ""
    m_time = ""
    m_acctNumber = ""
    m_exchange = ""
    m_side = ""
    m_shares = 0
    m_price = float()
    m_permId = 0
    m_liquidation = 0
    m_cumQty = 0
    m_avgPrice = float()
    m_orderRef = ""
    m_evRule = ""
    m_evMultiplier = float()
    @overloaded
    def __init__(self):
        """No-argument constructor: reset the numeric fields to zero."""
        self.m_orderId = 0
        self.m_clientId = 0
        self.m_shares = 0
        self.m_price = 0
        self.m_permId = 0
        self.m_liquidation = 0
        self.m_cumQty = 0
        self.m_avgPrice = 0
        self.m_evMultiplier = 0
    # Registers a second constructor overload dispatched on argument types
    # (ib.lib.overloading); the type list must match the parameter order below.
    @__init__.register(object, int, int, str, str, str, str, str, int, float, int, int, int, float, str, str, float)
    def __init___0(self, p_orderId, p_clientId, p_execId, p_time, p_acctNumber, p_exchange, p_side, p_shares, p_price, p_permId, p_liquidation, p_cumQty, p_avgPrice, p_orderRef, p_evRule, p_evMultiplier):
        """Full constructor: copy every field from the given arguments."""
        self.m_orderId = p_orderId
        self.m_clientId = p_clientId
        self.m_execId = p_execId
        self.m_time = p_time
        self.m_acctNumber = p_acctNumber
        self.m_exchange = p_exchange
        self.m_side = p_side
        self.m_shares = p_shares
        self.m_price = p_price
        self.m_permId = p_permId
        self.m_liquidation = p_liquidation
        self.m_cumQty = p_cumQty
        self.m_avgPrice = p_avgPrice
        self.m_orderRef = p_orderRef
        self.m_evRule = p_evRule
        self.m_evMultiplier = p_evMultiplier
    def __eq__(self, p_other):
        """Two executions are equal when their execution ids match.

        NOTE(review): no __hash__ is defined; under Python 3 this makes
        instances unhashable -- confirm before using them in sets/dicts.
        """
        l_bRetVal = False
        if p_other is None:
            l_bRetVal = False
        elif self is p_other:
            l_bRetVal = True
        else:
            l_theOther = p_other
            l_bRetVal = self.m_execId == l_theOther.m_execId
        return l_bRetVal
| bsd-3-clause |
edespino/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/filerep/Filerep_Resync/fault/genFault.py | 7 | 4002 | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tinctest
import unittest2 as unittest
from time import sleep
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.PSQL import PSQL
from mpp.lib.config import GPDBConfig
from subprocess import Popen, PIPE
'''
Creates Faults for the scenario
'''
class Fault(Command):
    """Command helper that injects faults (killing segment postgres
    processes) and drives recovery for the Filerep resync scenario."""
    def __init__(self, cmd = None):
        Command.__init__(self, 'Running fault command', cmd)
    def _run_sys_cmd(self, cmd_str, validate = False):
        '''helper function to run a sys cmd'''
        tinctest.logger.info("execute:" +cmd_str)
        cmd = Fault(cmd_str)
        cmd.run(validateAfter = validate)
        return cmd.get_results()
    def get_host_port_mapping(self, role):
        """
        Returns a dictionary having key as hostname and value as a list of port nos.
        For e.g {'vm9':['22001','22000'] , 'vm10':['42000','42001'], ...}
        """
        config = GPDBConfig()
        no_of_segments = config.get_countprimarysegments()
        hosts_dict = {}
        for counter in range(no_of_segments):
            (host, port) = config.get_hostandport_of_segment(counter, role)
            # setdefault replaces the Python-2-only dict.has_key() check and
            # creates the port list the first time a host is seen.
            hosts_dict.setdefault(host, []).append(port)
        return hosts_dict
    def kill_processes_with_role(self, role = 'm'):
        '''Kill all postgres processes of segments with the given role.'''
        hosts_dict = self.get_host_port_mapping(role)
        # Kill all the postgres instances of the concerned segment role
        # running on each host, selected by their port numbers.
        for host in hosts_dict:
            ports_list = hosts_dict[host]
            # Build an alternation pattern ('port1'|'port2'|...) that is
            # passed as the egrep expression below.
            segment_ports = "(" + "|".join(ports_list) + ")"
            sys_cmd = "gpssh -h %s -e ' ps aux | egrep '\\\''postgres -D.*-p %s'\\\'' | awk '\\\''{print \"kill -9 \"$2}'\\\'' | sh' "%(host,segment_ports)
            tinctest.logger.info("kill process command : %s"%sys_cmd)
            result = self._run_sys_cmd(sys_cmd)
    def run_recovery(self):
        '''Runs the incremental recovery'''
        tinctest.logger.info('Invoking gprecoverseg to bring up the mirrors')
        cmd_str = "gprecoverseg -a"
        result = self._run_sys_cmd(cmd_str, True)
        # Sleep so that the gpdb catalog tables are updated before querying.
        tinctest.logger.info('Delaying next step for 30 secs..')
        sleep(30)
        return result.stdout
    def are_mirrors_up(self):
        '''Checks if the mirrors are up or not after recovery'''
        tinctest.logger.info('Checking if the mirrors are up or not')
        cmd_str = "select 'down_segment' from gp_segment_configuration where preferred_role = 'm' and status = 'd'"
        # The -1 presumably discounts one echo of 'down_segment' in the psql
        # output besides the result rows -- TODO confirm against PSQL output.
        out = PSQL.run_sql_command(cmd_str).count('down_segment') - 1
        tinctest.logger.info(str(out)+' down segments found.')
        return out <= 0
| apache-2.0 |
jamesiter/JimV-N | models/event_process.py | 1 | 9567 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import libvirt
from models.initialize import guest_event_emit
from models import Guest
__author__ = 'James Iter'
__date__ = '2017/6/15'
__contact__ = 'james.iter.cn@gmail.com'
__copyright__ = '(c) 2017 by James Iter.'
class EventProcess(object):
    """Registers libvirt domain-event callbacks and forwards guest state
    changes to JimV (via Guest.guest_state_report / guest_event_emit)."""
    # libvirt connection shared by all callbacks.
    conn = None
    # Ids returned by domainEventRegisterAny, kept for deregistration.
    guest_callbacks = list()
    # Shutdown detail codes for older libvirt versions that do not expose
    # VIR_DOMAIN_EVENT_SHUTDOWN_GUEST / _HOST as module constants.
    VIR_DOMAIN_EVENT_SHUTDOWN_GUEST = 1
    VIR_DOMAIN_EVENT_SHUTDOWN_HOST = 2
    def __init__(self):
        pass
    @classmethod
    def guest_event_callback(cls, conn, dom, event, detail, opaque):
        """Lifecycle callback: report guest state, then classify the event.

        The event/detail branches below are placeholders documenting each
        libvirt lifecycle case; only the state report is acted upon.
        """
        if not isinstance(dom, libvirt.virDomain):
            # Skip guests that are no longer on this host.
            return
        if event == libvirt.VIR_DOMAIN_EVENT_STOPPED and detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_MIGRATED:
            # Do not send a state notification after the guest has finished
            # migrating off this host.
            return
        Guest.guest_state_report(dom=dom)
        if event == libvirt.VIR_DOMAIN_EVENT_DEFINED:
            if detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_ADDED:
                # Fired after a guest definition is created.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_UPDATED:
                # Fired after the guest configuration is updated.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_RENAMED:
                # Guest renamed; untested. Presumably fired when the guest
                # takes its new name.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_DEFINED_FROM_SNAPSHOT:
                # Config was restored from a snapshot; untested. Presumably a
                # new guest created from a snapshot's saved configuration.
                pass
            else:
                pass
        elif event == libvirt.VIR_DOMAIN_EVENT_UNDEFINED:
            if detail == libvirt.VIR_DOMAIN_EVENT_UNDEFINED_REMOVED:
                # A guest definition was removed.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_UNDEFINED_RENAMED:
                # Guest renamed; untested. Presumably fired when the old
                # name disappears.
                pass
            else:
                pass
        elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
            if detail == libvirt.VIR_DOMAIN_EVENT_STARTED_BOOTED:
                # Normal boot.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_MIGRATED:
                # Fired when a guest is migrated in from another host.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_RESTORED:
                # Guest restored from a state file.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT:
                # Fired when the guest is restored from a snapshot.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STARTED_WAKEUP:
                # Fired on wakeup; untested.
                pass
            else:
                pass
        elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
            if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_PAUSED:
                # Fired when an administrator pauses the guest.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED:
                # Fired when the outgoing guest is paused briefly for live
                # migration.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_IOERROR:
                # Paused because of a disk I/O error; untested.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_WATCHDOG:
                # Fired when the watchdog triggers; untested.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_RESTORED:
                # Fired when restored from a paused-guest state file.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_FROM_SNAPSHOT:
                # Fired when restored from a paused-guest snapshot.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR:
                # Fired after a libvirt API call failed.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY:
                # Fired when the guest is paused during a post-copy migration.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY_FAILED:
                # Fired when a post-copy migration failed.
                pass
            else:
                pass
        elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
            if detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_UNPAUSED:
                # Unpaused; normal resume.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_MIGRATED:
                # Fired on the migration target host when migration completes.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_FROM_SNAPSHOT:
                # Fired when resumed from a snapshot.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_POSTCOPY:
                # Resumed, but migration still running in post-copy mode;
                # untested.
                pass
            else:
                pass
        elif event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
            if detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN:
                # Fired on normal shutdown.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_DESTROYED:
                # Fired when the guest's power is forcibly cut from the host.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_CRASHED:
                # Fired when the guest crashes.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_MIGRATED:
                # Fired after the guest finished migrating off this host.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_SAVED:
                # Fired after saving the guest to a state file.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_FAILED:
                # Fired when the host emulator/manager fails.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT:
                # Fired after loading an offline snapshot; untested.
                pass
        elif event == libvirt.VIR_DOMAIN_EVENT_SHUTDOWN:
            if detail == libvirt.VIR_DOMAIN_EVENT_SHUTDOWN_FINISHED:
                # Fired after the guest shut down normally.
                pass
            elif detail == cls.VIR_DOMAIN_EVENT_SHUTDOWN_GUEST:
                # Fired when the guest itself sent the shutdown signal (the
                # virtual hardware is still on, unlike poweroff); untested.
                pass
            elif detail == cls.VIR_DOMAIN_EVENT_SHUTDOWN_HOST:
                # Fired after the guest was shut down via a signal from the
                # host.
                pass
            else:
                pass
        elif event == libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED:
            if detail == libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED_MEMORY:
                # Guest memory suspended by power management.
                pass
            elif detail == libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED_DISK:
                # Guest disk suspended by power management.
                pass
            else:
                pass
        elif event == libvirt.VIR_DOMAIN_EVENT_CRASHED:
            if detail == libvirt.VIR_DOMAIN_EVENT_CRASHED_PANICKED:
                # Fired when the guest panics.
                pass
            else:
                pass
        else:
            pass
    @staticmethod
    def guest_event_migration_iteration_callback(conn, dom, iteration, opaque):
        """Emit migration progress (from virDomainGetJobInfo) per iteration."""
        try:
            migrate_info = dict()
            migrate_info['type'], migrate_info['time_elapsed'], migrate_info['time_remaining'], \
                migrate_info['data_total'], migrate_info['data_processed'], migrate_info['data_remaining'], \
                migrate_info['mem_total'], migrate_info['mem_processed'], migrate_info['mem_remaining'], \
                migrate_info['file_total'], migrate_info['file_processed'], migrate_info['file_remaining'] = \
                dom.jobInfo()
            guest_event_emit.migrating(uuid=dom.UUIDString(), migrating_info=migrate_info)
        except libvirt.libvirtError as e:
            # Best effort: the domain may disappear mid-query; ignore errors.
            pass
    @staticmethod
    def guest_event_device_added_callback(conn, dom, dev, opaque):
        """Persist the new domain XML after a device was hot-added."""
        Guest.update_xml(dom=dom)
    @staticmethod
    def guest_event_device_removed_callback(conn, dom, dev, opaque):
        """Persist the new domain XML after a device was hot-removed."""
        Guest.update_xml(dom=dom)
    @classmethod
    def guest_event_register(cls):
        """Open a libvirt connection and register all event callbacks."""
        cls.conn = libvirt.open()
        cls.conn.domainEventRegister(cls.guest_event_callback, None)
        # Reference: https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainEventID
        cls.guest_callbacks.append(cls.conn.domainEventRegisterAny(
            None, libvirt.VIR_DOMAIN_EVENT_ID_MIGRATION_ITERATION,
            cls.guest_event_migration_iteration_callback, None))
        cls.guest_callbacks.append(cls.conn.domainEventRegisterAny(
            None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_ADDED,
            cls.guest_event_device_added_callback, None))
        cls.guest_callbacks.append(cls.conn.domainEventRegisterAny(
            None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED,
            cls.guest_event_device_removed_callback, None))
    @classmethod
    def guest_event_deregister(cls):
        """Deregister every callback registered by guest_event_register."""
        cls.conn.domainEventDeregister(cls.guest_event_callback)
        for eid in cls.guest_callbacks:
            cls.conn.domainEventDeregisterAny(eid)
| gpl-3.0 |
bunop/pyEnsemblRest | ensemblrest/exceptions.py | 2 | 2231 | """
This file is part of pyEnsemblRest.
Copyright (C) 2013-2016, Steve Moss
pyEnsemblRest is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyEnsemblRest is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyEnsemblRest. If not, see <http://www.gnu.org/licenses/>.
Implements custom exceptions for the EnsEMBL REST API
"""
from .ensembl_config import ensembl_http_status_codes
class EnsemblRestError(Exception):
    """
    Generic error class, catch-all for most EnsemblRest issues.
    Special cases are handled by EnsemblRestRateLimitError and EnsemblRestServiceUnavailable.
    """
    def __init__(self, msg, error_code=None, rate_reset=None, rate_limit=None,
                 rate_remaining=None, retry_after=None):
        self.error_code = error_code
        # Prefix the message with the HTTP status line when the code is known.
        known_code = error_code is not None and error_code in ensembl_http_status_codes
        if known_code:
            status_name = ensembl_http_status_codes[error_code][0]
            msg = 'EnsEMBL REST API returned a %s (%s): %s' % (error_code, status_name, msg)
        super(EnsemblRestError, self).__init__(msg)
    @property
    def msg(self):
        """The (possibly status-prefixed) message passed to the exception."""
        return self.args[0]
class EnsemblRestRateLimitError(EnsemblRestError):
    """
    Raised when you've hit a rate limit.
    The amount of seconds to retry your request in will be appended to the message.
    """
    def __init__(self, msg, error_code=None, rate_reset=None, rate_limit=None,
                 rate_remaining=None, retry_after=None):
        # Append the retry hint only when a float delay was supplied.
        suffix = ''
        if isinstance(retry_after, float):
            suffix = ' (Rate limit hit: Retry after %d seconds)' % retry_after
        EnsemblRestError.__init__(self, msg + suffix, error_code=error_code)
class EnsemblRestServiceUnavailable(EnsemblRestError):
    """
    Raised when the EnsEMBL REST service is down.

    Carries no behaviour of its own; it exists so callers can catch this
    condition separately from the generic EnsemblRestError.
    """
    pass
| gpl-3.0 |
night-ghost/ardupilot | Tools/scripts/configure_all.py | 7 | 1110 | #!/usr/bin/env python
"""
script to run configre for all hwdef.dat, to check for syntax errors
"""
import os
import shutil
import subprocess
import sys
import fnmatch
# Glob pattern of board names to configure; '*' means every board.
board_pattern = '*'
# allow argument for pattern of boards to build
if len(sys.argv)>1:
    board_pattern = sys.argv[1]
# Stream child-process output immediately instead of buffering it.
os.environ['PYTHONUNBUFFERED'] = '1'
def get_board_list():
    '''Return the ChibiOS board names: every subdirectory of
    libraries/AP_HAL_ChibiOS/hwdef that contains a hwdef.dat.

    (The previous docstring said hwdef-bl.dat, but the code checks
    hwdef.dat -- the non-bootloader definition.)
    '''
    board_list = []
    # Only the first level of the hwdef tree is scanned.
    dirname, dirlist, filenames = next(os.walk('libraries/AP_HAL_ChibiOS/hwdef'))
    for d in dirlist:
        hwdef = os.path.join(dirname, d, 'hwdef.dat')
        if os.path.exists(hwdef):
            board_list.append(d)
    return board_list
def run_program(cmd_list):
    """Run *cmd_list* as a subprocess; exit the script with status 1 when
    the command returns a non-zero code."""
    print("Running (%s)" % " ".join(cmd_list))
    if subprocess.call(cmd_list) != 0:
        print("Build failed: %s" % ' '.join(cmd_list))
        sys.exit(1)
for board in get_board_list():
    # Configure only the boards matching the requested glob pattern.
    if not fnmatch.fnmatch(board, board_pattern):
        continue
    print("Building for %s" % board)
    # Any configure failure aborts the whole run (run_program exits).
    run_program(["./waf", "configure", "--board", board])
| gpl-3.0 |
tjamet/dotfiles | vim/eclim/autoload/eclim/python/rope/contrib/fixmodnames.py | 91 | 2256 | """Fix the name of modules
This module is useful when you want to rename many of the modules in
your project. That can happen specially when you want to change their
naming style.
For instance::
fixer = FixModuleNames(project)
changes = fixer.get_changes(fixer=str.lower)
project.do(changes)
Here it renames all modules and packages to use lower-cased chars.
You can tell it to use any other style by using the ``fixer``
argument.
"""
from rope.base import change, taskhandle
from rope.contrib import changestack
from rope.refactor import rename
class FixModuleNames(object):
    """Rename all modules/packages of a rope project with a fixer function."""
    def __init__(self, project):
        self.project = project
    def get_changes(self, fixer=str.lower,
                    task_handle=taskhandle.NullTaskHandle()):
        """Fix module names

        `fixer` is a function that takes and returns a `str`.  Given
        the name of a module, it should return the fixed name.

        """
        stack = changestack.ChangeStack(self.project, 'Fixing module names')
        jobset = task_handle.create_jobset('Fixing module names',
                                           self._count_fixes(fixer) + 1)
        try:
            # Renaming invalidates the resource listing, so after each rename
            # we break out of the inner for-loop and re-scan from scratch
            # (outer while).  The for/else fires only when no resource needed
            # fixing, which ends the outer loop.
            while True:
                for resource in self._tobe_fixed(fixer):
                    jobset.started_job(resource.path)
                    renamer = rename.Rename(self.project, resource)
                    changes = renamer.get_changes(fixer(self._name(resource)))
                    stack.push(changes)
                    jobset.finished_job()
                    break
                else:
                    break
        finally:
            # Revert the applied changes; the merged ChangeSet returned below
            # can then be applied by the caller in one go.
            jobset.started_job('Reverting to original state')
            stack.pop_all()
            jobset.finished_job()
        return stack.merged()
    def _count_fixes(self, fixer):
        # Number of modules whose name the fixer would change.
        return len(list(self._tobe_fixed(fixer)))
    def _tobe_fixed(self, fixer):
        # Yield every python file whose module name the fixer would change.
        for resource in self.project.pycore.get_python_files():
            modname = self._name(resource)
            if modname != fixer(modname):
                yield resource
    def _name(self, resource):
        # Module name of a resource; packages are named by their directory.
        modname = resource.name.rsplit('.', 1)[0]
        if modname == '__init__':
            modname = resource.parent.name
        return modname
| gpl-3.0 |
petrvanblokland/Xierpa3 | xierpa3/sites/examples/helloworldblueprint/make.py | 1 | 9548 | # -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ buro@petr.com, www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# make.py
#
import webbrowser
from xierpa3.toolbox.transformer import TX
from xierpa3.components import Theme, Page, Column
from xierpa3.builders.cssbuilder import CssBuilder
from xierpa3.builders.htmlbuilder import HtmlBuilder
from xierpa3.attributes import Em, Margin, Perc, Color
from xierpa3.descriptors.media import Media
from xierpa3.descriptors.blueprint import BluePrint
BODYFAMILY = 'Impact, Verdana, sans'
CAPTIONFAMILY = 'Georgia, serif'
class HelloWorldBluePrintText(Column):
    """Single text column demonstrating BluePrint-parameterised styling."""
    # Get Constants->Config as class variable, so inheriting classes can redefine values.
    C = Theme.C
    # The BluePrint defined the parameters for the component. They can be adjusted by parent
    # components who implement this component on a page, or by inheriting classes that
    # only want to redefine part of the parameters. The actual self.style is created during
    # compilation of the start (not during run-time) as cascading result of all parent BLUEPRINT
    # dictionaries.
    # Furthermore the documentation builder is using the BluePrint instance to visualize
    # the interface of each component available.
    #
    BLUEPRINT = BluePrint(
        # Attribute, documentation about the attribute.
        # Main div block
        bodyFamily=BODYFAMILY, doc_bodyFamily=u'Body font family of this example. For now, in this example we only use system fonts.',
        fontSize=Em(4), doc_fontSize=u'Font size of the body text, relative to the body font size.',
        lineHeight=Em(1.2), doc_lineHeight=u'Line height (leading) of body text.',
        textAlign=C.CENTER, doc_textAlign=u'Horizontal alignment of text.',
        color=Color('yellow'), doc_color=u'Color of the main column.',
        colorTablet=Color('orange'), doc_colorTablet=u'Text color of the main column for tablet.',
        colorMobile=Color('red'), doc_colorMobile=u'Text color of the main column for mobile.',
        backgroundColor=Color('red'), doc_backgroundColor=u'Background color of the main column',
        backgroundColorTablet=Color('green'), doc_backgroundColorTablet=u'Background color of the main column for tablet.',
        backgroundColorMobile=Color('#BBB'), doc_backgroundColorMobile=u'Background color of the main column for mobile.',
        paddingTop=Em(0.5), doc_paddingTop=u'Padding on top of the page',
        paddingBottom=Em(0.5), doc_paddingBottom=u'Padding at bottom of the page.',
        margin=Margin(0, C.AUTO), doc_margin=u'Page margin of the column. In this case, horizontally centered on the page.',
        width=Perc(80), doc_width=u'Width of the main column. Default is 80% os the page with.',
        maxWidth=700, doc_maxWidth=u'Maximal width of the column.',
        minWidth=300, doc_minWidth=u'Minimal width of the column.',
        # Caption
        captionFont=CAPTIONFAMILY, doc_captionFont=u'Caption font family for this example. For now, in this example we only use system fonts.',
        captionColor=Color('#888'), doc_captionColor=u'Color of the caption.',
        captionPaddingTop=Em(0.2), doc_captionPaddingTop=u'Padding top of the caption.',
    )
    def buildBlock(self, b):
        u"""Build the column, using the parameters from the class BluePrint instance.
        This dictionary is builds the **self.style()** by cascading all BlurPrint instances
        of the parent classes. The result is a complete specification of all the parameters
        the direction the style and behavior of this component."""
        s = self.style
        # Main column div; the @media rules are declared inline with the
        # element and collected by the CSS builder for output at the end.
        b.div(class_=self.getClassName(), color=s.color, margin=s.margin,
            width=s.width, maxwidth=s.maxWidth, minwidth=s.minWidth, backgroundcolor=s.backgroundColor,
            paddingtop=s.paddingTop, paddingbottom=s.paddingBottom, fontfamily=s.bodyFamily,
            fontsize=s.fontSize, textalign=s.textAlign, lineheight=s.lineHeight,
            # Now define the @media parameters, where they belong: inside the definition of the element.
            # The media parameters are collected and sorted for output at the end of the CSS document.
            media=(
                # Example for table, show lighter background, change color of text and smaller size.
                Media(min=self.C.M_TABLET_MIN, max=self.C.M_TABLET_MAX, backgroundcolor=s.backgroundColorTablet,
                    color=s.colorTablet, fontsize=Em(3), width=self.C.AUTO, float=self.C.NONE),
                # For mobile, even more lighter background, change color of text and smaller size.
                Media(max=self.C.M_MOBILE_MAX, backgroundcolor=s.backgroundColorMobile,
                    color=s.colorMobile, fontsize=Em(2), width=self.C.AUTO, float=self.C.NONE)
            ))
        b.text('Hello parametric world.')
        # One of the advantages of using a real programming language to generate
        # HTML/CSS code, is that repetitions can be written as a loop. Not necessary
        # fewer lines, but more expandable and less redundant distribution of
        # knowledge in the code.
        data = (
            # class, minWidth, maxWidth, text
            ('c1', self.C.M_DESKTOP_MIN, None, 'Responsive desktop mode.' ),
            ('c2', self.C.M_TABLET_MIN, self.C.M_TABLET_MAX, 'Responsive tablet mode.' ),
            ('c3', None, self.C.M_MOBILE_MAX, 'Responsive mobile mode..' ),
        )
        # Each div is hidden by default and only displayed inside its own
        # media query range, labelling the currently active mode.
        for class_, minWidth, maxWidth, text in data:
            b.div(class_=class_, display=self.C.NONE, fontsize=Em(0.7), color=Color(self.C.WHITE),
                media=Media(min=minWidth, max=maxWidth, display=self.C.BLOCK))
            b.text(text)
            b._div()
        b._div()
        # Caption line below the column, centered like the main column.
        b.div(class_=self.C.CLASS_CAPTION, color=s.captionColor, margin=Margin(0, self.C.AUTO),
            width=Perc(100), maxwidth=700, minwidth=300,
            paddingtop=s.captionPaddingTop, fontfamily=s.captionFont, fontsize=Em(0.9),
            textalign=s.textAlign, fontstyle=self.C.ITALIC,
            # Change background color of the line to indicate the illustrate the difference for mobile size.
            #media=Media(max=self.M_MOBILE_MAX, backgroundcolor='yellow', color='#222', fontsize=Em(1),
            #    margin=0, width=Perc(100),
        )
        b.text('Responsive page, generated by Xierpa3. Using BluePrint parameters.')
        b._div()
class HelloWorldBluePrint(Theme):
    u"""The **HelloWorldBluePrint** class implements a basic "Hello, world!" page, running as
    batch process, saving the result as an HTML file. Double click the generated file or
    drag to a browser see the result."""
    TITLE = u'The responsive "Hello, world!" page using BluePrint styling.' # Use as title of window.
    def baseComponents(self):
        u"""Create a theme site with just one single template home page. Answer a list
        of page instances that are used as templates for this site."""
        # Create an instance (=object) of the text component to be placed on the page.
        hw = HelloWorldBluePrintText()
        # Create an instance (=object) of the page, containing the "hw" component.
        # The class is also the page name in the url.
        # Components can be a single component or a list of components.
        homePage = Page(class_=self.C.TEMPLATE_INDEX, components=hw, title=self.TITLE)
        # Answer a list of types of pages for this site.
        return [homePage]
    def make(self, root=None):
        u"""The instance of this class builds CSS and HTML files at the optional path **root**.
        If not defined, then the default ~/Desktop/Xierpa3Examples/[component.name] is used as export path,
        as set by Builder.DEFAULT_ROOTPATH.

        BUGFIX: *root* now defaults to None. The docstring always described it
        as optional and the __main__ block calls site.make() without arguments,
        which previously raised a TypeError.
        """
        if root is None:
            root = TX.asDir(self.C.PATH_EXAMPLES) # Expand user path to full directory path.
        # C S S
        # Create the main CSS builder instance to build the SASS/CSS part of the site.
        cssBuilder = CssBuilder()
        # Compile (=build) the SCSS to CSS and save the file in "css/style.css".
        self.build(cssBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
        cssBuilder.save(self, root)
        # H T M L
        # Create the main HTML builder instance to build the HTML part of the site.
        htmlBuilder = HtmlBuilder()
        # Compile the HTML and save the resulting HTML file in "helloWorld.html".
        self.build(htmlBuilder) # Build from entire site theme, not just from template. Result is stream in builder.
        # Answer the path, so we can directly open the file with a browser.
        return htmlBuilder.save(self, root)
if __name__ == '__main__':
    # This construction "__name__ == '__main__'" makes this Python file only
    # be executed when called in direct mode, such as "python make.py" in the terminal.
    # Since no rootPath is added to make(), the file export is in ~/Desktop/Xierpa3Examples/HelloWorldBluePrint/
    site = HelloWorldBluePrint()
    # make() answers the path of the generated HTML file.
    path = site.make()
    webbrowser.open(path) # Open file path with browser
| mit |
DhruvSarma/retrogamelib | BubbMan/gamelib/objects.py | 3 | 10795 | import pygame
import os
import sys
import math
sys.path.insert(0, "..")
from retrogamelib import gameobject
from retrogamelib import button
from retrogamelib.constants import *
from retrogamelib.util import *
class Collidable(gameobject.Object):
    """Base sprite with tile-based collision handling.

    Subclasses must set self.image and self.rect; move() resolves motion
    against nearby Platform tiles, including slanted ones.
    """

    def __init__(self):
        gameobject.Object.__init__(self, self.groups)
        self.offsetx = 0  # drawing offset relative to the collision rect
        self.offsety = 0
        self.always_update = False

    def draw(self, surface, camera):
        # Draw relative to the camera, shifted by the sprite's offsets.
        surface.blit(self.image, (self.rect.x - camera.x + self.offsetx,
            self.rect.y - camera.y + self.offsety))

    def on_collision(self, dx, dy):
        # Hook for subclasses; called after a collision response.
        pass

    def get_surrounding(self, pos):
        """Return the 3x3 neighbourhood of tile coordinates around *pos*."""
        center = (pos[0], pos[1])
        topleft = (pos[0]-1, pos[1]-1)
        midtop = (pos[0], pos[1]-1)
        topright = (pos[0]+1, pos[1]-1)
        midleft = (pos[0]-1, pos[1])
        midright = (pos[0]+1, pos[1])
        bottomleft = (pos[0]-1, pos[1]+1)
        midbottom = (pos[0], pos[1]+1)
        bottomright = (pos[0]+1, pos[1]+1)
        return (topleft, midtop, topright, midleft, midright,
            bottomleft, midbottom, bottomright, center)

    def move(self, dx, dy, tiles):
        """Move by (dx, dy), colliding against nearby Platform tiles.

        Only tiles in the 3x3 neighbourhood of the sprite's tile position
        are tested, and each axis is resolved separately.
        """
        tile_pos = (self.rect.centerx//16, self.rect.centery//16)
        coltiles = []
        for pos in self.get_surrounding(tile_pos):
            if pos[0] > -1 and pos[0] < len(tiles[0]) and \
               pos[1] > -1 and pos[1] < len(tiles):
                tile = tiles[pos[1]][pos[0]]
                if isinstance(tile, Platform):
                    coltiles.append(tile)
        if dx != 0:
            self.__move(dx, 0, coltiles)
        if dy != 0:
            self.__move(0, dy, coltiles)

    def __move(self, dx, dy, tiles):
        # Apply the motion, then respond to any overlapping tile.
        self.rect.x += dx
        self.rect.y += dy
        for tile in tiles:
            if self.rect.colliderect(tile.rect):
                if tile.slant == 0:
                    self.rect_respond(dx, dy, tile)
                else:
                    self.slant_respond(dx, dy, tile)

    def rect_respond(self, dx, dy, tile):
        """Push the sprite flush against an axis-aligned tile."""
        if dx > 0:
            self.rect.right = tile.rect.left
        elif dx < 0:
            self.rect.left = tile.rect.right
        if dy > 0:
            self.rect.bottom = tile.rect.top
        elif dy < 0:
            self.rect.top = tile.rect.bottom
        self.on_collision(dx, dy)

    def slant_respond(self, dx, dy, tile):
        """Rest the sprite on the surface of a slanted tile."""
        top = None
        if tile.slant < 0:
            if self.rect.left >= tile.rect.left:
                x = self.rect.left - tile.rect.left
                top = tile.rect.top+x-1
        if tile.slant > 0:
            if self.rect.right <= tile.rect.right:
                x = tile.rect.right - self.rect.right
                top = tile.rect.top+x-1
        # BUG FIX: "if top:" treated a computed surface height of 0 (a slant
        # tile at the very top of the map) as "no surface"; test explicitly
        # against None instead.
        if top is not None:
            if self.rect.bottom > top:
                self.rect.bottom = top
                self.on_collision(0, dy)
class Player(Collidable):
    # The controllable BubbMan sprite: handles input, walking, jumping and
    # punching. NOTE(review): frame indices rely on Python 2 integer
    # division (self.frame/3%2 etc.).
    def __init__(self):
        Collidable.__init__(self)
        self.right_images = [
            load_image("data/bubbman-1.png"),
            load_image("data/bubbman-2.png"),
            load_image("data/bubbman-4.png"),
            load_image("data/bubbman-5.png"),
        ]
        self.left_images = []
        # Left-facing frames are mirrored copies of the right-facing ones.
        for img in self.right_images:
            self.left_images.append(pygame.transform.flip(img, 1, 0))
        self.images = self.right_images
        self.image = self.images[0]
        self.rect = pygame.Rect(8, 16, 6, 16)
        self.facing = 1       # 1 = facing right, -1 = facing left
        self.jump_speed = 0   # vertical velocity (positive = falling)
        self.frame = 0        # animation tick counter
        self.jumping = True
        self.punch_time = 0   # frames remaining in the punch animation
        self.offsetx = -5
        self.z = 0

    def punch(self):
        # Start a punch unless one is already in progress.
        if self.punch_time <= 0:
            self.punch_time = 5

    def on_collision(self, dx, dy):
        # Any vertical hit resets the fall speed; landing (dy > 0) ends
        # the jump.
        if dy > 0 or dy < 0:
            self.jump_speed = 2
        if dy > 0:
            self.jumping = False

    def update(self, tiles):
        self.frame += 1
        imgframe = 0
        if self.punch_time > 0:
            self.punch_time -= 1
        moving = False
        # Horizontal movement is locked out while a punch is active.
        if button.is_held(LEFT) and self.punch_time <= 0:
            self.facing = -1
            moving = True
            self.move(-2, 0, tiles)
        if button.is_held(RIGHT) and self.punch_time <= 0:
            self.facing = 1
            moving = True
            self.move(2, 0, tiles)
        if button.is_pressed(A_BUTTON) and self.punch_time <= 0:
            if not self.jumping:
                play_sound("data/jump.ogg")
                self.jump_speed = -5
                self.jumping = True
        if button.is_pressed(B_BUTTON):
            self.punch()
        # Pick the frame set for the current facing direction.
        if self.facing < 0:
            self.images = self.left_images
        else:
            self.images = self.right_images
        # Animation frame priority: punch > jump > walk > idle.
        if moving:
            imgframe = self.frame/3%2
        if self.jumping:
            imgframe = 1
        if self.punch_time > 0:
            imgframe = 3
            if self.punch_time == 3:
                # Spawn the hitbox partway through the punch animation.
                Punch(self)
                play_sound("data/swoosh.ogg")
            if self.punch_time > 3:
                imgframe = 2
        self.image = self.images[imgframe]
        # Holding A applies weaker gravity, giving a higher jump.
        if button.is_held(A_BUTTON):
            self.jump_speed += 0.4
        else:
            self.jump_speed += 0.8
        if self.jump_speed > 5:
            self.jump_speed = 5
        if self.punch_time <= 0:
            self.move(0, self.jump_speed, tiles)
        # Falling fast means we are airborne even without a jump press.
        if self.jump_speed > 3:
            self.jumping = True
class Punch(Collidable):
    """Invisible, short-lived hitbox spawned in front of the player."""

    def __init__(self, player):
        Collidable.__init__(self)
        # A fully transparent 1x1 surface: the punch itself is never visible.
        surface = pygame.Surface((1, 1))
        surface.set_alpha(0)
        self.image = surface
        self.rect = pygame.Rect(0, 0, 12, 12)
        self.life = 2  # frames until the punch expires
        self.player = player
        self.always_update = True

    def update(self, tiles):
        gameobject.Object.update(self)
        owner = self.player
        # Track the owner's position, 8px ahead in the facing direction.
        self.rect.center = (owner.rect.centerx + (8 * owner.facing),
                            owner.rect.centery)
        self.life -= 1
        if self.life <= 0:
            # Expire: nudge forward, leave a poof effect and vanish.
            self.rect.centerx += owner.facing * 4
            Poof(self.rect.center)
            self.kill()
class Platform(Collidable):
    """A static 16x16 map tile, optionally slanted."""

    def __init__(self, pos, imagepos, slant=0):
        Collidable.__init__(self)
        self.sheet = load_image("data/platform.png")
        # Carve the 16x16 tile at *imagepos* out of the sprite sheet.
        tile = pygame.Surface((16, 16))
        tile.set_colorkey((0, 0, 0), pygame.RLEACCEL)
        tile.blit(self.sheet, (-imagepos[0]*16,
            -imagepos[1]*16, 16, 16))
        self.image = tile
        self.rect = self.image.get_rect(topleft=pos)
        # 1 for up slope right, -1 for down slope right, 0 for flat.
        self.slant = slant
        self.z = -3

    def update(self, tiles):
        gameobject.Object.update(self)
class Baddie(Collidable):
    """Walking enemy that patrols and turns around when it hits a wall."""

    def __init__(self, pos):
        Collidable.__init__(self)
        self.left_images = [
            load_image("data/baddie-1.png"),
            load_image("data/baddie-2.png"),
        ]
        # Right-facing frames are mirrored copies of the left-facing ones.
        self.right_images = [pygame.transform.flip(frame, 1, 0)
                             for frame in self.left_images]
        self.images = self.left_images
        self.image = self.images[0]
        self.rect = pygame.Rect(pos[0], pos[1], 8, 11)
        self.offsetx = -2
        self.frame = 0
        self.dx = -1  # walking direction and speed
        self.z = -1

    def update(self, tiles):
        self.frame += 1
        # Two-frame walk cycle, changing every 4 ticks (Py2 int division).
        self.image = self.images[self.frame/4%2]
        # A constant downward pull of 3 keeps the baddie glued to the floor.
        self.move(self.dx, 3, tiles)
        if self.dx > 0:
            self.images = self.right_images
        else:
            self.images = self.left_images

    def on_collision(self, dx, dy):
        # Hitting anything horizontally reverses the patrol direction.
        if dx != 0:
            self.dx = -self.dx
class Coin(Collidable):
    """Animated collectible coin with a four-frame spin cycle."""

    def __init__(self, pos):
        Collidable.__init__(self)
        self.images = [
            load_image("data/coin-1.png"),
            load_image("data/coin-2.png"),
            load_image("data/coin-3.png"),
            load_image("data/coin-4.png"),
        ]
        self.image = self.images[0]
        self.rect = self.image.get_rect(topleft=pos)
        self.frame = 0
        self.always_update = True
        self.z = -2

    def update(self, tiles):
        self.frame += 1
        # Advance the spin animation every 4 ticks (Py2 int division).
        self.image = self.images[self.frame/4%4]
class Points(Collidable):
    """Floating score text that drifts upward briefly, then disappears."""

    def __init__(self, score, pos, font):
        Collidable.__init__(self)
        self.image = font.render("%d" % score)
        self.rect = self.image.get_rect(center=pos)
        self.life = 20  # frames to live
        self.z = 10

    def update(self, tiles):
        self.life -= 1
        if self.life <= 0:
            self.kill()
        # Drift upward one pixel per frame.
        self.rect.move_ip(0, -1)
class Poof(Collidable):
    """Short three-frame smoke-puff effect."""

    def __init__(self, pos):
        Collidable.__init__(self)
        self.images = [
            load_image("data/poof-1.png"),
            load_image("data/poof-2.png"),
            load_image("data/poof-3.png"),
        ]
        self.image = self.images[0]
        self.rect = self.image.get_rect(center=pos)
        self.frame = 0

    def update(self, tiles):
        self.frame += 1
        # Each of the three frames is shown for two ticks (Py2 int division).
        self.image = self.images[self.frame/2%3]
        if self.frame >= 6:
            # Animation finished: remove the effect.
            self.kill()
class BaddieDeath(Collidable):
    """A defeated baddie: its sprite flips upside down and falls away."""

    def __init__(self, baddie, pos):
        Collidable.__init__(self)
        # Vertically mirror the baddie's current sprite.
        self.image = pygame.transform.flip(baddie.image, 0, 1)
        self.rect = self.image.get_rect(center=pos)
        self.frame = 0
        self.dy = -3  # small initial upward pop

    def update(self, tiles):
        # Simple gravity with no collision: fall off the bottom of the map.
        self.dy += 0.5
        self.rect.y += self.dy
        if self.rect.y > 200:
            self.kill()
class Death(Collidable):
    """The player's death animation: pop up, then fall off-screen."""

    def __init__(self, pos):
        Collidable.__init__(self)
        self.image = load_image("data/bubbman-3.png")
        self.rect = self.image.get_rect(center=pos)
        self.x = self.rect.centerx
        self.jump_speed = -10  # initial upward velocity
        self.life = 100

    def update(self, tiles):
        # Remove the sprite once it has fallen below the screen.
        if self.rect.top > 200:
            self.kill()
        # Gravity pulls the sprite back down.
        self.jump_speed += 0.5
        self.rect.move_ip(0, self.jump_speed)
class Spring(Collidable):
    """Bounce pad that shows a compressed frame for a couple of ticks."""

    def __init__(self, pos):
        Collidable.__init__(self)
        self.images = [
            load_image("data/spring-1.png"),
            load_image("data/spring-2.png"),
        ]
        self.image = self.images[0]
        self.rect = self.image.get_rect(topleft=pos)
        self.bouncing = 0  # ticks left showing the compressed sprite

    def bounce(self):
        # Show the compressed sprite for the next two updates.
        self.bouncing = 2

    def update(self, tiles):
        if self.bouncing > 0:
            self.bouncing -= 1
            self.image = self.images[1]
        else:
            self.image = self.images[0]
| lgpl-2.1 |
BigBrother1984/android_external_chromium_org | chrome/tools/build/win/create_installer_archive.py | 34 | 25393 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to create Chrome Installer archive.
This script is used to create an archive of all the files required for a
Chrome install in appropriate directory structure. It reads chrome.release
file as input, creates chrome.7z archive, compresses setup.exe and
generates packed_files.txt for mini_installer project.
"""
import ConfigParser
import glob
import optparse
import os
import shutil
import subprocess
import sys
ARCHIVE_DIR = "installer_archive"
# suffix to uncompressed full archive file, appended to options.output_name
ARCHIVE_SUFFIX = ".7z"
BSDIFF_EXEC = "bsdiff.exe"
CHROME_DIR = "Chrome-bin"
CHROME_PATCH_FILE_SUFFIX = "_patch" # prefixed by options.output_name
# compressed full archive suffix, will be prefixed by options.output_name
COMPRESSED_ARCHIVE_SUFFIX = ".packed.7z"
COMPRESSED_FILE_EXT = ".packed.7z" # extension of patch archive file
COURGETTE_EXEC = "courgette.exe"
MINI_INSTALLER_INPUT_FILE = "packed_files.txt"
PATCH_FILE_EXT = '.diff'
SETUP_EXEC = "setup.exe"
SETUP_PATCH_FILE_PREFIX = "setup_patch"
TEMP_ARCHIVE_DIR = "temp_installer_archive"
VERSION_FILE = "VERSION"
def BuildVersion(build_dir):
  """Returns the full build version string constructed from information in
  VERSION_FILE. Any segment not found in that file will default to '0'.
  """
  major = 0
  minor = 0
  build = 0
  patch = 0
  # FIX: the file handle was previously opened without ever being closed;
  # use a context manager so it is released deterministically.
  with open(os.path.join(build_dir, '../../chrome', VERSION_FILE), 'r') as f:
    for line in f:
      line = line.rstrip()
      # Each recognized prefix is exactly 6 characters ("XXXXX=").
      if line.startswith('MAJOR='):
        major = line[6:]
      elif line.startswith('MINOR='):
        minor = line[6:]
      elif line.startswith('BUILD='):
        build = line[6:]
      elif line.startswith('PATCH='):
        patch = line[6:]
  return '%s.%s.%s.%s' % (major, minor, build, patch)
def CompressUsingLZMA(build_dir, compressed_file, input_file):
  """Compresses input_file into compressed_file using the bundled 7za.exe
  with LZMA settings tuned for installer size."""
  lzma_exec = GetLZMAExec(build_dir)
  cmd = [lzma_exec,
         'a', '-t7z',
          # Flags equivalent to -mx9 (ultra) but with the bcj2 turned on (exe
          # pre-filter). This results in a ~2.3MB decrease in installer size on
          # a 24MB installer.
          # Additionally, these settings reflect a 7zip 4.42 and up change in
          # the definition of -mx9, increasing the dictionary size moving to
          # 26bit = 64MB. This results in an additional ~3.5MB decrease.
          # Older 7zip versions can support these settings, as these changes
          # rely on existing functionality in the lzma format.
         '-m0=BCJ2',
         '-m1=LZMA:d27:fb128',
         '-m2=LZMA:d22:fb128:mf=bt2',
         '-m3=LZMA:d22:fb128:mf=bt2',
         '-mb0:1',
         '-mb0s1:2',
         '-mb0s2:3',
         compressed_file,
         input_file,]
  # 7za will not overwrite an existing archive; remove any stale output first.
  if os.path.exists(compressed_file):
    os.remove(compressed_file)
  RunSystemCommand(cmd)
def CopyAllFilesToStagingDir(config, distribution, staging_dir, build_dir,
                             enable_hidpi, enable_touch_ui):
  """Copies the files required for installer archive.

  Copies all common files required for various distributions of Chromium and
  also files for the specific Chromium build specified by distribution.
  """
  # Build the list of config sections to stage, then copy each in turn.
  sections = ['GENERAL']
  if distribution:
    # Strip a single leading underscore from the distribution name.
    if len(distribution) > 1 and distribution[0] == '_':
      distribution = distribution[1:]
    sections.append(distribution.upper())
  if enable_hidpi == '1':
    sections.append('HIDPI')
  if enable_touch_ui == '1':
    sections.append('TOUCH')
  for section in sections:
    CopySectionFilesToStagingDir(config, section, staging_dir, build_dir)
def CopySectionFilesToStagingDir(config, section, staging_dir, src_dir):
  """Copies the files listed in one config section into the staging tree.

  Each option name in *section* is a glob pattern relative to *src_dir*;
  its value is the destination directory relative to *staging_dir*.
  Options whose names end in 'dir' are metadata, not file patterns.
  """
  for pattern in config.options(section):
    if pattern.endswith('dir'):
      continue
    dst_dir = os.path.join(staging_dir, config.get(section, pattern))
    matches = glob.glob(os.path.join(src_dir, pattern))
    if matches and not os.path.exists(dst_dir):
      os.makedirs(dst_dir)
    for src_path in matches:
      # Don't clobber a file that an earlier (preferred) copy pass placed.
      dst_path = os.path.join(dst_dir, os.path.basename(src_path))
      if not os.path.exists(dst_path):
        shutil.copy(src_path, dst_dir)
def GenerateDiffPatch(options, orig_file, new_file, patch_file):
  """Writes a binary diff from orig_file to new_file into patch_file.

  Uses courgette.exe (taken from the previous installer directory) when
  options.diff_algorithm is COURGETTE; otherwise bsdiff.exe from the
  build directory.
  """
  if (options.diff_algorithm == "COURGETTE"):
    exe_file = os.path.join(options.last_chrome_installer, COURGETTE_EXEC)
    # FIX: build an argv list like the bsdiff branch below instead of a
    # hand-quoted command string -- subprocess handles quoting of paths
    # with spaces correctly and both branches now behave consistently.
    cmd = [exe_file, '-gen', orig_file, new_file, patch_file]
  else:
    exe_file = os.path.join(options.build_dir, BSDIFF_EXEC)
    cmd = [exe_file, orig_file, new_file, patch_file,]
  RunSystemCommand(cmd)
def GetLZMAExec(build_dir):
  """Returns the path of the bundled 7za.exe, relative to build_dir."""
  return os.path.join(build_dir, "..", "..", "third_party",
                      "lzma_sdk", "Executable", "7za.exe")
def GetPrevVersion(build_dir, temp_dir, last_chrome_installer, output_name):
  """Extracts chrome.dll from the previous installer archive into temp_dir
  and returns the name of the version directory it lived in ('' when no
  previous installer was given)."""
  if not last_chrome_installer:
    return ''

  lzma_exec = GetLZMAExec(build_dir)
  prev_archive_file = os.path.join(last_chrome_installer,
                                   output_name + ARCHIVE_SUFFIX)
  # Only chrome.dll needs extracting; its parent directory name is the
  # previous version string.
  cmd = [lzma_exec,
         'x',
         '-o"%s"' % temp_dir,
         prev_archive_file,
         'Chrome-bin/*/chrome.dll',]
  RunSystemCommand(cmd)
  dll_path = glob.glob(os.path.join(temp_dir, 'Chrome-bin', '*', 'chrome.dll'))
  return os.path.split(os.path.split(dll_path[0])[0])[1]
def MakeStagingDirectories(staging_dir):
  """Creates a staging path for installer archive. If directory exists already,
  deletes the existing directory.

  Returns a (file_path, temp_file_path) pair: the archive staging directory
  and a scratch directory used to extract the previous installer archive.
  """
  # BUG FIX: both paths were built from TEMP_ARCHIVE_DIR, so the function
  # returned the same directory twice and the staging tree and the temp
  # extraction tree clobbered each other. The first path must use
  # ARCHIVE_DIR.
  file_path = os.path.join(staging_dir, ARCHIVE_DIR)
  if os.path.exists(file_path):
    shutil.rmtree(file_path)
  os.makedirs(file_path)

  temp_file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
  if os.path.exists(temp_file_path):
    shutil.rmtree(temp_file_path)
  os.makedirs(temp_file_path)
  return (file_path, temp_file_path)
def Readconfig(input_file, current_version):
  """Reads config information from input file after setting default value of
  global variables.

  The returned SafeConfigParser is seeded with ChromeDir/VersionDir
  interpolation variables so section values can reference them.
  """
  variables = {}
  variables['ChromeDir'] = CHROME_DIR
  variables['VersionDir'] = os.path.join(variables['ChromeDir'],
                                          current_version)
  config = ConfigParser.SafeConfigParser(variables)
  config.read(input_file)
  return config
def RunSystemCommand(cmd, **kw):
  # Runs an external command, raising if its exit code is non-zero.
  # Extra keyword arguments are forwarded to subprocess.call (e.g. stdout).
  print 'Running', cmd
  exit_code = subprocess.call(cmd, **kw)
  if (exit_code != 0):
    raise Exception("Error while running cmd: %s, exit_code: %s" %
                    (cmd, exit_code))
def CreateArchiveFile(options, staging_dir, current_version, prev_version):
  """Creates a new installer archive file after deleting any existing old file.

  Returns the basename of the archive that mini_installer should embed:
  the uncompressed chrome.7z for component builds, the compressed full
  archive, or a compressed differential patch archive.
  """
  # First create an uncompressed archive file for the current build (chrome.7z)
  lzma_exec = GetLZMAExec(options.build_dir)
  archive_file = os.path.join(options.output_dir,
                              options.output_name + ARCHIVE_SUFFIX)
  cmd = [lzma_exec,
         'a',
         '-t7z',
         archive_file,
         os.path.join(staging_dir, CHROME_DIR),
         '-mx0',]
  # There doesn't seem to be any way in 7za.exe to override existing file so
  # we always delete before creating a new one.
  if not os.path.exists(archive_file):
    RunSystemCommand(cmd)
  elif options.skip_rebuild_archive != "true":
    os.remove(archive_file)
    RunSystemCommand(cmd)

  # Do not compress the archive in developer (component) builds.
  if options.component_build == '1':
    compressed_file = os.path.join(
        options.output_dir, options.output_name + COMPRESSED_ARCHIVE_SUFFIX)
    # Delete any stale compressed archive so the build can't pick it up.
    if os.path.exists(compressed_file):
      os.remove(compressed_file)
    return os.path.basename(archive_file)

  # If we are generating a patch, run bsdiff against previous build and
  # compress the resulting patch file. If this is not a patch just compress the
  # uncompressed archive file.
  patch_name_prefix = options.output_name + CHROME_PATCH_FILE_SUFFIX
  if options.last_chrome_installer:
    prev_archive_file = os.path.join(options.last_chrome_installer,
                                     options.output_name + ARCHIVE_SUFFIX)
    patch_file = os.path.join(options.build_dir, patch_name_prefix +
                              PATCH_FILE_EXT)
    GenerateDiffPatch(options, prev_archive_file, archive_file, patch_file)
    # Patch archives are named <name>_patch_<new>_from_<old>.packed.7z.
    compressed_archive_file = patch_name_prefix + '_' + \
                              current_version + '_from_' + prev_version + \
                              COMPRESSED_FILE_EXT
    orig_file = patch_file
  else:
    compressed_archive_file = options.output_name + COMPRESSED_ARCHIVE_SUFFIX
    orig_file = archive_file

  compressed_archive_file_path = os.path.join(options.output_dir,
                                              compressed_archive_file)
  CompressUsingLZMA(options.build_dir, compressed_archive_file_path, orig_file)

  return compressed_archive_file
def PrepareSetupExec(options, current_version, prev_version):
  """Prepares setup.exe for bundling in mini_installer based on options.

  Returns the file name to embed: the raw exe (FULL), a compressed diff
  against the previous installer (DIFF), or a makecab-compressed exe
  (default, named setup.ex_).
  """
  if options.setup_exe_format == "FULL":
    setup_file = SETUP_EXEC
  elif options.setup_exe_format == "DIFF":
    if not options.last_chrome_installer:
      raise Exception(
          "To use DIFF for setup.exe, --last_chrome_installer is needed.")
    prev_setup_file = os.path.join(options.last_chrome_installer, SETUP_EXEC)
    new_setup_file = os.path.join(options.build_dir, SETUP_EXEC)
    patch_file = os.path.join(options.build_dir, SETUP_PATCH_FILE_PREFIX +
                              PATCH_FILE_EXT)
    GenerateDiffPatch(options, prev_setup_file, new_setup_file, patch_file)
    setup_file = SETUP_PATCH_FILE_PREFIX + '_' + current_version + \
                 '_from_' + prev_version + COMPRESSED_FILE_EXT
    setup_file_path = os.path.join(options.build_dir, setup_file)
    CompressUsingLZMA(options.build_dir, setup_file_path, patch_file)
  else:
    # Default: compress setup.exe into a .cab; makecab renames it setup.ex_.
    cmd = ['makecab.exe',
           '/D', 'CompressionType=LZX',
           '/V1',
           '/L', options.output_dir,
           os.path.join(options.build_dir, SETUP_EXEC),]
    # Send useless makecab progress on stdout to the bitbucket.
    RunSystemCommand(cmd, stdout=open(os.devnull, "w"))
    setup_file = SETUP_EXEC[:-1] + "_"
  return setup_file
# Template used by CreateResourceInputFile() below to emit packed_files.txt
# for the mini_installer build; the %(...)s fields are filled in per build.
_RESOURCE_FILE_TEMPLATE = """\
// This file is automatically generated by create_installer_archive.py.
// It contains the resource entries that are going to be linked inside
// mini_installer.exe. For each file to be linked there should be two
// lines:
// - The first line contains the output filename (without path) and the
// type of the resource ('BN' - not compressed , 'BL' - LZ compressed,
// 'B7' - LZMA compressed)
// - The second line contains the path to the input file. Uses '/' to
// separate path components.
%(setup_file)s %(setup_file_resource_type)s
"%(setup_file_path)s"
%(archive_file)s B7
"%(archive_file_path)s"
"""
def CreateResourceInputFile(
    output_dir, setup_format, archive_file, setup_file, resource_file_path):
  """Creates resource input file (packed_files.txt) for mini_installer project.

  This method checks the format of setup.exe being used and according sets
  its resource type.
  """
  # Map the setup.exe packaging format to its resource type; anything else
  # (the makecab default) is LZ-compressed ("BL").
  resource_types = {"FULL": "BN", "DIFF": "B7"}
  setup_resource_type = resource_types.get(setup_format, "BL")

  # Expand the resource file template.
  fields = {
      'setup_file': setup_file,
      'setup_file_resource_type': setup_resource_type,
      'setup_file_path':
          os.path.join(output_dir, setup_file).replace("\\","/"),
      'archive_file': archive_file,
      'archive_file_path':
          os.path.join(output_dir, archive_file).replace("\\","/"),
      }
  with open(resource_file_path, 'w') as f:
    f.write(_RESOURCE_FILE_TEMPLATE % fields)
def CopyAndAugmentManifest(build_dir, output_dir, manifest_name,
                           inserted_string, insert_before):
  """Copies |manifest_name| from |build_dir| to |output_dir| with the same
  content plus |inserted_string| added just before the first occurrence of
  |insert_before|.

  Raises ValueError if |insert_before| is not found in the manifest.
  """
  # FIX: use context managers so file handles are always closed, and
  # enumerate instead of xrange(len(...)) for clarity (and Py3 tolerance).
  with open(os.path.join(build_dir, manifest_name), 'r') as manifest_file:
    manifest_lines = manifest_file.readlines()

  # Locate the first line containing the insertion anchor.
  insert_line = -1
  insert_pos = -1
  for i, line in enumerate(manifest_lines):
    insert_pos = line.find(insert_before)
    if insert_pos != -1:
      insert_line = i
      break
  if insert_line == -1:
    raise ValueError('Could not find {0} in the manifest:\n{1}'.format(
        insert_before, ''.join(manifest_lines)))

  # Splice the new content in just before the anchor text.
  old = manifest_lines[insert_line]
  manifest_lines[insert_line] = (old[:insert_pos] + inserted_string +
                                 old[insert_pos:])

  with open(os.path.join(output_dir, manifest_name), 'w') as out_file:
    out_file.write(''.join(manifest_lines))
# Copy the relevant CRT DLLs to |build_dir|. We copy DLLs from all versions
# of VS installed to make sure we have the correct CRT version, unused DLLs
# should not conflict with the others anyways.
def CopyVisualStudioRuntimeDLLs(build_dir, target_arch):
  """Copies the Visual Studio CRT runtime DLLs for |target_arch| into
  |build_dir|, falling back to the system copies when no Visual Studio
  install is found. Only warns (never fails) when nothing is located."""
  # Infer Debug/Release from the output directory name.
  is_debug = os.path.basename(build_dir).startswith('Debug')
  if not is_debug and not os.path.basename(build_dir).startswith('Release'):
    print ("Warning: could not determine build configuration from "
           "output directory, assuming Release build.")

  crt_dlls = []
  sys_dll_dir = None
  if is_debug:
    crt_dlls = glob.glob(
        "C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/"
        "Debug_NonRedist/" + target_arch + "/Microsoft.*.DebugCRT/*.dll")
  else:
    crt_dlls = glob.glob(
        "C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/" +
        target_arch + "/Microsoft.*.CRT/*.dll")

  # Also handle the case where someone is building using only winsdk and
  # doesn't have Visual Studio installed.
  if not crt_dlls:
    if target_arch == 'x64':
      # check we are are on a 64bit system by existence of WOW64 dir
      if os.access("C:/Windows/SysWOW64", os.F_OK):
        sys_dll_dir = "C:/Windows/System32"
      else:
        # only support packaging of 64bit installer on 64bit system
        # but this just as bad as not finding DLLs at all so we
        # don't abort here to mirror behavior below
        print ("Warning: could not find x64 CRT DLLs on x86 system.")
    else:
      # On a 64-bit system, 32-bit dlls are in SysWOW64 (don't ask).
      if os.access("C:/Windows/SysWOW64", os.F_OK):
        sys_dll_dir = "C:/Windows/SysWOW64"
      else:
        sys_dll_dir = "C:/Windows/System32"

    if sys_dll_dir is not None:
      if is_debug:
        crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0d.dll"))
      else:
        crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0.dll"))

  if not crt_dlls:
    print ("Warning: could not find CRT DLLs to copy to build dir - target "
           "may not run on a system that doesn't have those DLLs.")

  for dll in crt_dlls:
    shutil.copy(dll, build_dir)
# Copies component build DLLs and generates required config files and manifests
# in order for chrome.exe and setup.exe to be able to find those DLLs at
# run-time.
# This is meant for developer builds only and should never be used to package
# an official build.
def DoComponentBuildTasks(staging_dir, build_dir, target_arch, current_version):
  """Stages component-build DLLs plus the .config/.manifest files that let
  chrome.exe and setup.exe locate those DLLs post-install."""
  # Get the required directories for the upcoming operations.
  chrome_dir = os.path.join(staging_dir, CHROME_DIR)
  version_dir = os.path.join(chrome_dir, current_version)
  installer_dir = os.path.join(version_dir, 'Installer')
  # |installer_dir| is technically only created post-install, but we need it
  # now to add setup.exe's config and manifest to the archive.
  if not os.path.exists(installer_dir):
    os.mkdir(installer_dir)

  # Copy the VS CRT DLLs to |build_dir|. This must be done before the general
  # copy step below to ensure the CRT DLLs are added to the archive and marked
  # as a dependency in the exe manifests generated below.
  CopyVisualStudioRuntimeDLLs(build_dir, target_arch)

  # Copy all the DLLs in |build_dir| to the version directory. Simultaneously
  # build a list of their names to mark them as dependencies of chrome.exe and
  # setup.exe later.
  dlls = glob.glob(os.path.join(build_dir, '*.dll'))
  dll_names = []
  for dll in dlls:
    # remoting_*.dll's don't belong in the archive (it doesn't depend on them
    # in gyp). Trying to copy them causes a build race when creating the
    # installer archive in component mode. See: crbug.com/180996
    if os.path.basename(dll).startswith('remoting_'):
      continue
    shutil.copy(dll, version_dir)
    dll_names.append(os.path.splitext(os.path.basename(dll))[0])

  # Private-path probing config shared by both executables; {rel_path} is
  # the DLL directory relative to the exe's post-install location.
  exe_config = (
      "<configuration>\n"
      "  <windows>\n"
      "    <assemblyBinding xmlns='urn:schemas-microsoft-com:asm.v1'>\n"
      "        <probing privatePath='{rel_path}'/>\n"
      "    </assemblyBinding>\n"
      "  </windows>\n"
      "</configuration>")

  # Write chrome.exe.config to point to the version directory.
  chrome_exe_config_file = open(
      os.path.join(chrome_dir, 'chrome.exe.config'), 'w')
  chrome_exe_config_file.write(exe_config.format(rel_path=current_version))
  chrome_exe_config_file.close()

  # Write setup.exe.config to point to the version directory (which is one
  # level up from setup.exe post-install).
  setup_exe_config_file = open(
      os.path.join(installer_dir, 'setup.exe.config'), 'w')
  setup_exe_config_file.write(exe_config.format(rel_path='..'))
  setup_exe_config_file.close()

  # Add a dependency for each DLL in |dlls| to the existing manifests for
  # chrome.exe and setup.exe. Some of these DLLs are not actually used by
  # either process, but listing them all as dependencies doesn't hurt as it
  # only makes them visible to the exes, just like they already are in the
  # build output directory.
  exe_manifest_dependencies_list = []
  for name in dll_names:
    exe_manifest_dependencies_list.append(
        "<dependency>"
        "<dependentAssembly>"
        "<assemblyIdentity type='win32' name='chrome.{dll_name}' "
        "version='0.0.0.0' language='*'/>"
        "</dependentAssembly>"
        "</dependency>".format(dll_name=name))
  exe_manifest_dependencies = ''.join(exe_manifest_dependencies_list)

  # Write a modified chrome.exe.manifest beside chrome.exe.
  CopyAndAugmentManifest(build_dir, chrome_dir, 'chrome.exe.manifest',
                         exe_manifest_dependencies, '</assembly>')
  # Write a modified setup.exe.manifest beside setup.exe in
  # |version_dir|/Installer.
  CopyAndAugmentManifest(build_dir, installer_dir, 'setup.exe.manifest',
                         exe_manifest_dependencies, '</assembly>')

  # Generate assembly manifests for each DLL in |dlls|. These do not interfere
  # with the private manifests potentially embedded in each DLL. They simply
  # allow chrome.exe and setup.exe to see those DLLs although they are in a
  # separate directory post-install.
  for name in dll_names:
    dll_manifest = (
        "<assembly\n"
        "    xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>\n"
        "  <assemblyIdentity name='chrome.{dll_name}' version='0.0.0.0'\n"
        "      type='win32'/>\n"
        "  <file name='{dll_name}.dll'/>\n"
        "</assembly>".format(dll_name=name))

    dll_manifest_file = open(os.path.join(
        version_dir, "chrome.{dll_name}.manifest".format(dll_name=name)), 'w')
    dll_manifest_file.write(dll_manifest)
    dll_manifest_file.close()
def main(options):
  """Main method that reads input file, creates archive file and write
  resource input file.
  """
  current_version = BuildVersion(options.build_dir)

  config = Readconfig(options.input_file, current_version)

  (staging_dir, temp_dir) = MakeStagingDirectories(options.staging_dir)

  # '' when --last_chrome_installer was not given (full, non-diff build).
  prev_version = GetPrevVersion(options.build_dir, temp_dir,
                                options.last_chrome_installer,
                                options.output_name)

  # Preferentially copy the files we can find from the output_dir, as
  # this is where we'll find the Syzygy-optimized executables when
  # building the optimized mini_installer.
  if options.build_dir != options.output_dir:
    CopyAllFilesToStagingDir(config, options.distribution,
                             staging_dir, options.output_dir,
                             options.enable_hidpi, options.enable_touch_ui)

  # Now copy the remainder of the files from the build dir.
  CopyAllFilesToStagingDir(config, options.distribution,
                           staging_dir, options.build_dir,
                           options.enable_hidpi, options.enable_touch_ui)

  if options.component_build == '1':
    DoComponentBuildTasks(staging_dir, options.build_dir,
                          options.target_arch, current_version)

  # Past this point both versions are referred to by their
  # <build>.<patch> components only.
  version_numbers = current_version.split('.')
  current_build_number = version_numbers[2] + '.' + version_numbers[3]
  prev_build_number = ''
  if prev_version:
    version_numbers = prev_version.split('.')
    prev_build_number = version_numbers[2] + '.' + version_numbers[3]

  # Name of the archive file built (for example - chrome.7z or
  # patch-<old_version>-<new_version>.7z or patch-<new_version>.7z
  archive_file = CreateArchiveFile(options, staging_dir,
                                   current_build_number, prev_build_number)

  setup_file = PrepareSetupExec(options,
                                current_build_number, prev_build_number)

  CreateResourceInputFile(options.output_dir, options.setup_exe_format,
                          archive_file, setup_file, options.resource_file_path)
def _ParseOptions():
  """Parses and validates command-line options; returns the options object.

  Exits with a parser error when a required option (--build_dir,
  --staging_dir, --input_file) is missing; fills in defaults for
  --output_dir and --resource_file_path.
  """
  parser = optparse.OptionParser()
  parser.add_option('-i', '--input_file',
      help='Input file describing which files to archive.')
  parser.add_option('-b', '--build_dir',
      help='Build directory. The paths in input_file are relative to this.')
  parser.add_option('--staging_dir',
      help='Staging directory where intermediate files and directories '
           'will be created')
  parser.add_option('-o', '--output_dir',
      help='The output directory where the archives will be written. '
            'Defaults to the build_dir.')
  parser.add_option('--resource_file_path',
      help='The path where the resource file will be output. '
           'Defaults to %s in the build directory.' %
               MINI_INSTALLER_INPUT_FILE)
  parser.add_option('-d', '--distribution',
      help='Name of Chromium Distribution. Optional.')
  parser.add_option('-s', '--skip_rebuild_archive',
      default="False", help='Skip re-building Chrome.7z archive if it exists.')
  parser.add_option('-l', '--last_chrome_installer',
      help='Generate differential installer. The value of this parameter '
           'specifies the directory that contains base versions of '
           'setup.exe, courgette.exe (if --diff_algorithm is COURGETTE) '
           '& chrome.7z.')
  parser.add_option('-f', '--setup_exe_format', default='COMPRESSED',
      help='How setup.exe should be included {COMPRESSED|DIFF|FULL}.')
  parser.add_option('-a', '--diff_algorithm', default='BSDIFF',
      help='Diff algorithm to use when generating differential patches '
           '{BSDIFF|COURGETTE}.')
  parser.add_option('-n', '--output_name', default='chrome',
      help='Name used to prefix names of generated archives.')
  parser.add_option('--enable_hidpi', default='0',
      help='Whether to include HiDPI resource files.')
  parser.add_option('--enable_touch_ui', default='0',
      help='Whether to include resource files from the "TOUCH" section of the '
           'input file.')
  parser.add_option('--component_build', default='0',
      help='Whether this archive is packaging a component build. This will '
           'also turn off compression of chrome.7z into chrome.packed.7z and '
           'helpfully delete any old chrome.packed.7z in |output_dir|.')
  parser.add_option('--target_arch', default='x86',
      help='Specify the target architecture for installer - this is used '
           'to determine which CRT runtime files to pull and package '
           'with the installer archive {x86|x64}.')

  options, _ = parser.parse_args()
  if not options.build_dir:
    parser.error('You must provide a build dir.')

  options.build_dir = os.path.normpath(options.build_dir)

  if not options.staging_dir:
    parser.error('You must provide a staging dir.')

  if not options.input_file:
    parser.error('You must provide an input file')

  if not options.output_dir:
    options.output_dir = options.build_dir

  if not options.resource_file_path:
    options.resource_file_path = os.path.join(options.build_dir,
                                              MINI_INSTALLER_INPUT_FILE)

  return options
if '__main__' == __name__:
  # Echo the full command line (useful in build logs), then run.
  print sys.argv
  sys.exit(main(_ParseOptions()))
| bsd-3-clause |
michaelgallacher/intellij-community | python/lib/Lib/string.py | 92 | 16675 | """A collection of string operations (most are no longer used).
Warning: most of the code you see here isn't normally used nowadays.
Beginning with Python 1.6, many of these functions are implemented as
methods on the standard string object. They used to be implemented by
a built-in module called strop, but strop is now obsolete itself.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
punctuation -- a string containing all characters considered punctuation
printable -- a string containing all characters considered printable
"""
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
ascii_lowercase = lowercase
ascii_uppercase = uppercase
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace

# Case conversion helpers
# Use str to convert Unicode literal in case of -U
# _idmap is the 256-byte identity mapping consumed by maketrans() below.
l = map(chr, xrange(256))
_idmap = str('').join(l)
del l
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
    """capwords(s, [sep]) -> string

    Split *s* into words on *sep* (default: any whitespace run),
    capitalize each word, and join them back with *sep* (or a single
    space).  With the default separator, runs of whitespace therefore
    collapse to one space.
    """
    joiner = sep or ' '
    capitalized = []
    for word in s.split(sep):
        capitalized.append(word.capitalize())
    return joiner.join(capitalized)
# Construct a translation string
# Cached list form of _idmap, built lazily on first use.
_idmapL = None
def maketrans(fromstr, tostr):
    """maketrans(frm, to) -> string

    Build a 256-byte translation table for string.translate(): every
    character of *frm* maps to the character at the same position in
    *to*, and all other byte values map to themselves.  *frm* and *to*
    must have equal length.
    """
    if len(fromstr) != len(tostr):
        raise ValueError("maketrans arguments must have same length")
    global _idmapL
    if not _idmapL:
        _idmapL = map(None, _idmap)
    table = _idmapL[:]
    for src, dst in zip(fromstr, tostr):
        table[ord(src)] = dst
    return ''.join(table)
####################################################################
import re as _re
class _multimap:
"""Helper class for combining multiple mappings.
Used by .{safe_,}substitute() to combine the mapping and keyword
arguments.
"""
def __init__(self, primary, secondary):
self._primary = primary
self._secondary = secondary
def __getitem__(self, key):
try:
return self._primary[key]
except KeyError:
return self._secondary[key]
class _TemplateMetaclass(type):
    # Regex skeleton matching the four placeholder forms: $$ (escape),
    # $name, ${name}, and any other (invalid) use of the delimiter.
    # %(delim)s / %(id)s are filled in per subclass below.
    pattern = r"""
    %(delim)s(?:
      (?P<escaped>%(delim)s) | # Escape sequence of two delimiters
      (?P<named>%(id)s)      | # delimiter and a Python identifier
      {(?P<braced>%(id)s)}   | # delimiter and a braced identifier
      (?P<invalid>)            # Other ill-formed delimiter exprs
    )
    """
    def __init__(cls, name, bases, dct):
        super(_TemplateMetaclass, cls).__init__(name, bases, dct)
        # A subclass may supply its own raw 'pattern'; otherwise build
        # one from its delimiter/idpattern class attributes.
        if 'pattern' in dct:
            pattern = cls.pattern
        else:
            pattern = _TemplateMetaclass.pattern % {
                'delim' : _re.escape(cls.delimiter),
                'id' : cls.idpattern,
                }
        # Compiled once per class; IGNORECASE lets idpattern match A-Z too.
        cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
    """A string class for supporting $-substitutions."""
    # Python 2 style metaclass hook; the metaclass compiles cls.pattern.
    __metaclass__ = _TemplateMetaclass
    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'
    def __init__(self, template):
        self.template = template
    # Search for $$, $identifier, ${identifier}, and any bare $'s
    def _invalid(self, mo):
        # Translate the character offset of the bad placeholder into a
        # 1-based (line, column) pair for the error message.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))
    def substitute(self, *args, **kws):
        # Strict substitution: unknown placeholders raise KeyError,
        # ill-formed ones raise ValueError (via _invalid).
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            # Keyword arguments shadow entries in the positional mapping.
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
    def safe_substitute(self, *args, **kws):
        # Lenient substitution: unknown or malformed placeholders are
        # left in the result verbatim instead of raising.
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    return self.delimiter + named
            braced = mo.group('braced')
            if braced is not None:
                try:
                    return '%s' % (mapping[braced],)
                except KeyError:
                    return self.delimiter + '{' + braced + '}'
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return self.delimiter
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
####################################################################
# NOTE: Everything below here is deprecated. Use string methods instead.
# This stuff will go away in Python 3.0.
# Backward compatible names for exceptions
# Pre-exception-class error names kept for very old callers; all four
# were distinct string "exceptions" once, now they alias ValueError.
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# convert UPPER CASE letters to lower case
def lower(s):
    """lower(s) -> string

    Return a copy of *s* with all cased characters lowercased.
    Deprecated: call s.lower() directly.
    """
    result = s.lower()
    return result
# Convert lower case letters to UPPER CASE
def upper(s):
    """upper(s) -> string

    Return a copy of *s* with all cased characters uppercased.
    Deprecated: call s.upper() directly.
    """
    result = s.upper()
    return result
# Swap lower case letters and UPPER CASE
def swapcase(s):
    """swapcase(s) -> string

    Return a copy of *s* with uppercase characters lowercased and
    vice versa.  Deprecated: call s.swapcase() directly.
    """
    result = s.swapcase()
    return result
# Strip leading and trailing tabs and spaces
def strip(s, chars=None):
    """strip(s [,chars]) -> string

    Return a copy of *s* with leading and trailing whitespace removed.
    If *chars* is given and not None, strip those characters instead.
    If chars is unicode, S will be converted to unicode before stripping.
    """
    stripped = s.strip(chars)
    return stripped
# Strip leading tabs and spaces
def lstrip(s, chars=None):
    """lstrip(s [,chars]) -> string

    Return a copy of *s* with leading whitespace removed.  If *chars*
    is given and not None, strip those characters instead.
    """
    stripped = s.lstrip(chars)
    return stripped
# Strip trailing tabs and spaces
def rstrip(s, chars=None):
    """rstrip(s [,chars]) -> string

    Return a copy of *s* with trailing whitespace removed.  If *chars*
    is given and not None, strip those characters instead.
    """
    stripped = s.rstrip(chars)
    return stripped
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=-1):
    """split(s [,sep [,maxsplit]]) -> list of strings

    Break *s* into a list of words.  *sep* is the delimiter string
    (default: any run of whitespace).  When *maxsplit* is non-negative
    at most that many splits are performed, leaving at most
    maxsplit+1 words.  (split and splitfields are synonymous.)
    """
    words = s.split(sep, maxsplit)
    return words
splitfields = split
# Split a string into a list of space/tab-separated words
def rsplit(s, sep=None, maxsplit=-1):
    """rsplit(s [,sep [,maxsplit]]) -> list of strings

    Like split(), but splitting starts from the end of the string and
    works toward the front.  At most *maxsplit* splits are done when it
    is non-negative; *sep* defaults to any whitespace run.
    """
    words = s.rsplit(sep, maxsplit)
    return words
# Join fields with optional separator
def join(words, sep = ' '):
    """join(list [,sep]) -> string

    Concatenate the strings in *words*, inserting *sep* (a single
    space by default) between consecutive items.  (joinfields and
    join are synonymous.)
    """
    joined = sep.join(words)
    return joined
joinfields = join
# Find substring, raise exception if not found
def index(s, *args):
    """index(s, sub [,start [,end]]) -> int

    Like find() but raise ValueError when the substring is not found.
    """
    position = s.index(*args)
    return position
# Find last substring, raise exception if not found
def rindex(s, *args):
    """rindex(s, sub [,start [,end]]) -> int

    Like rfind() but raise ValueError when the substring is not found.
    """
    position = s.rindex(*args)
    return position
# Count non-overlapping occurrences of substring
def count(s, *args):
    """count(s, sub[, start[,end]]) -> int

    Return the number of non-overlapping occurrences of substring
    *sub* in s[start:end].  Optional start and end are interpreted as
    in slice notation.
    """
    occurrences = s.count(*args)
    return occurrences
# Find substring, return -1 if not found
def find(s, *args):
    """find(s, sub [,start [,end]]) -> int

    Return the lowest index in *s* where substring *sub* is found,
    such that sub is contained within s[start:end].  Optional start
    and end are interpreted as in slice notation.  Return -1 on
    failure.
    """
    position = s.find(*args)
    return position
# Find last substring, return -1 if not found
def rfind(s, *args):
    """rfind(s, sub [,start [,end]]) -> int

    Return the highest index in *s* where substring *sub* is found,
    such that sub is contained within s[start:end].  Optional start
    and end are interpreted as in slice notation.  Return -1 on
    failure.
    """
    position = s.rfind(*args)
    return position
# for a bit of speed
# Module-level aliases of the conversion builtins, bound once here so the
# atof/atoi/atol wrappers below avoid repeated builtin lookups.
# NOTE: `long` exists only in Python 2.
_float = float
_int = int
_long = long
# Convert string to float
def atof(s):
    """atof(s) -> float

    Return the floating point number represented by the string *s*.
    Deprecated: call float(s) directly.
    """
    value = _float(s)
    return value
# Convert string to integer
def atoi(s, base=10):
    """atoi(s [,base]) -> int

    Return the integer represented by the string *s* in the given
    *base* (default 10).  The string must consist of one or more
    digits, optionally preceded by a sign.  If base is 0 the base is
    inferred from the leading characters of s (0 for octal, 0x/0X for
    hexadecimal); if base is 16 a leading 0x/0X is accepted.
    """
    value = _int(s, base)
    return value
# Convert string to long integer
def atol(s, base=10):
    """atol(s [,base]) -> long

    Return the long integer represented by the string *s* in the given
    *base* (default 10).  The string must consist of one or more
    digits, optionally preceded by a sign.  If base is 0 the base is
    inferred from the leading characters (0 octal, 0x/0X hex); if base
    is 16 a leading 0x/0X is accepted.  A trailing L or l is not
    accepted unless base is 0.
    """
    value = _long(s, base)
    return value
# Left-justify a string
def ljust(s, width, *args):
    """ljust(s, width[, fillchar]) -> string

    Return *s* left-justified in a field of the given *width*, padded
    with spaces (or *fillchar* when given).  The string is never
    truncated.
    """
    padded = s.ljust(width, *args)
    return padded
# Right-justify a string
def rjust(s, width, *args):
    """rjust(s, width[, fillchar]) -> string

    Return *s* right-justified in a field of the given *width*, padded
    with spaces (or *fillchar* when given).  The string is never
    truncated.
    """
    padded = s.rjust(width, *args)
    return padded
# Center a string
def center(s, width, *args):
    """center(s, width[, fillchar]) -> string

    Return *s* centered in a field of the given *width*, padded with
    spaces (or *fillchar* when given).  The string is never truncated.
    """
    padded = s.center(width, *args)
    return padded
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
    """zfill(x, width) -> string
    Pad a numeric string x with zeros on the left, to fill a field
    of the specified width.  The string x is never truncated.
    """
    # Deprecated convenience: numbers are converted with repr() first.
    # `basestring` is the Python 2 common base of str and unicode.
    if not isinstance(x, basestring):
        x = repr(x)
    return x.zfill(width)
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
    """expandtabs(s [,tabsize]) -> string

    Return a copy of *s* with each tab replaced by enough spaces to
    reach the next multiple of *tabsize* (default 8) in the current
    column; newlines reset the column.
    """
    expanded = s.expandtabs(tabsize)
    return expanded
# Character translation through look-up table.
def translate(s, table, deletions=""):
    """translate(s,table [,deletions]) -> string

    Map every character of *s* through *table*, a translation string of
    length 256, after removing any characters listed in *deletions*.
    The deletions argument is not allowed for Unicode strings.
    """
    if deletions:
        return s.translate(table, deletions)
    # Appending s[:0] coerces an 8-bit table to Unicode when s is a
    # Unicode string; it also means table *cannot* be a dictionary --
    # for that feature, use u.translate() directly.
    return s.translate(table + s[:0])
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
    """capitalize(s) -> string

    Return a copy of *s* with only its first character capitalized and
    the rest lowercased, e.g. "aBc dEf" -> "Abc def".
    """
    result = s.capitalize()
    return result
# Substring replacement (global)
def replace(s, old, new, maxsplit=-1):
    """replace (str, old, new[, maxsplit]) -> string

    Return a copy of *s* with occurrences of substring *old* replaced
    by *new*.  When *maxsplit* is non-negative, only the first
    *maxsplit* occurrences are replaced.
    """
    replaced = s.replace(old, new, maxsplit)
    return replaced
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
# strop is the CPython 2 C accelerator module; it is absent on Jython and
# on modern Pythons, in which case the pure-Python definitions above stand.
try:
    from strop import maketrans, lowercase, uppercase, whitespace
    letters = lowercase + uppercase
except ImportError:
    pass # Use the original versions
| apache-2.0 |
blademainer/intellij-community | python/helpers/docutils/parsers/rst/directives/images.py | 51 | 6348 | # $Id: images.py 5952 2009-05-19 08:45:27Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Directives for figures and simple images.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives, states
from docutils.nodes import fully_normalize_name, whitespace_normalize_name
from docutils.parsers.rst.roles import set_classes
# PIL is optional; it is only used by the Figure directive to measure an
# image's pixel width when ``:figwidth: image`` is requested.
try:
    import Image as PIL # PIL
except ImportError:
    PIL = None
class Image(Directive):
    """reStructuredText ``image`` directive.

    Builds an ``image`` node from the directive argument (the URI) and
    options, optionally wrapped in a ``reference`` node when a
    ``:target:`` option is present.
    """
    align_h_values = ('left', 'center', 'right')
    align_v_values = ('top', 'middle', 'bottom')
    align_values = align_v_values + align_h_values
    def align(argument):
        # This is not callable as self.align.  We cannot make it a
        # staticmethod because we're saving an unbound method in
        # option_spec below.
        return directives.choice(argument, Image.align_values)
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {'alt': directives.unchanged,
                   'height': directives.length_or_unitless,
                   'width': directives.length_or_percentage_or_unitless,
                   'scale': directives.percentage,
                   'align': align,
                   'target': directives.unchanged_required,
                   'class': directives.class_option}
    def run(self):
        # Validate the 'align' option: inside a substitution definition
        # only vertical values are legal, otherwise only horizontal ones.
        if 'align' in self.options:
            if isinstance(self.state, states.SubstitutionDef):
                # Check for align_v_values.
                if self.options['align'] not in self.align_v_values:
                    raise self.error(
                        'Error in "%s" directive: "%s" is not a valid value '
                        'for the "align" option within a substitution '
                        'definition.  Valid values for "align" are: "%s".'
                        % (self.name, self.options['align'],
                           '", "'.join(self.align_v_values)))
            elif self.options['align'] not in self.align_h_values:
                raise self.error(
                    'Error in "%s" directive: "%s" is not a valid value for '
                    'the "align" option.  Valid values for "align" are: "%s".'
                    % (self.name, self.options['align'],
                       '", "'.join(self.align_h_values)))
        messages = []
        reference = directives.uri(self.arguments[0])
        self.options['uri'] = reference
        reference_node = None
        # Resolve the optional :target: option into a reference node,
        # either a direct URI or a named (indirect) reference.
        if 'target' in self.options:
            block = states.escape2null(
                self.options['target']).splitlines()
            block = [line for line in block]
            target_type, data = self.state.parse_target(
                block, self.block_text, self.lineno)
            if target_type == 'refuri':
                reference_node = nodes.reference(refuri=data)
            elif target_type == 'refname':
                reference_node = nodes.reference(
                    refname=fully_normalize_name(data),
                    name=whitespace_normalize_name(data))
                reference_node.indirect_reference_name = data
                self.state.document.note_refname(reference_node)
            else:                           # malformed target
                messages.append(data)       # data is a system message
            del self.options['target']
        set_classes(self.options)
        image_node = nodes.image(self.block_text, **self.options)
        if reference_node:
            # Wrap the image in the reference so it becomes a link.
            reference_node += image_node
            return messages + [reference_node]
        else:
            return messages + [image_node]
class Figure(Image):
    """reStructuredText ``figure`` directive.

    An image with an optional caption (first content paragraph) and
    legend (remaining content), wrapped in a ``figure`` node.
    """
    def align(argument):
        # Figures only allow horizontal alignment values.
        return directives.choice(argument, Figure.align_h_values)
    def figwidth_value(argument):
        # 'image' means: take the width from the image file itself (needs PIL).
        if argument.lower() == 'image':
            return 'image'
        else:
            return directives.length_or_percentage_or_unitless(argument, 'px')
    option_spec = Image.option_spec.copy()
    option_spec['figwidth'] = figwidth_value
    option_spec['figclass'] = directives.class_option
    option_spec['align'] = align
    has_content = True
    def run(self):
        # Pop figure-only options before delegating to Image.run().
        figwidth = self.options.pop('figwidth', None)
        figclasses = self.options.pop('figclass', None)
        align = self.options.pop('align', None)
        (image_node,) = Image.run(self)
        if isinstance(image_node, nodes.system_message):
            return [image_node]
        figure_node = nodes.figure('', image_node)
        if figwidth == 'image':
            if PIL and self.state.document.settings.file_insertion_enabled:
                # PIL doesn't like Unicode paths:
                try:
                    i = PIL.open(str(image_node['uri']))
                except (IOError, UnicodeError):
                    pass
                else:
                    self.state.document.settings.record_dependencies.add(
                        image_node['uri'])
                    figure_node['width'] = i.size[0]
        elif figwidth is not None:
            figure_node['width'] = figwidth
        if figclasses:
            figure_node['classes'] += figclasses
        if align:
            figure_node['align'] = align
        if self.content:
            # First paragraph of the content is the caption; the rest,
            # if any, becomes the legend.
            node = nodes.Element()          # anonymous container for parsing
            self.state.nested_parse(self.content, self.content_offset, node)
            first_node = node[0]
            if isinstance(first_node, nodes.paragraph):
                caption = nodes.caption(first_node.rawsource, '',
                                        *first_node.children)
                figure_node += caption
            elif not (isinstance(first_node, nodes.comment)
                      and len(first_node) == 0):
                error = self.state_machine.reporter.error(
                      'Figure caption must be a paragraph or empty comment.',
                      nodes.literal_block(self.block_text, self.block_text),
                      line=self.lineno)
                return [figure_node, error]
            if len(node) > 1:
                figure_node += nodes.legend('', *node[1:])
        return [figure_node]
| apache-2.0 |
ericmoritz/requests_debug | test.py | 1 | 6351 | import threading
import Queue
from wsgiref.simple_server import make_server
from functools import partial
from pprint import pprint
from requests_debug import debug as requests_debug
import requests
import time
from testfixtures import compare
from contextlib import contextmanager
import logging
logging.basicConfig(level=logging.DEBUG)
def client_thread_target(results_q, thread_id, url):
    """Worker body: issue two GETs to *url*, then report this thread's
    requests_debug checkpoint id and recorded request items on *results_q*."""
    for n in xrange(2):
        requests.get(
            url,
            params={"thread_id": thread_id, "n": n}
        )
    results_q.put(
        (thread_id, requests_debug.checkpoint_id(), requests_debug.items())
    )
def client_thread(results_q, thread_id, url):
    """Build (but do not start) a Thread running client_thread_target
    with the given result queue, thread id and URL."""
    target = partial(client_thread_target, results_q, thread_id, url)
    return threading.Thread(target=target)
def server_timeout_thread(timeout, http_server):
    """Safety net: stop *http_server* after *timeout* seconds so a hung
    test cannot leave the WSGI server running forever."""
    time.sleep(timeout)
    stop_server(http_server)
@contextmanager
def start_server():
    """Context manager yielding a throwaway local WSGI server.

    The app returns 302 for paths containing "error" (redirecting to the
    same path, i.e. a redirect loop), 404 for paths containing "404",
    and 200 "ok." otherwise.  A watchdog thread shuts the server down
    after 3 seconds regardless.
    """
    def app(environ, start_response):
        if "error" in environ.get('PATH_INFO', ''):
            # Redirect to the requested path itself -> infinite loop.
            start_response("302 Moved Temporarily", [
                ("Location", environ['PATH_INFO'])])
            return []
        elif "404" in environ.get('PATH_INFO', ''):
            start_response("404 Not Found", [])
            return []
        else:
            start_response("200 OK", [])
            return ["ok."]
    # Port 0 lets the OS pick a free port; see server_port().
    http_server = make_server('127.0.0.1', 0, app)
    timeout_thread = threading.Thread(
        target=partial(
            server_timeout_thread,
            3,
            http_server))
    timeout_thread.start()
    server_thread = threading.Thread(target=http_server.serve_forever)
    server_thread.start()
    yield http_server
    stop_server(http_server)
def stop_server(http_server):
    """Stop the serve_forever() loop of *http_server* (idempotent)."""
    http_server.shutdown()
def server_port(http_server):
    """Return the TCP port *http_server* is bound to."""
    address = http_server.server_address
    return address[1]
def test_exception():
    """A request that dies in a redirect loop is still recorded, with a
    None status (no response was ever completed)."""
    requests_debug.install_hook()
    with start_server() as http_server:
        url = make_url(
            server_port(http_server),
            "error/")
        try:
            requests.get(url)
        except requests.TooManyRedirects, e:
            # Expected: the "error/" path redirects to itself forever.
            stop_server(http_server)
    compare(
        normalize_items(requests_debug.items()),
        [{'checkpoint_id': requests_debug.checkpoint_id(),
          'method': 'get',
          'status': None,
          'url': url}])
def test_uninstall_hook():
    """Requests are recorded while the hook is installed and no longer
    recorded after uninstall_hook()."""
    def assert_items(items_cb):
        # Make one GET against a fresh server and compare the recorded
        # items with whatever items_cb(url) says they should be.
        with start_server() as http_server:
            url = make_url(server_port(http_server),
                           "test.py")
            requests.get(url)
            compare(
                normalize_items(requests_debug.items()),
                items_cb(url)
            )
    # install the hook
    requests_debug.install_hook()
    # assert that the hook is working
    assert_items(lambda url: [
        {'method': 'get',
         'checkpoint_id': requests_debug.checkpoint_id(),
         'status': 200,
         'url': url}
    ])
    # uninstall the hook
    requests_debug.uninstall_hook()
    # assert that nothing is recorded when we uninstall the hook
    assert_items(lambda url: [])
def make_url(port, path):
    """Build an http://localhost:<port>/<path> URL string."""
    base = "http://localhost:{0}/".format(port)
    return base + path
# make the results look like the values we care about
def normalize_items(items):
    """Project each recorded request down to the four fields the tests
    compare: method, checkpoint_id, status and url."""
    keys = ('method', 'checkpoint_id', 'status', 'url')
    normalized = []
    for item in items:
        normalized.append(dict((k, item[k]) for k in keys))
    return normalized
def test_threading():
    """
    Assert that the thread locals actually work correctly by making requests
    """
    # Three client threads (two hitting 200 URLs, one hitting a 404)
    # each make two GETs; every thread must see only its own recorded
    # items and its own checkpoint id.
    with start_server() as http_server:
        requests_debug.install_hook()
        make_url_ = partial(make_url, server_port(http_server))
        results_q = Queue.Queue()
        client_threads = [
            client_thread(results_q, 0, make_url_("test.py")),
            client_thread(results_q, 1, make_url_("test.py")),
            client_thread(results_q, 2, make_url_("404")),
        ]
        # use an ordered dict to keep things sorted
        # as we collect the results
        results = []
        for client in client_threads:
            client.start()
        for client in client_threads:
            # we may not get the result for the client
            # we're on but we need to collect that many
            # values, so this is a quick way to do that.
            # this may timeout and return None if a request
            # takes longer than 2 seconds (it shouldn't)
            results.append(results_q.get(True, 2))
        results.sort(key=lambda x: x[0])
        def normalize(results):
            return [
                (thread_id, checkpoint_id, normalize_items(items))
                for thread_id, checkpoint_id, items in results
            ]
        compare(normalize(results), [
            (0, results[0][1], [
                {'method': 'get',
                 'checkpoint_id': results[0][1],
                 'status': 200,
                 'url': make_url_("test.py?thread_id=0&n=0")},
                {'method': 'get',
                 'checkpoint_id': results[0][1],
                 'status': 200,
                 'url': make_url_("test.py?thread_id=0&n=1")},
            ]),
            (1, results[1][1], [
                {'method': 'get',
                 'checkpoint_id': results[1][1],
                 'status': 200,
                 'url': make_url_("test.py?thread_id=1&n=0")},
                {'method': 'get',
                 'checkpoint_id': results[1][1],
                 'status': 200,
                 'url': make_url_("test.py?thread_id=1&n=1")},
            ]),
            (2, results[2][1], [
                {'method': 'get',
                 'checkpoint_id': results[2][1],
                 'status': 404,
                 'url': make_url_("404?thread_id=2&n=0")},
                {'method': 'get',
                 'checkpoint_id': results[2][1],
                 'status': 404,
                 'url': make_url_("404?thread_id=2&n=1")},
            ])])
# Allow running the threading test directly without a test runner.
if __name__ == '__main__':
    test_threading()
| mit |
bd-j/magellanic | magellanic/sfhs/prediction_scripts/predicted_total.py | 1 | 5894 | import sys, pickle, copy
import numpy as np
import matplotlib.pyplot as pl
import astropy.io.fits as pyfits
import magellanic.regionsed as rsed
import magellanic.mcutils as utils
from magellanic.lfutils import *
try:
import fsps
from sedpy import observate
except ImportError:
#you wont be able to predict the integrated spectrum or magnitudes
# filterlist must be set to None in calls to total_cloud_data
sps = None
# LaTeX wavelength labels keyed by IRAC band number (as a string).
wlengths = {'2': '{4.5\mu m}',
            '4': '{8\mu m}'}
# Distance moduli of the Magellanic Clouds.
dmod = {'smc':18.9,
        'lmc':18.5}
# Per-cloud inputs: [region dict, nx, ny, zlist, zlist_basti]
cloud_info = {}
cloud_info['smc'] = [utils.smc_regions(), 20, 23, [7, 13, 16], [3,5,6]]
cloud_info['lmc'] = [utils.lmc_regions(), 48, 38, [7, 11, 13, 16], [3,4,5,6]]
def total_cloud_data(cloud, filternames = None, basti=False,
                     lfstring=None, agb_dust=1.0,
                     one_metal=None):
    """Predict integrated quantities for one Magellanic Cloud.

    Sums the per-region SFHs for *cloud* ('smc' or 'lmc'), then
    optionally computes AGB luminosity functions (when *lfstring*, a
    filename template taking a metallicity index, is given) and an
    integrated SED/magnitudes (when *filternames* is given).

    Returns (total_values, total_sfhs) where total_values is a dict of
    LF, SED and bookkeeping products and total_sfhs is the summed SFH
    list (one entry per metallicity).
    """
    #########
    # SPS
    #########
    #
    if filternames is not None:
        sps = fsps.StellarPopulation(add_agb_dust_model=True)
        sps.params['sfh'] = 0
        sps.params['agb_dust'] = agb_dust
        dust = ['nodust', 'agbdust']
        sps.params['imf_type'] = 0.0 #salpeter
        filterlist = observate.load_filters(filternames)
    else:
        filterlist = None
    ##########
    # SFHs
    ##########
    regions, nx, ny, zlist, zlist_basti = cloud_info[cloud.lower()]
    if basti:
        # BUG FIX: previously referenced the undefined name `basti_zlist`,
        # raising NameError whenever basti=True; the unpacked variable
        # above is `zlist_basti`.
        zlist = zlist_basti
    if 'header' in regions.keys():
        rheader = regions.pop('header') #dump the header info from the reg. dict
    total_sfhs = None
    for n, dat in regions.iteritems():
        total_sfhs = sum_sfhs(total_sfhs, dat['sfhs'])
        total_zmet = dat['zmet']
    #collapse SFHs to one metallicity
    if one_metal is not None:
        ts = None
        for sfh in total_sfhs:
            ts = sum_sfhs(ts, sfh)
        # NOTE(review): this assigns `total_sfh` (singular), which is never
        # read again -- the collapsed SFH looks like it was meant to replace
        # `total_sfhs`.  Left as-is to preserve behavior; confirm intent.
        total_sfh = ts
        zlist = [zlist[one_metal]]
        total_zmet = [total_zmet[one_metal]]
    #############
    # LFs
    ############
    bins = rsed.lfbins
    if lfstring is not None:
        # these are stored as a list of different metallicities
        lffiles = [lfstring.format(z) for z in zlist]
        lf_base = [read_villaume_lfs(f) for f in lffiles]
        #get LFs broken out by age and metallicity as well as the total
        lfs_zt, lf, logages = rsed.one_region_lfs(copy.deepcopy(total_sfhs), lf_base)
    else:
        lfs_zt, lf, logages = None, None, None
    ###########
    # SED
    ############
    if filterlist is not None:
        spec, wave, mass = rsed.one_region_sed(copy.deepcopy(total_sfhs), total_zmet, sps)
        mags = observate.getSED(wave, spec*rsed.to_cgs, filterlist=filterlist)
        maggies = 10**(-0.4 * np.atleast_1d(mags))
    else:
        maggies, mass = None, None
    #############
    # Write output
    ############
    total_values = {}
    total_values['agb_clf'] = lf
    total_values['agb_clfs_zt'] = lfs_zt
    total_values['clf_mags'] = bins
    total_values['logages'] = logages
    total_values['sed_ab_maggies'] = maggies
    total_values['sed_filters'] = filternames
    total_values['lffile'] = lfstring
    total_values['mstar'] = mass
    total_values['zlist'] = zlist
    return total_values, total_sfhs
def sum_sfhs(sfhs1, sfhs2):
    """
    Accumulate individual sets of SFHs into a total set of SFHs.  This
    assumes that the individual SFH sets all have the same number and
    order of metallicities, and the same time binning.  Either argument
    may be None, in which case a deep copy of the other is returned;
    neither input is mutated.
    """
    if sfhs1 is None:
        return copy.deepcopy(sfhs2)
    if sfhs2 is None:
        return copy.deepcopy(sfhs1)
    total = copy.deepcopy(sfhs1)
    for accum, extra in zip(total, sfhs2):
        accum['sfr'] += extra['sfr']
    return total
# Driver script: build composite AGB luminosity functions and SFH plots
# for each cloud/IRAC band combination and write them to disk.
if __name__ == '__main__':
    filters = ['galex_NUV', 'spitzer_irac_ch2',
               'spitzer_irac_ch4', 'spitzer_mips_24']
    #filters = None
    ldir, cdir = 'lf_data/', 'composite_lfs/'
    outst = '{0}_n2teffcut.p'
    # total_cloud_data will loop over the appropriate (for the
    # isochrone) metallicities for a given lfst filename template
    lfst = '{0}z{{0:02.0f}}_tau{1:2.1f}_vega_irac{2}_n2_teffcut_lf.txt'
    basti = False
    agb_dust=1.0
    agebins = np.arange(9)*0.3 + 7.4
    #loop over clouds (and bands and agb_dust) to produce clfs
    for cloud in ['smc']:
        rdir = '{0}cclf_{1}_'.format(cdir, cloud)
        for band in ['2','4']:
            lfstring = lfst.format(ldir, agb_dust, band)
            dat, sfhs = total_cloud_data(cloud, filternames=filters, agb_dust=agb_dust,
                                         lfstring=lfstring, basti=basti)
            # Overwrite the default age bins with the SFH time grid.
            agebins = sfhs[0]['t1'][3:-1]
            outfile = lfstring.replace(ldir, rdir).replace('z{0:02.0f}_','').replace('.txt','.dat')
            write_clf_many([dat['clf_mags'], dat['agb_clf']], outfile, lfstring)
            #fig, ax = plot_weighted_lfs(dat, agebins = agebins, dm=dmod[cloud])
            #fig.suptitle('{0} @ IRAC{1}'.format(cloud.upper(), band))
            #fig.savefig('byage_clfs/{0}_clfs_by_age_and_Z_irac{1}'.format(cloud, band))
            #pl.close(fig)
            # Column headers use the midpoints of consecutive age bins.
            colheads = (len(agebins)-1) * ' N<m(t={})'
            colheads = colheads.format(*(agebins[:-1]+agebins[1:])/2.)
            tbin_lfs = np.array([rebin_lfs(lf, ages, agebins) for lf, ages
                                 in zip(dat['agb_clfs_zt'], dat['logages'])])
            write_clf_many([dat['clf_mags'], tbin_lfs.sum(axis=0)],
                           outfile.replace(cdir,'byage_clfs/'), lfstring,
                           colheads=colheads)
            pl.figure()
            for s, z in zip(sfhs, dat['zlist']):
                pl.step(s['t1'], s['sfr'], where='post', label='zind={0}'.format(z), linewidth=3)
            pl.legend(loc=0)
            pl.title(cloud.upper())
            print(cloud, dat['mstar'])
| gpl-2.0 |
bccp/abopt | abopt/legacy/tests/test_vmad.py | 1 | 2670 | from __future__ import print_function
from abopt.legacy.vmad import VM, microcode, programme, Zero, Tape
from numpy.testing import assert_raises, assert_array_equal, assert_allclose
import numpy
class TestVM(VM):
    """Minimal VM with one unary and one binary microcode, each with a
    hand-written vector-Jacobian product (vjp) for gradient tests."""
    # y = factor * x
    @microcode(ain=['x'], aout=['y'])
    def unitary(self, x, y, factor):
        y[...] = x * factor
    # vjp: _x = factor * _y
    @unitary.defvjp
    def _(self, _x, _y, factor):
        _x[...] = _y * factor
    # y = x1 + x2
    @microcode(ain=['x1', 'x2'], aout=['y'])
    def binary(self, x1, x2, y):
        y[...] = x1 + x2
    # vjp of addition passes the upstream gradient to both inputs.
    @binary.defvjp
    def _(self, _x1, _x2, _y):
        _x1[...] = _y
        _x2[...] = _y
def test_single_compute():
    """Forward pass of a single unitary op: b = 3 * a."""
    vm = TestVM()
    code = vm.code()
    code.unitary(x='a', y='b', factor=3.0)
    b = code.compute('b', {'a' : 1.0})
    assert_array_equal(b, 3.0)
def test_single_gradient():
    """Gradient of a single unitary op: d b / d a = 3."""
    vm = TestVM()
    code = vm.code()
    code.unitary(x='a', y='b', factor=3.0)
    b, _a = code.compute_with_gradient(['b', '_a'], {'a' : 1.0}, {'_b': 1.0})
    assert_array_equal(b, 3.0)
    assert_array_equal(_a, 3.0)
def test_nested_gradient():
    """Chain rule over two composed unitary ops: c = 9 * a, dc/da = 9."""
    vm = TestVM()
    code = vm.code()
    code.unitary(x='a', y='b', factor=3.0)
    code.unitary(x='b', y='c', factor=3.0)
    c = code.compute('c', {'a' : 1.0})
    assert_array_equal(c, 9.0)
    _a = code.compute_with_gradient('_a', {'a' : 1.0}, {'_c': 1.0})
    c, _a = code.compute_with_gradient(['c', '_a'], {'a' : 1.0}, {'_c': 1.0})
    assert_array_equal(c, 9.0)
    assert_array_equal(_a, 9.0)
def test_partial_gradient():
    """Fan-out/fan-in graph: a feeds four branches summed pairwise, so
    d = 12 * a and the accumulated gradient is 12."""
    vm = TestVM()
    code = vm.code()
    code.unitary(x='a', y='b1', factor=3.0)
    code.unitary(x='a', y='b2', factor=3.0)
    code.unitary(x='a', y='b3', factor=3.0)
    code.unitary(x='a', y='b4', factor=3.0)
    code.binary(x1='b1', x2='b2', y='c1')
    code.binary(x1='b3', x2='b4', y='c2')
    code.binary(x1='c1', x2='c2', y='d')
    d, tape = code.compute('d', {'a' : 1.0}, return_tape=True)
    assert_array_equal(d, 12.0)
    gradient = vm.gradient(tape)
    d, _a = code.compute_with_gradient(['d', '_a'], {'a' : 1.0}, {'_d': 1.0})
    assert_array_equal(d, 12.0)
    assert_array_equal(_a, 12.0)
def test_inplace_gradient():
    """Gradients survive in-place updates (variables reused as both
    input and output): d = 3 * (3a' + 3a') with a' = 3a, i.e. 54 * a."""
    vm = TestVM()
    code = vm.code()
    code.unitary(x='a', y='a', factor=3.0)
    code.unitary(x='a', y='b1', factor=3.0)
    code.unitary(x='a', y='b2', factor=3.0)
    code.binary(x1='b1', x2='b2', y='b1')
    code.unitary(x='b1', y='d', factor=3.0)
    d, tape = code.compute('d', {'a' : 1.0}, return_tape=True)
    assert_array_equal(d, 54.0)
    gradient = vm.gradient(tape)
    d, _a = code.compute_with_gradient(['d', '_a'], {'a' : 1.0}, {'_d': 1.0})
    assert_array_equal(d, 54.0)
    assert_array_equal(_a, 54.0)
| gpl-3.0 |
minghuascode/pyj | examples/misc/flaskexamples/flaskcelery/public/services/jsonrpc/cgihandler.py | 17 | 2170 | """
Copyright (c) 2006 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from jsonrpc import SimpleServiceHandler
import sys,os
class CGIHandler(SimpleServiceHandler):
    """JSON-RPC service handler for the CGI transport.

    Reads the request body from stdin (sized by CONTENT_LENGTH),
    buffers all outgoing messages, and writes a single CGI response to
    stdout in sendReply().
    """
    def __init__(self, service, messageDelimiter="\n"):
        # Outgoing message fragments, flushed as one response later.
        self.sendData =[]
        SimpleServiceHandler.__init__(self, service, messageDelimiter=messageDelimiter)
    def send(self, data):
        # Buffer instead of writing immediately; CGI wants headers first.
        self.sendData.append(data)
    def handle(self):
        # NOTE(review): bare except silently maps any read/parse problem
        # (missing/invalid CONTENT_LENGTH, stdin error) to an empty body.
        try:
            contLen=int(os.environ['CONTENT_LENGTH'])
            data = sys.stdin.read(contLen)
        except:
            data = ""
        #execute the request
        self.handlePartialData(data)
        self.sendReply()
        self.close()
    def sendReply(self):
        data = "\n".join(self.sendData)
        response = "Content-Type: text/plain\n"
        response += "Content-Length: %d\n\n" % len(data)
        response += data
        #on windows all \n are converted to \r\n if stdout is a terminal and is not set to binary mode :(
        #this will then cause an incorrect Content-length.
        #I have only experienced this problem with apache on Win so far.
        if sys.platform == "win32":
            import msvcrt
            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        #put out the response
        sys.stdout.write(response)
def handleCGIRequest(service):
    """Convenience entry point: serve one CGI request for *service*."""
    CGIHandler(service,messageDelimiter="\n").handle()
| apache-2.0 |
forge33/CouchPotatoServer | couchpotato/core/plugins/base.py | 3 | 15178 | import threading
from urllib import quote
from urlparse import urlparse
import glob
import inspect
import os.path
import re
import time
import traceback
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toSafeString, \
toUnicode, sp
from couchpotato.core.helpers.variable import getExt, md5, isLocalIP, scanForPassword, tryInt, getIdentifier, \
randomString
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import MaxRetryError
from tornado import template
from tornado.web import StaticFileHandler
log = CPLog(__name__)
class Plugin(object):
    """Base class for all CouchPotato plugins.

    Handles registration on the event bus, static-file serving via Tornado,
    settings access through Env, rate-limited HTTP fetching with failure
    back-off, caching, and common file-system helpers.
    """

    # Overridable identity / behaviour knobs.
    _class_name = None
    _database = None
    plugin_path = None
    enabled_option = 'enabled'
    auto_register_static = True

    # Internal state.
    _needs_shutdown = False
    _running = None
    _locks = {}

    # HTTP client behaviour: per-host throttling and failure tracking.
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'
    http_last_use = {}
    http_time_between_calls = 0
    http_failed_request = {}
    http_failed_disabled = {}

    def __new__(cls, *args, **kwargs):
        # Every instantiation self-registers on the event bus.
        new_plugin = super(Plugin, cls).__new__(cls)
        new_plugin.registerPlugin()
        return new_plugin

    def registerPlugin(self):
        """Hook the plugin into shutdown/running events and static serving."""
        addEvent('app.do_shutdown', self.doShutdown)
        addEvent('plugin.running', self.isRunning)
        self._running = []
        if self.auto_register_static:
            self.registerStatic(inspect.getfile(self.__class__))
        # Setup database
        if self._database:
            addEvent('database.setup', self.databaseSetup)

    def databaseSetup(self):
        """Register each declared database index class with the database layer."""
        for index_name in self._database:
            klass = self._database[index_name]
            fireEvent('database.setup_index', index_name, klass)

    def conf(self, attr, value = None, default = None, section = None):
        """Read (or, with value, write) a setting scoped to this plugin's section."""
        class_name = self.getName().lower().split(':')[0].lower()
        return Env.setting(attr, section = section if section else class_name, value = value, default = default)

    def deleteConf(self, attr):
        """Remove a setting from this plugin's section."""
        return Env._settings.delete(attr, section = self.getName().lower().split(':')[0].lower())

    def getName(self):
        # Explicit name override, falling back to the class name.
        return self._class_name or self.__class__.__name__

    def setName(self, name):
        self._class_name = name

    def renderTemplate(self, parent_file, templ, **params):
        """Render a Tornado template located next to parent_file."""
        # NOTE(review): the template file handle is never closed explicitly.
        t = template.Template(open(os.path.join(os.path.dirname(parent_file), templ), 'r').read())
        return t.generate(**params)

    def registerStatic(self, plugin_file, add_to_head = True):
        """Serve this plugin's ./static folder and register its js/css in <head>."""
        # Register plugin path
        self.plugin_path = os.path.dirname(plugin_file)
        static_folder = toUnicode(os.path.join(self.plugin_path, 'static'))
        if not os.path.isdir(static_folder):
            return
        # Get plugin_name from PluginName (CamelCase -> snake_case)
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
        class_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
        # View path
        path = 'static/plugin/%s/' % class_name
        # Add handler to Tornado
        Env.get('app').add_handlers(".*$", [(Env.get('web_base') + path + '(.*)', StaticFileHandler, {'path': static_folder})])
        # Register for HTML <HEAD>
        if add_to_head:
            for f in glob.glob(os.path.join(self.plugin_path, 'static', '*')):
                ext = getExt(f)
                if ext in ['js', 'css']:
                    # NOTE(review): `ext in 'js'` is substring matching, not
                    # equality; it also matches 'j' or 's'. Presumably meant
                    # `ext == 'js'` — harmless here since ext is 'js' or 'css'.
                    fireEvent('register_%s' % ('script' if ext in 'js' else 'style'), path + os.path.basename(f), f)

    def createFile(self, path, content, binary = False):
        """Write content (string or requests.Response stream) to path."""
        path = sp(path)
        self.makeDir(os.path.dirname(path))
        if os.path.exists(path):
            log.debug('%s already exists, overwriting file with new version', path)
        write_type = 'w+' if not binary else 'w+b'
        # Stream file using response object
        if isinstance(content, requests.models.Response):
            # Write file to temp
            with open('%s.tmp' % path, write_type) as f:
                for chunk in content.iter_content(chunk_size = 1048576):
                    if chunk: # filter out keep-alive new chunks
                        f.write(chunk)
                        f.flush()
            # Rename to destination
            os.rename('%s.tmp' % path, path)
        else:
            try:
                f = open(path, write_type)
                f.write(content)
                f.close()
                os.chmod(path, Env.getPermission('file'))
            except:
                # On any write failure, log and remove the partial file.
                log.error('Unable writing to file "%s": %s', (path, traceback.format_exc()))
                if os.path.isfile(path):
                    os.remove(path)

    def makeDir(self, path):
        """Create path (and parents) if missing; True on success."""
        path = sp(path)
        try:
            if not os.path.isdir(path):
                os.makedirs(path, Env.getPermission('folder'))
            return True
        except Exception as e:
            log.error('Unable to create folder "%s": %s', (path, e))
        return False

    def deleteEmptyFolder(self, folder, show_error = True, only_clean = None):
        """Remove empty subfolders (optionally limited to only_clean), then folder itself."""
        folder = sp(folder)
        for item in os.listdir(folder):
            full_folder = sp(os.path.join(folder, item))
            if not only_clean or (item in only_clean and os.path.isdir(full_folder)):
                # Bottom-up walk so children are removed before parents;
                # rmdir fails (and is logged) on non-empty directories.
                for subfolder, dirs, files in os.walk(full_folder, topdown = False):
                    try:
                        os.rmdir(subfolder)
                    except:
                        if show_error:
                            log.info2('Couldn\'t remove directory %s: %s', (subfolder, traceback.format_exc()))
        try:
            os.rmdir(folder)
        except:
            if show_error:
                log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))

    # http request
    def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, stream = False):
        """Fetch a URL with per-host throttling and failure back-off.

        POSTs when data/files are given, otherwise GETs. After 5 failures a
        host is disabled for 15 minutes. Returns the response content (or
        the Response object when stream=True), '' when the host is disabled.
        """
        url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
        if not headers: headers = {}
        if not data: data = {}
        # Fill in some headers
        parsed_url = urlparse(url)
        host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else ''))
        headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host))
        headers['Host'] = headers.get('Host', None)
        headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
        headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
        headers['Connection'] = headers.get('Connection', 'keep-alive')
        headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
        r = Env.get('http_opener')
        # Don't try for failed requests
        if self.http_failed_disabled.get(host, 0) > 0:
            # Host disabled less than 15 minutes (900 s) ago?
            if self.http_failed_disabled[host] > (time.time() - 900):
                log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
                if not show_error:
                    raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host)
                else:
                    return ''
            else:
                # Back-off window expired: reset the host's failure state.
                del self.http_failed_request[host]
                del self.http_failed_disabled[host]
        self.wait(host)
        status_code = None
        try:
            kwargs = {
                'headers': headers,
                'data': data if len(data) > 0 else None,
                'timeout': timeout,
                'files': files,
                'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates..
                'stream': stream,
            }
            method = 'post' if len(data) > 0 or files else 'get'
            log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
            response = r.request(method, url, **kwargs)
            status_code = response.status_code
            if response.status_code == requests.codes.ok:
                # Note: `data` is re-used here to hold the response payload.
                data = response if stream else response.content
            else:
                response.raise_for_status()
            self.http_failed_request[host] = 0
        except (IOError, MaxRetryError, Timeout):
            if show_error:
                log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0)))
            # Save failed requests by hosts
            try:
                # To many requests (HTTP 429): disable the host immediately.
                if status_code in [429]:
                    self.http_failed_request[host] = 1
                    self.http_failed_disabled[host] = time.time()
                if not self.http_failed_request.get(host):
                    self.http_failed_request[host] = 1
                else:
                    self.http_failed_request[host] += 1
                    # Disable temporarily
                    if self.http_failed_request[host] > 5 and not isLocalIP(host):
                        self.http_failed_disabled[host] = time.time()
            except:
                log.debug('Failed logging failed requests for %s: %s', (url, traceback.format_exc()))
            raise
        self.http_last_use[host] = time.time()
        return data

    def wait(self, host = ''):
        """Sleep as needed to honour http_time_between_calls for this host."""
        if self.http_time_between_calls == 0:
            return
        now = time.time()
        last_use = self.http_last_use.get(host, 0)
        if last_use > 0:
            wait = (last_use - now) + self.http_time_between_calls
            if wait > 0:
                log.debug('Waiting for %s, %d seconds', (self.getName(), max(1, wait)))
                # Never block longer than 30 seconds.
                time.sleep(min(wait, 30))

    def beforeCall(self, handler):
        # Mark this handler as running for shutdown bookkeeping.
        self.isRunning('%s.%s' % (self.getName(), handler.__name__))

    def afterCall(self, handler):
        # Clear the running mark set by beforeCall.
        self.isRunning('%s.%s' % (self.getName(), handler.__name__), False)

    def doShutdown(self, *args, **kwargs):
        """Event callback: flag the plugin for shutdown."""
        self.shuttingDown(True)
        return True

    def shuttingDown(self, value = None):
        """Get (no args) or set the shutdown flag."""
        if value is None:
            return self._needs_shutdown
        self._needs_shutdown = value

    def isRunning(self, value = None, boolean = True):
        """Get the running list (no args), or add/remove an entry from it."""
        if value is None:
            return self._running
        if boolean:
            self._running.append(value)
        else:
            try:
                self._running.remove(value)
            except:
                log.error("Something went wrong when finishing the plugin function. Could not find the 'is_running' key")

    def getCache(self, cache_key, url = None, **kwargs):
        """Return a cached value, fetching (and caching) `url` on a miss.

        Caching is skipped for requests that carry POST data or files.
        """
        use_cache = not len(kwargs.get('data', {})) > 0 and not kwargs.get('files')
        if use_cache:
            cache_key_md5 = md5(cache_key)
            cache = Env.get('cache').get(cache_key_md5)
            if cache:
                if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
                return cache
        if url:
            try:
                # 'cache_timeout' is consumed here; the rest of kwargs is
                # passed straight to urlopen.
                cache_timeout = 300
                if 'cache_timeout' in kwargs:
                    cache_timeout = kwargs.get('cache_timeout')
                    del kwargs['cache_timeout']
                data = self.urlopen(url, **kwargs)
                if data and cache_timeout > 0 and use_cache:
                    self.setCache(cache_key, data, timeout = cache_timeout)
                return data
            except:
                if not kwargs.get('show_error', True):
                    raise
                log.debug('Failed getting cache: %s', (traceback.format_exc(0)))
                return ''

    def setCache(self, cache_key, value, timeout = 300):
        """Store value under md5(cache_key) and return it."""
        cache_key_md5 = md5(cache_key)
        log.debug('Setting cache %s', cache_key)
        Env.get('cache').set(cache_key_md5, value, timeout)
        return value

    def createNzbName(self, data, media, unique_tag = False):
        """Build a safe release filename, embedding the cp tag and any password."""
        release_name = data.get('name')
        tag = self.cpTag(media, unique_tag = unique_tag)
        # Check if password is filename
        name_password = scanForPassword(data.get('name'))
        if name_password:
            release_name, password = name_password
            tag += '{{%s}}' % password
        elif data.get('password'):
            tag += '{{%s}}' % data.get('password')
        max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames
        return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag)

    def createFileName(self, data, filedata, media, unique_tag = False):
        """Pick a filename+extension; nzb payloads that aren't XML get .rar."""
        name = self.createNzbName(data, media, unique_tag = unique_tag)
        if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
            return '%s.%s' % (name, 'rar')
        return '%s.%s' % (name, data.get('protocol'))

    def cpTag(self, media, unique_tag = False):
        """Build the '.cp(identifier, random)' marker tag, or '' when disabled."""
        tag = ''
        if Env.setting('enabled', 'renamer') or unique_tag:
            identifier = getIdentifier(media) or ''
            unique_tag = ', ' + randomString() if unique_tag else ''
            tag = '.cp('
            tag += identifier
            tag += ', ' if unique_tag and identifier else ''
            # NOTE(review): this calls randomString() again, so the random
            # part differs from the one computed into unique_tag above.
            tag += randomString() if unique_tag else ''
            tag += ')'
        # Require more than the bare '.cp()' skeleton.
        return tag if len(tag) > 7 else ''

    def checkFilesChanged(self, files, unchanged_for = 60):
        """Return (changed_marker, time_string) if any file changed recently.

        changed_marker is falsy when all files have been unchanged for at
        least `unchanged_for` seconds.
        """
        now = time.time()
        file_too_new = False
        file_time = []
        for cur_file in files:
            # File got removed while checking
            if not os.path.isfile(cur_file):
                file_too_new = now
                break
            # File has changed in last 60 seconds
            file_time = self.getFileTimes(cur_file)
            for t in file_time:
                if t > now - unchanged_for:
                    file_too_new = tryInt(time.time() - t)
                    break
            if file_too_new:
                break
        if file_too_new:
            try:
                time_string = time.ctime(file_time[0])
            except:
                try:
                    time_string = time.ctime(file_time[1])
                except:
                    time_string = 'unknown'
            return file_too_new, time_string
        return False, None

    def getFileTimes(self, file_path):
        # ctime is only meaningful on non-posix systems here.
        return [os.path.getmtime(file_path), os.path.getctime(file_path) if os.name != 'posix' else 0]

    def isDisabled(self):
        return not self.isEnabled()

    def isEnabled(self):
        # An unset option counts as enabled.
        return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None

    def acquireLock(self, key):
        """Acquire (creating on first use) the re-entrant lock named `key`."""
        lock = self._locks.get(key)
        if not lock:
            self._locks[key] = threading.RLock()
        log.debug('Acquiring lock: %s', key)
        self._locks.get(key).acquire()

    def releaseLock(self, key):
        """Release the lock named `key` if it exists."""
        lock = self._locks.get(key)
        if lock:
            log.debug('Releasing lock: %s', key)
            self._locks.get(key).release()
| gpl-3.0 |
repotvsupertuga/tvsupertuga.repository | script.module.resolveurl/lib/resolveurl/lib/CustomProgressDialog.py | 3 | 3627 | """
ResolveURL Addon for Kodi
Copyright (C) 2016 t0mm0, tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmcgui
import kodi
import log_utils
logger = log_utils.Logger.get_logger(__name__)
DIALOG_XML = 'ProgressDialog.xml'
class ProgressDialog(object):
    """Custom Kodi progress dialog backed by a WindowXMLDialog skin file.

    Mirrors the xbmcgui.DialogProgress API (create/update/iscanceled/close)
    so callers can swap it in transparently.
    """
    # The underlying Window instance; None until create() is called.
    dialog = None

    def create(self, heading, line1='', line2='', line3=''):
        """Instantiate and show the dialog window, then set its initial text."""
        # Prefer a user-configured skin folder; fall back to the addon path.
        try: self.dialog = ProgressDialog.Window(DIALOG_XML, kodi.get_setting('xml_folder'))
        except: self.dialog = ProgressDialog.Window(DIALOG_XML, kodi.get_path())
        self.dialog.show()
        self.dialog.setHeading(heading)
        self.dialog.setLine1(line1)
        self.dialog.setLine2(line2)
        self.dialog.setLine3(line3)

    def update(self, percent, line1='', line2='', line3=''):
        """Update the progress bar and any non-empty text lines."""
        if self.dialog is not None:
            self.dialog.setProgress(percent)
            if line1: self.dialog.setLine1(line1)
            if line2: self.dialog.setLine2(line2)
            if line3: self.dialog.setLine3(line3)

    def iscanceled(self):
        """True once the user has backed out or pressed the cancel button."""
        if self.dialog is not None:
            return self.dialog.cancel
        else:
            return False

    def close(self):
        """Close and drop the dialog window."""
        if self.dialog is not None:
            self.dialog.close()
            del self.dialog

    class Window(xbmcgui.WindowXMLDialog):
        # Control IDs — must match the IDs used in ProgressDialog.xml.
        HEADING_CTRL = 100
        LINE1_CTRL = 10
        LINE2_CTRL = 11
        LINE3_CTRL = 12
        PROGRESS_CTRL = 20
        # Kodi action codes for "go back"/"previous menu".
        ACTION_PREVIOUS_MENU = 10
        ACTION_BACK = 92
        CANCEL_BUTTON = 200
        # Set when the user cancels; read via ProgressDialog.iscanceled().
        cancel = False

        def onInit(self):
            pass

        def onAction(self, action):
            # logger.log('Action: %s' % (action.getId()), log_utils.LOGDEBUG, COMPONENT)
            # Backing out of the window counts as a cancel.
            if action == self.ACTION_PREVIOUS_MENU or action == self.ACTION_BACK:
                self.cancel = True
                self.close()

        def onControl(self, control):
            # logger.log('onControl: %s' % (control), log_utils.LOGDEBUG, COMPONENT)
            pass

        def onFocus(self, control):
            # logger.log('onFocus: %s' % (control), log_utils.LOGDEBUG, COMPONENT)
            pass

        def onClick(self, control):
            # logger.log('onClick: %s' % (control), log_utils.LOGDEBUG, COMPONENT)
            if control == self.CANCEL_BUTTON:
                self.cancel = True
                self.close()

        def setHeading(self, heading):
            self.setLabel(self.HEADING_CTRL, heading)

        def setProgress(self, progress):
            self.getControl(self.PROGRESS_CTRL).setPercent(progress)

        def setLine1(self, line):
            self.setLabel(self.LINE1_CTRL, line)

        def setLine2(self, line):
            self.setLabel(self.LINE2_CTRL, line)

        def setLine3(self, line):
            self.setLabel(self.LINE3_CTRL, line)

        def setLabel(self, ctrl, line):
            # Shared helper for all text controls.
            self.getControl(ctrl).setLabel(line)
| gpl-2.0 |
blackmiaool/js-beautify | python/jsbeautifier/unpackers/evalbased.py | 221 | 1171 | #
# Unpacker for eval() based packers, a part of javascript beautifier
# by Einar Lielmanis <einar@jsbeautifier.org>
#
# written by Stefano Sanfilippo <a.little.coder@gmail.com>
#
# usage:
#
# if detect(some_string):
# unpacked = unpack(some_string)
#
"""Unpacker for eval() based packers: runs JS code and returns result.
Works only if a JS interpreter (e.g. Mozilla's Rhino) is installed and
properly set up on host."""
from subprocess import PIPE, Popen
PRIORITY = 3
def detect(source):
    """Return True when *source* looks like eval()-packed JavaScript."""
    normalized = source.strip().lower()
    return normalized.startswith('eval(function(')
def unpack(source):
    """Evaluate eval()-packed source in a JS interpreter; return the result.

    Non-packed input is returned unchanged.
    """
    if not detect(source):
        return source
    # Drop the leading 'eval' and print the packed function's result instead.
    return jseval('print %s;' % source[4:])
# In case of failure, we'll just return the original, without crashing on user.
def jseval(script):
    """Run *script* in an external JS interpreter ('js') and return its output.

    Falls back to returning the input unchanged when the interpreter is
    missing or the script fails, so callers never crash on the user.
    """
    try:
        # 'js' is e.g. Mozilla's Rhino/SpiderMonkey shell; must be on PATH.
        interpreter = Popen(['js'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    except OSError:
        return script
    result, errors = interpreter.communicate(script)
    # Bug fix: stderr was not piped before, so `errors` was always None and
    # failures that only wrote to stderr went undetected.
    if interpreter.poll() or errors:
        return script
    return result
| mit |
reverse-CSE-591/reverse | driver.py | 1 | 19133 | #!/usr/bin/python -tt
#####################################################################################################################
# CSE 591: Security and Vulnerability Analysis
#
# Team 5:
#
# Kartheek Nallepalli
# Bhargavi Rajagopalan
# Priya Pipada
# Ayush Maheshwari
# Nikhil Aourpally
#
#
# This is the driver program. Run the main function here to find potential vulnerabilities in the website
#####################################################################################################################
# Python Imports
from __future__ import division
from bs4 import BeautifulSoup
from lxml import html
from os import system, path
from random import randint
from urlparse import urlparse
import ast
import json
import math
import nltk
import re
import requests
import sys
import time
import urllib
import urllib2
# This is a global set that contains all the URL's crawled from the website.
urls = set()
stopWords = []
#####################################################################################################################
# This method takes in a form to be filled and the url and tries to guess valid inputs that would result in a
# successful response from the server
# Inputs:
# params[] (List[String]): list of parameters along with the types in the following format.
# ex: ["username::text", "password::password"]
# action (String): The action the form should take when submitted
# url (String): The page URL for getting the HTML data and figuring out what to fill
# Output:
# validResponse (String): returns the HTML string of the valid response
#####################################################################################################################
def getValidResponse(params, action, url, cookies):
    # Build one plausible value per form field from its label/type, submit
    # the form, and hand back the server's HTML response.
    formInput = {}
    for name in params:
        meta = params[name]
        formInput[name] = generateValue(meta['label'], meta['type'])
    header, validResponse = constructPostRequest(formInput, cookies, action)
    return validResponse
#####################################################################################################################
# This method constructs a HTTP Post Request to submit the form to it
# Inputs:
#Output:
#####################################################################################################################
def constructPostRequest(formInput, input_cookies, action):
    # POST the form data to `action`. SSL verification is disabled on
    # purpose: many scanned targets use self-signed certificates.
    response = requests.post(action, data=formInput, cookies=input_cookies, verify=False)
    return (response.headers, response.text)
#####################################################################################################################
# This method takes in a form to be filled and the url and inserts <scripts> into the fields.
# Inputs:
# params{} (Dictionary): list of parameters along with the types in the following format.
# ex: ["username::text", "password::password"]
# action (String): The action the form should take when submitted
# Output:
# xssResponse (String): returns the HTML response
#####################################################################################################################
def getXssResponse(params, action, url, cookies):
    """Submit the form with a <script> payload in every field.

    Returns the HTML response; getXssScore() later checks whether the
    payload is reflected anywhere on the site.
    """
    formInput = {}
    # Fix: the old loop bound `value = params[key]` and never used it.
    for key in params:
        formInput[key] = "<sCript>xssAttack</sCript>"
    (header, xssInjResponse) = constructPostRequest(formInput, cookies, action)
    return xssInjResponse
#####################################################################################################################
# This method computes the XSS injection score for the given response
# Inputs:
#Output:
#####################################################################################################################
def getXssScore(xssResponse, input_cookies):
    """Re-crawl every recorded URL and return 1 if the injected script
    payload is reflected anywhere (stored XSS), else 0."""
    # Fix: the file handle was never closed (and the local shadowed the
    # module-level `urls` set); use a context manager instead.
    with open("crawledURLs.txt") as crawled:
        for url in crawled:
            response = requests.get(re.sub("\n", "", url), verify=False, cookies=input_cookies).text
            if re.search('<sCript>xssAttack</sCript>', response):
                return 1
    return 0
#####################################################################################################################
# This method takes in a form to be filled and the url and tries SQL injection in the fields
# Inputs:
# params[] (List[String]): list of parameters along with the types in the following format.
# ex: ["username::text", "password::password"]
# action (String): The action the form should take when submitted
# Output:
# xssResponse (String): returns the HTML response
#####################################################################################################################
def getSqlInjResponse(params, action, url, cookies):
    """Submit the form with a classic tautology payload in every field.

    Returns the HTML response; a response very different from the valid
    one suggests the injection changed the query's behaviour.
    """
    formInput = {}
    # Fix: the old loop bound `value = params[key]` and never used it.
    for key in params:
        formInput[key] = "' or 1=1 --'"
    (header, sqlInjResponse) = constructPostRequest(formInput, cookies, action)
    return sqlInjResponse
#####################################################################################################################
# This method takes in two HTML strings, compares them and assigns a similarity score. The idea is to use this
# score to see how similar pages with valid and invalid outputs are.
# Inputs:
# html_1 (String): The first HTML page
# html_2 (String): The second HTML page
# Output:
# score (double): similarity between pages
#####################################################################################################################
def getSimilarityScore(html_1, html_2):
    # Strip markup from both pages, then compare their word-frequency
    # vectors with cosine similarity.
    text_1 = BeautifulSoup(html_1).get_text()
    text_2 = BeautifulSoup(html_2).get_text()
    vector_1 = formatVector(text_1)
    vector_2 = formatVector(text_2)
    return calculateCosineSimilarity(vector_1, vector_2)
#####################################################################################################################
# The method calculates the cosine similarity between two groups
# Inputs:
#Output:
#####################################################################################################################
def calculateCosineSimilarity(group1, group2):
    """Return the cosine similarity of two term-frequency dicts.

    Args:
        group1, group2: dicts mapping term -> frequency count.
    Returns:
        float in [0, 1]. Fix: returns 0.0 when either vector is empty,
        where the original raised ZeroDivisionError.
    """
    dot = sum(group1[term] * group2[term] for term in group1 if term in group2)
    norm1 = math.sqrt(sum(freq * freq for freq in group1.values()))
    norm2 = math.sqrt(sum(freq * freq for freq in group2.values()))
    if norm1 == 0 or norm2 == 0:
        return 0.0
    return float(dot) / (norm1 * norm2)
#####################################################################################################################
# This method constructs a HTTP Post Request to submit the form to it
# Inputs:
#Output:
#####################################################################################################################
def formatVector(response):
    """Tokenize *response* into a term-frequency dict.

    Splits on newlines and spaces, drops empty tokens and stop words, and
    counts lower-cased terms.
    """
    global stopWords
    tokens = []
    for line in re.split("\n", response):
        tokens.extend(re.split(" ", line))
    vectorDict = {}
    for token in tokens:
        # Bug fix: the original used `or` here, so non-empty stop words were
        # never filtered out and empty tokens could slip through.
        if token != '' and token not in stopWords:
            word = token.lower()
            vectorDict[word] = vectorDict.get(word, 0) + 1
    return vectorDict
#####################################################################################################################
# This method takes in the original label extracted, gets the similarity score and predicts the valid form entries
# by understanding the meaning of the labels and mapping them to known labels using dictionary similarity and edit-
# distance score.
#
# TODO : Faced problems with wu-palmer similarity over wordNet (false positives and not all terms present)
# Currently using just the edit distance
#
# Inputs:
# label (String): Label generated from the scarppy code extended
# Output:
# generated value (String): Valid generated form input value
#####################################################################################################################
def getLabel(orglabel):
    """Classify a form label: 'username' if it is close to a known
    user-name alias, otherwise the label unchanged.

    Bug fix: getEdidDistanceScore returns a normalized *distance*
    (0.0 == identical), but the original kept the *maximum* score and
    treated values below 0.5 as a non-match — exactly inverted, so
    unrelated labels such as 'email' were classified as usernames while
    'user' itself was not.
    """
    userset = ['user', 'username', 'user_name']
    best = None
    for field in userset:
        score = getEdidDistanceScore(orglabel, field)
        if best is None or score < best:
            best = score
    # A small normalized distance to any alias means this is a username field.
    if best is not None and best < 0.5:
        return 'username'
    return orglabel
#####################################################################################################################
# This method generates random values based on the form field type and implements intelligent form filling
# Inputs:
#Output:
#####################################################################################################################
def generateValue(label, labeltype):
    """Generate a plausible, unique form value for the given input type.

    Returns a timestamped string for text/password/email fields and an int
    for number fields. Fix: unhandled input types (checkbox, hidden, ...)
    previously fell off the end and returned None, producing invalid POST
    data; they now get a generic random string.
    """
    if labeltype == 'text':
        # Username-like fields get a recognisable 'reverse...' account name.
        newlabel = getLabel(label)
        if newlabel == 'username':
            return 'reverse' + str(time.time())
        return 'reverserandom' + str(time.time())
    if labeltype == 'password':
        return 'reversePass' + str(time.time())
    if labeltype == 'email':
        return 'reverse' + str(time.time()) + '@reverse.com'
    if labeltype == 'number':
        return randint(0, 10000)
    # Fallback for any other input type.
    return 'reverse' + str(time.time())
#####################################################################################################################
# Helper methods
#####################################################################################################################
# Get the specific form parameters
def getFormParams(link):
    """Extract (action, params) from a crawled form record.

    `link` is one item from items.json: a dict with the raw page 'source',
    the form 'target' (action URL) and a 'form' list of input descriptors.
    Returns the action URL and a dict mapping field name -> {'type', 'label'}.
    """
    params = {}
    labels = []
    source = link['source'].replace("\n","")
    # Scan the HTML for text that directly follows a '>' — treated as a
    # candidate field label.
    # NOTE(review): the inner `i = i + 1` is undone on the next for-loop
    # iteration (Python rebinds i from range), so characters are re-scanned;
    # presumably harmless but worth confirming against real form sources.
    for i in range(0, len(source)):
        label = ''
        if source[i] == '>':
            while source[i] != '<':
                label += source[i]
                i = i + 1
                if i >= len(source) - 1:
                    break;
            if label[1:] and not label[1:].isspace():
                labels.append(label[1:])
    i = 0
    for j in link['form']:
        params[j['name']] = {}
        params[j['name']]['type'] = j['type']
        # NOTE(review): every field gets labels[0]; with the unused counter
        # below this looks like it was meant to be labels[i] — confirm, and
        # note labels may be shorter than the field list.
        params[j['name']]['label'] = labels[0]
        i = i + 1
    return (link['target'], params)
# This method gets the list of stopwords
def getStopWords():
    """Load the stop-word list from 'stopwords.en' (one word per line) into
    the module-level stopWords list.

    Fix: the file handle was opened without ever being closed; a context
    manager now guarantees cleanup.
    """
    global stopWords
    with open("stopwords.en") as f:
        for line in f:
            stopWords.append(re.sub("\n", "", line))
# Get the edit-distance score between two words
def getEdidDistanceScore(word1, word2):
    # Normalized edit distance between two words: 0.0 means identical,
    # larger means more different, scaled by the mean word length.
    distance = nltk.metrics.distance.edit_distance(word1, word2, transpositions=False)
    mean_length = (len(word1) + len(word2)) / 2
    return distance / mean_length
#Get cookies from user
def getCookies():
flag = 0
cookies = {}
print "Enter cookies(Press X to exit): "
while True:
if not flag:
key = raw_input("Enter Key: ")
flag = 1
if key == 'X':
break;
else:
value = raw_input("Enter value: ")
flag = 0
if value == 'X':
break;
cookies[key] = value
return cookies
#####################################################################################################################
# Method to inject malicious input values into the application to check if nth order SQL injection is possible
#####################################################################################################################
def nthOrderSQLInjection(params, action, url, cookies, index, urlForms):
    """Probe for stored (nth order) SQL injection.

    Submits UNION SELECT payloads carrying a per-form 'Evilmax<index>'
    marker through the form at `action`, then replays benign credentials
    against every known form and looks for a marker from a *different*
    form echoed back — evidence that injected data was stored by one form
    and rendered by another.

    Returns a report string, or None when nothing was found.

    Fixes over the original implementation:
    - responseString started as None and strings were concatenated onto it,
      raising TypeError on the first finding;
    - the payload marker 'Evilmax' was searched for as 'EvilMax'
      (case-sensitive), so reflections were never detected;
    - the benign replay posted the malicious formInput instead of the
      newFormInput it had just built;
    - the echoed marker (a list of tokens) was compared directly against
      the int `index`, which is always unequal.
    """
    UserName = "reverse_12345"
    Password = "aXb675hjWF@"
    SQLKeyWord = "' union select "
    TableInfo = 'from dual;--'
    findings = []
    # Try up to 5 column counts, padding the UNION SELECT with NULLs.
    for i in range(0, 5):
        formInput = {}
        ParameterPadding = 'Null,' * i
        Parameter = '"Evilmax"' + str(index) + ' '
        MaliciousInputValue = UserName + SQLKeyWord + ParameterPadding + Parameter + TableInfo
        for key in params:
            if params[key]['type'] != 'password':
                formInput[key] = MaliciousInputValue
            else:
                formInput[key] = Password
        constructPostRequest(formInput, cookies, action)
        # Replay benign credentials through every known form and look for a
        # stored marker leaking out.
        for urlForm in urlForms:
            (newAction, newParams) = getFormParams(urlForm)
            newFormInput = {}
            for newParam in newParams:
                if newParams[newParam]['type'] != 'password':
                    newFormInput[newParam] = UserName
                else:
                    newFormInput[newParam] = Password
            (header, response) = constructPostRequest(newFormInput, cookies, newAction)
            if 'Evilmax' in response:
                echoedIndex = response.split('Evilmax')[1].split(' ')[0]
                # A marker with a different index means the payload came
                # from another form — nth order injection.
                if echoedIndex != str(index):
                    findings.append("nth Order SQL injection present in " + newAction + "\n")
    return ''.join(findings) if findings else None
#####################################################################################################################
# The method takes the URLs extracted from the crawler scrapy and performs a "deeper" crawling by seeing if the
# server is setting any cookies after login and adds that to the list of cookies.
#Output: Updates cookies (Dictionary)
#####################################################################################################################
def deepCrawling(urlForms,cookies):
    """Register/login through every discovered form, then replay the stored
    credentials to capture any session cookies the server sets.

    Returns the (possibly extended) cookies dict for a deeper re-crawl.
    """
    storedFormInputs=[]
    # NOTE(review): formInput is initialised once here and re-used across
    # forms in the first loop, so values from earlier forms leak into later
    # submissions — confirm whether that is intended.
    formInput={}
    login=False
    # Pass 1: fill every form with generated values, remembering any
    # username/password pair we submit.
    for urlForm in urlForms:
        (action, params) = getFormParams(urlForm)
        credentials = {'username': None, 'password' : None}
        for key in params:
            value = params[key]
            if value['type'] != 'submit':
                formInput[key] = generateValue(value['label'],value['type'])
                newLabel = getLabel(value['label'])
                if newLabel == 'username':
                    credentials['username'] = formInput[key]
                if value['type'] == 'password':
                    credentials['password'] = formInput[key]
        # NOTE(review): a non-empty dict is always truthy, so this appends
        # for every form — even when both fields stayed None.
        if credentials:
            storedFormInputs.append(credentials)
        (header,response) = constructPostRequest(formInput,cookies,action)
        # Heuristic: these words in the response suggest an account was made.
        if "registered" in response.lower() or "created" in response.lower() or "authenticated" in response.lower():
            login=True
    # Pass 2: replay each stored credential pair against every form and
    # harvest any Set-Cookie header into the cookie jar.
    if login == True:
        for urlForm in urlForms:
            (action, params) = getFormParams(urlForm)
            for storedFormInput in storedFormInputs:
                formInput = {}
                for key in params:
                    value = params[key]
                    newLabel = getLabel(value['label'])
                    if newLabel == 'username':
                        formInput[key] = storedFormInput['username']
                    if value['type'] == 'password' and storedFormInput['password']:
                        formInput[key] = storedFormInput['password']
                (header, response) = constructPostRequest(formInput,cookies,action)
                if 'set-cookie' in header.keys():
                    # Keep only the name=value part of the first cookie.
                    newCookie = str(header['set-cookie']).split(';')[0]
                    CookieSplit = str(newCookie).split('=')
                    cookies[CookieSplit[0]] = CookieSplit[1]
    return cookies
#####################################################################################################################
# This is the main method that gets called and submits the report on possible vulnerabilities
#####################################################################################################################
def main():
    """Drive the whole scan: crawl the target, probe every form for SQL
    injection, nth-order SQL injection and XSS, and write a report."""
    # Init Global variables
    getStopWords()
    # Add the required headers, most likely its just the login cookie for the page.
    #opener = urllib2.build_opener()
    #opener.addheaders.append(('Cookie', 'cse591=kP047iYtubEZ6ZnMKmxO'))
    # domain = "129.219.253.30:80"
    url = raw_input("Enter the web address: ")
    cookies = getCookies()
    domain = urlparse(url).netloc
    # Remove any residual files
    system("rm items.json")
    system("rm crawledURLs.txt")
    system("rm reverse_report")
    system("rm reverse_response")
    # Use Scrapy to get recursively get all URLs, Stores the
    system("scrapy crawl ReverseCrawler -a domain="+domain+" -a start_urls="+url+" -a cookies=\""+str(cookies)+"\" -o items.json")
    #cookies = ast.literal_eval(cookies)
    # Iterate over all the URL's and their forms
    UrlForms = json.load(open("items.json"))
    print "\n\n\n"
    # Open report, response file
    reportFile = open('reverse_report','w')
    responseFile = open('reverse_response','w')
    # Perform a deeper crawling and re-crawl using scrapy to fetch more URLs
    cookies = deepCrawling(UrlForms,cookies)
    system("rm -f items.json")
    system("scrapy crawl ReverseCrawler -a domain="+domain+" -a start_urls="+url+" -a cookies=\""+str(cookies)+"\" -o items.json")
    UrlForms = json.load(open("items.json"))
    # Iterate through all possible forms
    index = 0
    for urlForm in UrlForms:
        (action, params) = getFormParams(urlForm)
        print "[INFO] action: ", action
        # Get the valid response
        validResponse = getValidResponse(params, action, url, cookies)
        # Append the resposes to response file
        responseFile.write("%%%%%%%%%%%%%%%%%%%%%%%%%% Start Valid Response %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
        responseFile.write(action + "\n")
        responseFile.write(str(params) + "\n")
        responseFile.write(BeautifulSoup(validResponse).get_text() + "\n")
        responseFile.write("############################ Start SQL Injection response ###########################\n")
        # Attempt SQL Injection and Get the score
        sqlInjResponse = getSqlInjResponse(params, action, url, cookies)
        responseFile.write(BeautifulSoup(sqlInjResponse).get_text() + "\n")
        responseFile.write("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ Start XSS response @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n")
        # Score is 1 - similarity: very different pages suggest the payload
        # changed the query's behaviour.
        sqlInjectionScore = float(1) - getSimilarityScore(validResponse, sqlInjResponse)
        print "[INFO] SQL_INJ_Score = ", sqlInjectionScore
        # Attempt nth Order SQL injection
        responseString = nthOrderSQLInjection(params, action, url, cookies, index, UrlForms)
        # Attempt XSS and get the score
        xssResponse = getXssResponse(params, action, url, cookies)
        responseFile.write("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
        xssScore = getXssScore(xssResponse, cookies)
        print "[INFO] XSS_Score = ", xssScore
        # Add scores to the report
        reportFile.write("[Params]:: " + str(params) + "\n")
        reportFile.write("[Action]:: " + action + "\n")
        reportFile.write("[SQL_Inj_Score]:: " + str(sqlInjectionScore) + "\n")
        reportFile.write("[XSS_Inj_Score]:: " + str(xssScore) + "\n\n")
        if responseString is not None:
            reportFile.write("[nth Order SQL Injection]::" + responseString + "\n")
        print "\n\n"
        index = index + 1
    # Close the report
    reportFile.close()
    responseFile.close()
# Script entry point: run the scanner when executed directly.
if __name__ == '__main__':
    main()
| mit |
gtolan/mrkttrdr | node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 1558 | 4945 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
    """ Converts the structured content into its XML string representation.

    NOTE(review): the previous first line ("Writes the XML content to disk,
    touching the file only if it has changed.") described WriteXmlIfChanged,
    not this function; this function only builds and returns a string.

    Visual Studio files have a lot of pre-defined structures. This function makes
    it easy to represent these structures as Python data structures, instead of
    having to create a lot of function calls.

    Each XML element of the content is represented as a list composed of:
    1. The name of the element, a string,
    2. The attributes of the element, a dictionary (optional), and
    3+. The content of the element, if any. Strings are simple text nodes and
        lists are child elements.

    Example 1:
        <test/>
    becomes
        ['test']

    Example 2:
        <myelement a='value1' b='value2'>
            <childtype>This is</childtype>
            <childtype>it!</childtype>
        </myelement>
    becomes
        ['myelement', {'a':'value1', 'b':'value2'},
            ['childtype', 'This is'],
            ['childtype', 'it!'],
        ]

    Args:
      content: The structured content to be converted.
      encoding: The encoding to report on the first XML line.
      pretty: True if we want pretty printing with indents and new lines.

    Returns:
      The XML content as a string.
    """
    # We create a huge list of all the elements of the file.
    xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
    if pretty:
        xml_parts.append('\n')
    _ConstructContentList(xml_parts, content, pretty)

    # Convert it to a string
    return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
    """ Appends the XML parts corresponding to the specification.

    Recursive worker for XmlToString: emits the opening tag, attributes,
    children (text nodes and nested elements), and the closing tag.

    Args:
      xml_parts: A list of XML parts to be appended to.
      specification: The specification of the element. See EasyXml docs.
      pretty: True if we want pretty printing with indents and new lines.
      level: Indentation level.
    """
    # The first item in a specification is the name of the element.
    if pretty:
        indentation = ' ' * level
        new_line = '\n'
    else:
        indentation = ''
        new_line = ''
    name = specification[0]
    if not isinstance(name, str):
        raise Exception('The first item of an EasyXml specification should be '
                        'a string. Specification was ' + str(specification))
    xml_parts.append(indentation + '<' + name)

    # Optionally in second position is a dictionary of the attributes.
    rest = specification[1:]
    if rest and isinstance(rest[0], dict):
        # Sorted for deterministic output (Python 2: dict.iteritems).
        for at, val in sorted(rest[0].iteritems()):
            xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
        rest = rest[1:]
    if rest:
        xml_parts.append('>')
        # An element whose children are all plain strings is kept on one line;
        # any nested element forces multi-line layout (Python 2 builtin reduce).
        all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
        multi_line = not all_strings
        if multi_line and new_line:
            xml_parts.append(new_line)
        for child_spec in rest:
            # If it's a string, append a text node.
            # Otherwise recurse over that child definition
            if isinstance(child_spec, str):
                xml_parts.append(_XmlEscape(child_spec))
            else:
                _ConstructContentList(xml_parts, child_spec, pretty, level + 1)
        if multi_line and indentation:
            xml_parts.append(indentation)
        xml_parts.append('</%s>%s' % (name, new_line))
    else:
        # No attributes left over and no children: self-closing element.
        xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
    """ Writes the XML content to disk, touching the file only if it has changed.

    Args:
      content: The structured content to be written.
      path: Location of the file.
      encoding: The encoding to report on the first line of the XML file.
      pretty: True if we want pretty printing with indents and new lines.
      win32: True to force Windows (CRLF) line endings even when the host
          platform uses a different separator.
    """
    xml_string = XmlToString(content, encoding, pretty)
    if win32 and os.linesep != '\r\n':
        xml_string = xml_string.replace('\n', '\r\n')

    # Encode to the declared encoding; fall back to latin-1 re-decoding for
    # byte strings that are not valid in the target encoding (Python 2).
    try:
        xml_string = xml_string.encode(encoding)
    except Exception:
        xml_string = unicode(xml_string, 'latin-1').encode(encoding)

    # Get the old content. A missing or unreadable file counts as "changed".
    # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit;
    # narrowed to the I/O errors `open`/`read` actually raise.)
    try:
        f = open(path, 'r')
        existing = f.read()
        f.close()
    except IOError:
        existing = None

    # It has changed, write it
    if existing != xml_string:
        f = open(path, 'w')
        f.write(xml_string)
        f.close()
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
    """ Escape a string for inclusion in XML.

    Single quotes are left untouched inside attribute values (attr=True),
    since easy_xml always delimits attributes with double quotes.
    """
    def replace(match):
        matched = match.group(0)
        # don't replace single quotes in attrs
        if attr and matched == "'":
            return matched
        return _xml_escape_map[matched]
    return _xml_escape_re.sub(replace, value)
| mit |
mollstam/UnrealPy | UnrealPyEmbed/Source/Python/Lib/python27/distutils/command/build_py.py | 74 | 16338 | """distutils.command.build_py
Implements the Distutils 'build_py' command."""
__revision__ = "$Id$"
import os
import sys
from glob import glob
from distutils.core import Command
from distutils.errors import DistutilsOptionError, DistutilsFileError
from distutils.util import convert_path
from distutils import log
class build_py(Command):
    """Distutils 'build_py' command: copy pure Python modules (and any
    declared package data) into the build directory, optionally
    byte-compiling the copied sources."""

    description = "\"build\" pure Python modules (copy to build directory)"

    user_options = [
        ('build-lib=', 'd', "directory to \"build\" (copy) to"),
        ('compile', 'c', "compile .py to .pyc"),
        ('no-compile', None, "don't compile .py files [default]"),
        ('optimize=', 'O',
         "also compile with optimization: -O1 for \"python -O\", "
         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
        ]

    boolean_options = ['compile', 'force']
    negative_opt = {'no-compile' : 'compile'}

    def initialize_options(self):
        # Option defaults; real values are resolved in finalize_options().
        self.build_lib = None
        self.py_modules = None
        self.package = None
        self.package_data = None
        self.package_dir = None
        self.compile = 0
        self.optimize = 0
        self.force = None

    def finalize_options(self):
        self.set_undefined_options('build',
                                   ('build_lib', 'build_lib'),
                                   ('force', 'force'))

        # Get the distribution options that are aliases for build_py
        # options -- list of packages and list of modules.
        self.packages = self.distribution.packages
        self.py_modules = self.distribution.py_modules
        self.package_data = self.distribution.package_data
        self.package_dir = {}
        if self.distribution.package_dir:
            for name, path in self.distribution.package_dir.items():
                self.package_dir[name] = convert_path(path)
        self.data_files = self.get_data_files()

        # Ick, copied straight from install_lib.py (fancy_getopt needs a
        # type system! Hell, *everything* needs a type system!!!)
        if not isinstance(self.optimize, int):
            try:
                self.optimize = int(self.optimize)
                assert 0 <= self.optimize <= 2
            except (ValueError, AssertionError):
                raise DistutilsOptionError("optimize must be 0, 1, or 2")

    def run(self):
        # XXX copy_file by default preserves atime and mtime. IMHO this is
        # the right thing to do, but perhaps it should be an option -- in
        # particular, a site administrator might want installed files to
        # reflect the time of installation rather than the last
        # modification time before the installed release.

        # XXX copy_file by default preserves mode, which appears to be the
        # wrong thing to do: if a file is read-only in the working
        # directory, we want it to be installed read/write so that the next
        # installation of the same module distribution can overwrite it
        # without problems. (This might be a Unix-specific issue.) Thus
        # we turn off 'preserve_mode' when copying to the build directory,
        # since the build directory is supposed to be exactly what the
        # installation will look like (ie. we preserve mode when
        # installing).

        # Two options control which modules will be installed: 'packages'
        # and 'py_modules'. The former lets us work with whole packages, not
        # specifying individual modules at all; the latter is for
        # specifying modules one-at-a-time.

        if self.py_modules:
            self.build_modules()
        if self.packages:
            self.build_packages()
            self.build_package_data()

        self.byte_compile(self.get_outputs(include_bytecode=0))

    def get_data_files(self):
        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
        data = []
        if not self.packages:
            return data
        for package in self.packages:
            # Locate package source directory
            src_dir = self.get_package_dir(package)

            # Compute package build directory
            build_dir = os.path.join(*([self.build_lib] + package.split('.')))

            # Length of path to strip from found files
            plen = 0
            if src_dir:
                plen = len(src_dir)+1

            # Strip directory from globbed filenames
            filenames = [
                file[plen:] for file in self.find_data_files(package, src_dir)
                ]
            data.append((package, src_dir, build_dir, filenames))
        return data

    def find_data_files(self, package, src_dir):
        """Return filenames for package's data files in 'src_dir'"""
        globs = (self.package_data.get('', [])
                 + self.package_data.get(package, []))
        files = []
        for pattern in globs:
            # Each pattern has to be converted to a platform-specific path
            filelist = glob(os.path.join(src_dir, convert_path(pattern)))
            # Files that match more than one pattern are only added once
            files.extend([fn for fn in filelist if fn not in files
                and os.path.isfile(fn)])
        return files

    def build_package_data(self):
        """Copy data files into build directory"""
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                self.mkpath(os.path.dirname(target))
                self.copy_file(os.path.join(src_dir, filename), target,
                               preserve_mode=False)

    def get_package_dir(self, package):
        """Return the directory, relative to the top of the source
        distribution, where package 'package' should be found
        (at least according to the 'package_dir' option, if any)."""
        path = package.split('.')

        if not self.package_dir:
            if path:
                return os.path.join(*path)
            else:
                return ''
        else:
            tail = []
            while path:
                try:
                    pdir = self.package_dir['.'.join(path)]
                except KeyError:
                    tail.insert(0, path[-1])
                    del path[-1]
                else:
                    tail.insert(0, pdir)
                    return os.path.join(*tail)
            else:
                # Oops, got all the way through 'path' without finding a
                # match in package_dir. If package_dir defines a directory
                # for the root (nameless) package, then fallback on it;
                # otherwise, we might as well have not consulted
                # package_dir at all, as we just use the directory implied
                # by 'tail' (which should be the same as the original value
                # of 'path' at this point).
                pdir = self.package_dir.get('')
                if pdir is not None:
                    tail.insert(0, pdir)

                if tail:
                    return os.path.join(*tail)
                else:
                    return ''

    def check_package(self, package, package_dir):
        # Empty dir name means current directory, which we can probably
        # assume exists. Also, os.path.exists and isdir don't know about
        # my "empty string means current dir" convention, so we have to
        # circumvent them.
        if package_dir != "":
            if not os.path.exists(package_dir):
                raise DistutilsFileError(
                      "package directory '%s' does not exist" % package_dir)
            if not os.path.isdir(package_dir):
                raise DistutilsFileError(
                       "supposed package directory '%s' exists, "
                       "but is not a directory" % package_dir)

        # Require __init__.py for all but the "root package"
        if package:
            init_py = os.path.join(package_dir, "__init__.py")
            if os.path.isfile(init_py):
                return init_py
            else:
                log.warn(("package init file '%s' not found " +
                          "(or not a regular file)"), init_py)

        # Either not in a package at all (__init__.py not expected), or
        # __init__.py doesn't exist -- so don't return the filename.
        return None

    def check_module(self, module, module_file):
        if not os.path.isfile(module_file):
            log.warn("file %s (for module %s) not found", module_file, module)
            return False
        else:
            return True

    def find_package_modules(self, package, package_dir):
        self.check_package(package, package_dir)
        module_files = glob(os.path.join(package_dir, "*.py"))
        modules = []
        setup_script = os.path.abspath(self.distribution.script_name)

        for f in module_files:
            abs_f = os.path.abspath(f)
            if abs_f != setup_script:
                module = os.path.splitext(os.path.basename(f))[0]
                modules.append((package, module, f))
            else:
                self.debug_print("excluding %s" % setup_script)
        return modules

    def find_modules(self):
        """Finds individually-specified Python modules, ie. those listed by
        module name in 'self.py_modules'. Returns a list of tuples (package,
        module_base, filename): 'package' is a tuple of the path through
        package-space to the module; 'module_base' is the bare (no
        packages, no dots) module name, and 'filename' is the path to the
        ".py" file (relative to the distribution root) that implements the
        module.
        """
        # Map package names to tuples of useful info about the package:
        #    (package_dir, checked)
        # package_dir - the directory where we'll find source files for
        #   this package
        # checked - true if we have checked that the package directory
        #   is valid (exists, contains __init__.py, ... ?)
        packages = {}

        # List of (package, module, filename) tuples to return
        modules = []

        # We treat modules-in-packages almost the same as toplevel modules,
        # just the "package" for a toplevel is empty (either an empty
        # string or empty list, depending on context). Differences:
        #   - don't check for __init__.py in directory for empty package
        for module in self.py_modules:
            path = module.split('.')
            package = '.'.join(path[0:-1])
            module_base = path[-1]

            try:
                (package_dir, checked) = packages[package]
            except KeyError:
                package_dir = self.get_package_dir(package)
                checked = 0

            if not checked:
                init_py = self.check_package(package, package_dir)
                packages[package] = (package_dir, 1)
                if init_py:
                    modules.append((package, "__init__", init_py))

            # XXX perhaps we should also check for just .pyc files
            # (so greedy closed-source bastards can distribute Python
            # modules too)

            module_file = os.path.join(package_dir, module_base + ".py")
            if not self.check_module(module, module_file):
                continue

            modules.append((package, module_base, module_file))

        return modules

    def find_all_modules(self):
        """Compute the list of all modules that will be built, whether
        they are specified one-module-at-a-time ('self.py_modules') or
        by whole packages ('self.packages'). Return a list of tuples
        (package, module, module_file), just like 'find_modules()' and
        'find_package_modules()' do."""
        modules = []
        if self.py_modules:
            modules.extend(self.find_modules())
        if self.packages:
            for package in self.packages:
                package_dir = self.get_package_dir(package)
                m = self.find_package_modules(package, package_dir)
                modules.extend(m)
        return modules

    def get_source_files(self):
        # Last tuple element is always the source filename.
        return [module[-1] for module in self.find_all_modules()]

    def get_module_outfile(self, build_dir, package, module):
        # 'package' is expected here as a sequence of path components.
        outfile_path = [build_dir] + list(package) + [module + ".py"]
        return os.path.join(*outfile_path)

    def get_outputs(self, include_bytecode=1):
        modules = self.find_all_modules()
        outputs = []
        for (package, module, module_file) in modules:
            package = package.split('.')
            filename = self.get_module_outfile(self.build_lib, package, module)
            outputs.append(filename)
            if include_bytecode:
                if self.compile:
                    outputs.append(filename + "c")
                if self.optimize > 0:
                    outputs.append(filename + "o")

        outputs += [
            os.path.join(build_dir, filename)
            for package, src_dir, build_dir, filenames in self.data_files
            for filename in filenames
            ]

        return outputs

    def build_module(self, module, module_file, package):
        if isinstance(package, str):
            package = package.split('.')
        elif not isinstance(package, (list, tuple)):
            raise TypeError(
                  "'package' must be a string (dot-separated), list, or tuple")

        # Now put the module source file into the "build" area -- this is
        # easy, we just copy it somewhere under self.build_lib (the build
        # directory for Python source).
        outfile = self.get_module_outfile(self.build_lib, package, module)
        dir = os.path.dirname(outfile)
        self.mkpath(dir)
        return self.copy_file(module_file, outfile, preserve_mode=0)

    def build_modules(self):
        modules = self.find_modules()
        for (package, module, module_file) in modules:
            # Now "build" the module -- ie. copy the source file to
            # self.build_lib (the build directory for Python source).
            # (Actually, it gets copied to the directory for this package
            # under self.build_lib.)
            self.build_module(module, module_file, package)

    def build_packages(self):
        for package in self.packages:
            # Get list of (package, module, module_file) tuples based on
            # scanning the package directory. 'package' is only included
            # in the tuple so that 'find_modules()' and
            # 'find_package_tuples()' have a consistent interface; it's
            # ignored here (apart from a sanity check). Also, 'module' is
            # the *unqualified* module name (ie. no dots, no package -- we
            # already know its package!), and 'module_file' is the path to
            # the .py file, relative to the current directory
            # (ie. including 'package_dir').
            package_dir = self.get_package_dir(package)
            modules = self.find_package_modules(package, package_dir)

            # Now loop over the modules we found, "building" each one (just
            # copy it to self.build_lib).
            for (package_, module, module_file) in modules:
                assert package == package_
                self.build_module(module, module_file, package)

    def byte_compile(self, files):
        if sys.dont_write_bytecode:
            self.warn('byte-compiling is disabled, skipping.')
            return

        from distutils.util import byte_compile
        prefix = self.build_lib
        if prefix[-1] != os.sep:
            prefix = prefix + os.sep

        # XXX this code is essentially the same as the 'byte_compile()
        # method of the "install_lib" command, except for the determination
        # of the 'prefix' string. Hmmm.

        if self.compile:
            byte_compile(files, optimize=0,
                         force=self.force, prefix=prefix, dry_run=self.dry_run)
        if self.optimize > 0:
            byte_compile(files, optimize=self.optimize,
                         force=self.force, prefix=prefix, dry_run=self.dry_run)
| mit |
GGXH/python_koans | python2/koans/about_methods.py | 2 | 5757 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMethods in the Ruby Koans
#
from runner.koan import *
def my_global_function(a, b):
    """Module-level helper used by the method koans: returns a + b."""
    total = a + b
    return total
class AboutMethods(Koan):
    """Koans exercising Python method semantics (answers filled in)."""

    def test_calling_a_global_function(self):
        self.assertEqual(5, my_global_function(2, 3))

    # NOTE: Wrong number of arguments is not a SYNTAX error, but a
    # runtime error.
    def test_calling_functions_with_wrong_number_of_arguments(self):
        try:
            my_global_function()
        except Exception as exception:
            # NOTE: The .__name__ attribute will convert the class
            # into a string value.
            self.assertEqual('TypeError', exception.__class__.__name__)
            self.assertMatch(
                r'my_global_function\(\) takes exactly 2 arguments \(0 given\)',
                exception[0])

        try:
            my_global_function(1, 2, 3)
        except Exception as e:
            # Note, watch out for parenthesis. They need slashes in front!
            self.assertMatch('my_global_function\(\) takes exactly 2 arguments \(3 given\)', e[0])

    # ------------------------------------------------------------------

    def pointless_method(self, a, b):
        # No return statement, so the call evaluates to None.
        sum = a + b

    def test_which_does_not_return_anything(self):
        self.assertEqual(None, self.pointless_method(1, 2))
        # Notice that methods accessed from class scope do not require
        # you to pass the first "self" argument?

    # ------------------------------------------------------------------

    def method_with_defaults(self, a, b='default_value'):
        return [a, b]

    def test_calling_with_default_values(self):
        self.assertEqual([1, 'default_value'], self.method_with_defaults(1))
        self.assertEqual([1, 2], self.method_with_defaults(1, 2))

    # ------------------------------------------------------------------

    def method_with_var_args(self, *args):
        # *args collects positional arguments into a tuple.
        return args

    def test_calling_with_variable_arguments(self):
        self.assertEqual((), self.method_with_var_args())
        self.assertEqual(('one', ), self.method_with_var_args('one'))
        self.assertEqual(('one','two'), self.method_with_var_args('one', 'two'))

    # ------------------------------------------------------------------

    def function_with_the_same_name(self, a, b):
        return a + b

    def test_functions_without_self_arg_are_global_functions(self):
        # The local def shadows the method inside this test's scope.
        def function_with_the_same_name(a, b):
            return a * b

        self.assertEqual(12, function_with_the_same_name(3, 4))

    def test_calling_methods_in_same_class_with_explicit_receiver(self):
        def function_with_the_same_name(a, b):
            return a * b

        # `self.` bypasses the local shadow and reaches the method (a + b).
        self.assertEqual(7, self.function_with_the_same_name(3, 4))

    # ------------------------------------------------------------------

    def another_method_with_the_same_name(self):
        return 10

    # Binds the *current* function object, surviving the redefinition below.
    link_to_overlapped_method = another_method_with_the_same_name

    def another_method_with_the_same_name(self):
        return 42

    def test_that_old_methods_are_hidden_by_redefinitions(self):
        self.assertEqual(42, self.another_method_with_the_same_name())

    def test_that_overlapped_method_is_still_there(self):
        self.assertEqual(10, self.link_to_overlapped_method())

    # ------------------------------------------------------------------

    def empty_method(self):
        pass

    def test_methods_that_do_nothing_need_to_use_pass_as_a_filler(self):
        self.assertEqual(None, self.empty_method())

    def test_pass_does_nothing_at_all(self):
        "You"
        "shall"
        "not"
        pass
        self.assertEqual(True, "Still got to this line" != None)

    # ------------------------------------------------------------------

    def one_line_method(self): return 'Madagascar'

    def test_no_indentation_required_for_one_line_statement_bodies(self):
        self.assertEqual('Madagascar', self.one_line_method())

    # ------------------------------------------------------------------

    def method_with_documentation(self):
        "A string placed at the beginning of a function is used for documentation"
        return "ok"

    def test_the_documentation_can_be_viewed_with_the_doc_method(self):
        self.assertMatch("A string placed at the beginning of a function is used for documentation", self.method_with_documentation.__doc__)

    # ------------------------------------------------------------------

    class Dog(object):
        def name(self):
            return "Fido"

        def _tail(self):
            # Prefixing a method with an underscore implies private scope
            return "wagging"

        def __password(self):
            return 'password' # Genius!

    def test_calling_methods_in_other_objects(self):
        rover = self.Dog()
        self.assertEqual("Fido", rover.name())

    def test_private_access_is_implied_but_not_enforced(self):
        rover = self.Dog()

        # This is a little rude, but legal
        self.assertEqual("wagging", rover._tail())

    def test_double_underscore_attribute_prefixes_cause_name_mangling(self):
        """Attributes names that start with a double underscore get
        mangled when an instance is created."""
        rover = self.Dog()
        try:
            #This may not be possible...
            password = rover.__password()
        except Exception as ex:
            self.assertEqual("AttributeError", ex.__class__.__name__)

        # But this still is!
        self.assertEqual('password', rover._Dog__password())

        # Name mangling exists to avoid name clash issues when subclassing.
        # It is not for providing effective access protection
| mit |
gusDuarte/sugar | extensions/cpsection/aboutme/model.py | 4 | 4041 | # Copyright (C) 2008 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from gettext import gettext as _
from gi.repository import GConf
# Sugar XO color palette: each base color maps its dark/medium/light hue
# to a hex value; stored system colors are "stroke_hex,fill_hex" pairs.
_COLORS = {
    'red': {'dark': '#b20008', 'medium': '#e6000a', 'light': '#ffadce'},
    'orange': {'dark': '#9a5200', 'medium': '#c97e00', 'light': '#ffc169'},
    'yellow': {'dark': '#807500', 'medium': '#be9e00', 'light': '#fffa00'},
    'green': {'dark': '#008009', 'medium': '#00b20d', 'light': '#8bff7a'},
    'blue': {'dark': '#00588c', 'medium': '#005fe4', 'light': '#bccdff'},
    'purple': {'dark': '#5e008c', 'medium': '#7f00bf', 'light': '#d1a3ff'},
}

# Valid hue modifiers accepted by set_color().
_MODIFIERS = ('dark', 'medium', 'light')
def get_nick():
    """Return the user's nickname from the GConf registry."""
    client = GConf.Client.get_default()
    return client.get_string('/desktop/sugar/user/nick')
def print_nick():
    """Print the current nickname to stdout (CLI helper)."""
    print get_nick()
def set_nick(nick):
    """Set the nickname.

    nick : e.g. 'walter'

    Raises ValueError for an empty name; returns 1 on success
    (truthy result expected by the control-panel caller).
    """
    if not nick:
        raise ValueError(_('You must enter a name.'))
    if not isinstance(nick, unicode):
        # GConf expects text; decode byte strings as UTF-8 (Python 2).
        nick = unicode(nick, 'utf-8')
    client = GConf.Client.get_default()
    client.set_string('/desktop/sugar/user/nick', nick)
    return 1
def get_color():
    """Return the stored color string ('stroke_hex,fill_hex') from GConf."""
    client = GConf.Client.get_default()
    return client.get_string('/desktop/sugar/user/color')
def print_color():
    """Print the current stroke/fill colors, resolving hex values back to
    (color, hue) names from _COLORS when possible."""
    color_string = get_color()
    # Stored format is 'stroke_hex,fill_hex'.
    tmp = color_string.split(',')
    stroke_tuple = None
    fill_tuple = None
    # Reverse-lookup each hex value in the palette table.
    for color in _COLORS:
        for hue in _COLORS[color]:
            if _COLORS[color][hue] == tmp[0]:
                stroke_tuple = (color, hue)
            if _COLORS[color][hue] == tmp[1]:
                fill_tuple = (color, hue)

    if stroke_tuple is not None:
        print _('stroke: color=%s hue=%s') % (stroke_tuple[0],
                                              stroke_tuple[1])
    else:
        # Hex value not in the palette; print it raw.
        print _('stroke: %s') % (tmp[0])

    if fill_tuple is not None:
        print _('fill: color=%s hue=%s') % (fill_tuple[0], fill_tuple[1])
    else:
        print _('fill: %s') % (tmp[1])
def set_color(stroke, fill, stroke_modifier='medium', fill_modifier='medium'):
    """Set the system color by setting a fill and stroke color.

    fill : [red, orange, yellow, blue, green, purple]
    stroke : [red, orange, yellow, blue, green, purple]
    hue stroke : [dark, medium, light] (optional)
    hue fill : [dark, medium, light] (optional)

    Returns 1 on success; prints an error and returns None on bad input.
    """
    if stroke_modifier not in _MODIFIERS or fill_modifier not in _MODIFIERS:
        print (_('Error in specified color modifiers.'))
        return
    if stroke not in _COLORS or fill not in _COLORS:
        print (_('Error in specified colors.'))
        return

    # Force differing hues -- presumably so stroke and fill remain
    # visually distinguishable (TODO confirm design intent).
    if stroke_modifier == fill_modifier:
        if fill_modifier == 'medium':
            fill_modifier = 'light'
        else:
            fill_modifier = 'medium'

    color = _COLORS[stroke][stroke_modifier] + ',' \
            + _COLORS[fill][fill_modifier]
    client = GConf.Client.get_default()
    client.set_string('/desktop/sugar/user/color', color)
    return 1
def get_color_xo():
    """Return the stored color string (same key as get_color)."""
    client = GConf.Client.get_default()
    return client.get_string('/desktop/sugar/user/color')
def set_color_xo(color):
    """Set a color with an XoColor

    This method is used by the graphical user interface

    Stores the already-formatted color string verbatim; returns 1.
    """
    client = GConf.Client.get_default()
    client.set_string('/desktop/sugar/user/color', color)
    return 1
| gpl-2.0 |
supriyantomaftuh/django | tests/gis_tests/rasterapp/test_rasterfield.py | 241 | 3424 | import json
from unittest import skipIf
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.shortcuts import numpy
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from ..data.rasters.textrasters import JSON_RASTER
from ..models import models
from .models import RasterModel
@skipUnlessDBFeature('supports_raster')
class RasterFieldTest(TransactionTestCase):
    """Tests for RasterField persistence against a raster-capable database."""

    available_apps = ['gis_tests.rasterapp']

    def test_field_null_value(self):
        """
        Test creating a model where the RasterField has a null value.
        """
        r = RasterModel.objects.create(rast=None)
        r.refresh_from_db()
        self.assertIsNone(r.rast)

    def test_access_band_data_directly_from_queryset(self):
        # Band data must be readable without an explicit refresh_from_db().
        RasterModel.objects.create(rast=JSON_RASTER)
        qs = RasterModel.objects.all()
        qs[0].rast.bands[0].data()

    def test_model_creation(self):
        """
        Test RasterField through a test model.
        """
        # Create model instance from JSON raster
        r = RasterModel.objects.create(rast=JSON_RASTER)
        r.refresh_from_db()

        # Test raster metadata properties
        self.assertEqual((5, 5), (r.rast.width, r.rast.height))
        self.assertEqual([0.0, -1.0, 0.0, 0.0, 0.0, 1.0], r.rast.geotransform)
        self.assertIsNone(r.rast.bands[0].nodata_value)

        # Compare srs
        self.assertEqual(r.rast.srs.srid, 4326)

        # Compare pixel values
        band = r.rast.bands[0].data()

        # If numpy, convert result to list
        if numpy:
            band = band.flatten().tolist()

        # Loop through rows in band data and assert single
        # value is as expected.
        self.assertEqual(
            [
                0.0, 1.0, 2.0, 3.0, 4.0,
                5.0, 6.0, 7.0, 8.0, 9.0,
                10.0, 11.0, 12.0, 13.0, 14.0,
                15.0, 16.0, 17.0, 18.0, 19.0,
                20.0, 21.0, 22.0, 23.0, 24.0
            ],
            band
        )

    def test_implicit_raster_transformation(self):
        """
        Test automatic transformation of rasters with srid different from the
        field srid.
        """
        # Parse json raster
        rast = json.loads(JSON_RASTER)
        # Update srid to another value
        rast['srid'] = 3086
        # Save model and get it from db
        r = RasterModel.objects.create(rast=rast)
        r.refresh_from_db()
        # Confirm raster has been transformed to the default srid
        self.assertEqual(r.rast.srs.srid, 4326)
        # Confirm geotransform is in lat/lon
        self.assertEqual(
            r.rast.geotransform,
            [-87.9298551266551, 9.459646421449934e-06, 0.0,
             23.94249275457565, 0.0, -9.459646421449934e-06]
        )

    def test_verbose_name_arg(self):
        """
        RasterField should accept a positional verbose name argument.
        """
        self.assertEqual(
            RasterModel._meta.get_field('rast').verbose_name,
            'A Verbose Raster Name'
        )
@skipIf(HAS_GDAL, 'Test raster field exception on systems without GDAL.')
class RasterFieldWithoutGDALTest(TestCase):
    """Only runs when GDAL is absent: RasterField must fail loudly."""

    def test_raster_field_without_gdal_exception(self):
        msg = 'RasterField requires GDAL.'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            models.OriginalRasterField()
| bsd-3-clause |
xuleiboy1234/autoTitle | tensorflow/tensorflow/contrib/saved_model/python/__init__.py | 136 | 1173 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel contrib support.
SavedModel provides a language-neutral format to save machine-learned models
that is recoverable and hermetic. It enables higher-level systems and tools to
produce, consume and transform TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.saved_model.python.saved_model import *
# pylint: enable=wildcard-import
| mit |
particl/particl-core | test/functional/test_framework/script_util.py | 29 | 3452 | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful Script constants and utils."""
from test_framework.script import CScript, hash160, sha256, OP_0, OP_DUP, OP_HASH160, OP_CHECKSIG, OP_EQUAL, OP_EQUALVERIFY
from test_framework.util import hex_str_to_bytes
# To prevent a "tx-size-small" policy rule error, a transaction has to have a
# non-witness size of at least 82 bytes (MIN_STANDARD_TX_NONWITNESS_SIZE in
# src/policy/policy.h). Considering a Tx with the smallest possible single
# input (blank, empty scriptSig), and with an output omitting the scriptPubKey,
# we get to a minimum size of 60 bytes:
#
# Tx Skeleton: 4 [Version] + 1 [InCount] + 1 [OutCount] + 4 [LockTime] = 10 bytes
# Blank Input: 32 [PrevTxHash] + 4 [Index] + 1 [scriptSigLen] + 4 [SeqNo] = 41 bytes
# Output: 8 [Amount] + 1 [scriptPubKeyLen] = 9 bytes
#
# Hence, the scriptPubKey of the single output has to have a size of at
# least 22 bytes, which corresponds to the size of a P2WPKH scriptPubKey.
# The following script constant consists of a single push of 21 bytes of 'a':
# <PUSH_21> <21-bytes of 'a'>
# resulting in a 22-byte size. It should be used whenever (small) fake
# scriptPubKeys are needed, to guarantee that the minimum transaction size is
# met.
# 22-byte dummy scriptPubKeys (size rationale in the comment above); two
# distinct payloads so tests can tell outputs apart.
DUMMY_P2WPKH_SCRIPT = CScript([b'a' * 21])
DUMMY_2_P2WPKH_SCRIPT = CScript([b'b' * 21])
def keyhash_to_p2pkh_script(hash, main = False):
    """Build a P2PKH scriptPubKey from a 20-byte pubkey hash.

    ``main`` is unused here -- kept for signature parity with the other
    helpers (presumably mainnet/testnet handling; TODO confirm callers).
    """
    assert len(hash) == 20
    return CScript([OP_DUP, OP_HASH160, hash, OP_EQUALVERIFY, OP_CHECKSIG])
def scripthash_to_p2sh_script(hash, main = False):
    """Build a P2SH scriptPubKey from a 20-byte script hash.

    ``main`` is unused here (see keyhash_to_p2pkh_script).
    """
    assert len(hash) == 20
    return CScript([OP_HASH160, hash, OP_EQUAL])
def key_to_p2pkh_script(key, main = False):
    """Build a P2PKH scriptPubKey from a public key (bytes or hex string)."""
    key = check_key(key)
    return keyhash_to_p2pkh_script(hash160(key), main)
def script_to_p2sh_script(script, main = False):
    """Build a P2SH scriptPubKey wrapping the given redeem script."""
    script = check_script(script)
    return scripthash_to_p2sh_script(hash160(script), main)
def key_to_p2sh_p2wpkh_script(key, main = False):
    """Build a P2SH-wrapped P2WPKH scriptPubKey for the given public key."""
    key = check_key(key)
    # Redeem script is the native segwit v0 keyhash program.
    p2shscript = CScript([OP_0, hash160(key)])
    return script_to_p2sh_script(p2shscript, main)
def program_to_witness_script(version, program, main = False):
    """Return a segwit scriptPubKey: <version> <program>.

    *program* may be a hex string or bytes. Enforces BIP141 limits: witness
    programs are 2-40 bytes, and version-0 programs must be exactly 20
    (P2WPKH) or 32 (P2WSH) bytes.
    """
    payload = hex_str_to_bytes(program) if isinstance(program, str) else program
    assert 0 <= version <= 16
    assert 2 <= len(payload) <= 40
    assert version > 0 or len(payload) in [20, 32]
    return CScript([version, payload])
def script_to_p2wsh_script(script, main = False):
    """Return the version-0 P2WSH scriptPubKey for the given witness script."""
    return program_to_witness_script(0, sha256(check_script(script)), main)
def key_to_p2wpkh_script(key, main = False):
    """Return the version-0 P2WPKH scriptPubKey for *key* (hex string or bytes)."""
    return program_to_witness_script(0, hash160(check_key(key)), main)
def script_to_p2sh_p2wsh_script(script, main = False):
    """Return a P2SH scriptPubKey wrapping the P2WSH program for the given script."""
    witness_program = CScript([OP_0, sha256(check_script(script))])
    return script_to_p2sh_script(witness_program, main)
def check_key(key):
    """Normalize *key* to bytes and validate its length.

    Accepts a hex string or bytes. Returns the key as bytes if it is a
    plausible public key (33 bytes compressed or 65 bytes uncompressed).

    Raises AssertionError otherwise. The error is raised explicitly rather
    than via ``assert False`` so the check still fires under ``python -O``
    (where assert statements are stripped and the old code silently
    returned None).
    """
    if isinstance(key, str):
        key = hex_str_to_bytes(key)  # Assuming this is hex string
    if isinstance(key, bytes) and len(key) in (33, 65):
        return key
    raise AssertionError("Invalid public key: %r" % (key,))
def check_script(script):
if isinstance(script, str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if isinstance(script, bytes) or isinstance(script, CScript):
return script
assert False
| mit |
woelfware/BluMote | test/button_tx.py | 1 | 1963 | #!/usr/bin/env python
# Copyright (C) 2011 Woelfware
from bluetooth import *
import blumote
import cPickle
from glob import glob
import os
import sys
import time
class Blumote_Client(blumote.Services):
    """Bluetooth RFCOMM client for a BluMote pod (Python 2 / PyBluez)."""
    def __init__(self):
        blumote.Services.__init__(self)
        # Address of the connected pod; set once a connection is made.
        self.addr = None
    def find_blumote_pods(self, pod_name = None):
        # Discover SDP services advertising the given name (defaults to the
        # service name declared by blumote.Services).
        if pod_name is None:
            pod_name = self.service["name"]
        print "Searching for \"%s\" service..." % (pod_name)
        return find_service(name = pod_name)
    def connect_to_blumote_pod(self, addr):
        # Open an RFCOMM socket to the pod; channel 1 is hard-coded
        # (presumably the pod's fixed RFCOMM channel -- TODO confirm).
        self.client_sock = BluetoothSocket(RFCOMM)
        self.client_sock.connect((addr, 1))
    def transport_tx(self, cmd, msg):
        # Frame a message as one command byte followed by the raw payload.
        full_msg = struct.pack("B", cmd)
        full_msg += msg
        self.client_sock.send(full_msg)
    def ir_transmit(self, msg):
        # Send an IR-transmit command and block for the pod's response
        # (up to 128 bytes).
        self.transport_tx(self.cmd_codes.ir_transmit, msg)
        return self.client_sock.recv(128)
if __name__ == "__main__":
    bm_remote = Blumote_Client()
    found = False
    # Keep scanning until a device whose name starts with 'BluMote' is found.
    while not found:
        try:
            nearby_devices = discover_devices(lookup_names = True)
        except:
            print 'failed to find a blumote... retrying'
            nearby_devices = ()
        print 'found %d device(s)' % len(nearby_devices)
        for addr, name in nearby_devices:
            if name[:len('BluMote')] == 'BluMote':
                print 'connecting to', addr, name
                bm_remote.connect_to_blumote_pod(addr)
                found = True
                break
    # Each *.pkl file in the cwd is a previously captured, pickled IR key code.
    buttons = glob('*.pkl')
    print 'Available buttons:'
    for i, button in enumerate(buttons):
        print '\t%i: %s' % (i, os.path.splitext(button)[0])
    print
    # Interactive loop: prompt for a button index and transmit its IR code.
    while True:
        selection = raw_input('Select a button to transmit (-1 to quit): ')
        try:
            selection = int(selection)
        except ValueError:
            print 'Invalid selection'
            continue
        if selection == -1:
            break
        if ((selection < 0) or (selection >= len(buttons))):
            print 'Invalid selecion'
            continue
        button = open(buttons[selection], 'rb')
        key_code = cPickle.load(button)
        button.close()
        # '\x03' is the ir_transmit command byte prepended to the key code.
        bm_remote.ir_transmit(''.join(['\x03', key_code]))
    bm_remote.client_sock.close()
| gpl-3.0 |
nzlosh/st2 | st2common/st2common/validators/api/misc.py | 3 | 1383 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common.constants.pack import SYSTEM_PACK_NAME
from st2common.exceptions.apivalidation import ValueValidationException
__all__ = ["validate_not_part_of_system_pack"]
def validate_not_part_of_system_pack(resource_db):
    """
    Ensure the provided resource database object does not belong to a
    system-level pack.

    :param resource_db: Resource database object to check.
    :type resource_db: ``object``

    :raises ValueValidationException: If the resource belongs to the system pack.
    :return: The unmodified ``resource_db``.
    """
    if getattr(resource_db, "pack", None) == SYSTEM_PACK_NAME:
        raise ValueValidationException(
            "Resources belonging to system level packs can't be manipulated"
        )
    return resource_db
| apache-2.0 |
TalShafir/ansible | lib/ansible/modules/cloud/ovirt/ovirt_snapshot_facts.py | 55 | 3817 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_snapshot_facts
short_description: Retrieve facts about one or more oVirt/RHV virtual machine snapshots
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV virtual machine snapshots."
notes:
- "This module creates a new top-level C(ovirt_snapshots) fact, which
contains a list of snapshots."
options:
vm:
description:
- "Name of the VM with snapshot."
required: true
description:
description:
- "Description of the snapshot, can be used as glob expression."
snapshot_id:
description:
- "Id of the snapshot we want to retrieve facts about."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all snapshots which description start with C(update) for VM named C(centos7):
- ovirt_snapshot_facts:
vm: centos7
description: update*
- debug:
var: ovirt_snapshots
'''
RETURN = '''
ovirt_snapshots:
description: "List of dictionaries describing the snapshot. Snapshot attributes are mapped to dictionary keys,
all snapshot attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
search_by_name,
)
def main():
    """Module entry point: gather snapshot facts for a VM and exit via Ansible.

    Connects to the oVirt/RHV engine, locates the named VM, selects its
    snapshots (by description glob, by explicit id, or all), and returns
    them as the ``ovirt_snapshots`` fact. Any failure is reported through
    ``module.fail_json``.
    """
    argument_spec = ovirt_facts_full_argument_spec(
        vm=dict(required=True),
        description=dict(default=None),
        snapshot_id=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)
    # Initialize before the try block so the finally clause cannot hit a
    # NameError (masking the real failure) when create_connection() raises.
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        vms_service = connection.system_service().vms_service()
        vm_name = module.params['vm']
        vm = search_by_name(vms_service, vm_name)
        if vm is None:
            raise Exception("VM '%s' was not found." % vm_name)
        snapshots_service = vms_service.service(vm.id).snapshots_service()
        # Select snapshots: by description glob, by explicit id, or all.
        if module.params['description']:
            snapshots = [
                e for e in snapshots_service.list()
                if fnmatch.fnmatch(e.description, module.params['description'])
            ]
        elif module.params['snapshot_id']:
            snapshots = [
                snapshots_service.snapshot_service(module.params['snapshot_id']).get()
            ]
        else:
            snapshots = snapshots_service.list()
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_snapshots=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in snapshots
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out when we authenticated with credentials (no caller token).
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
| gpl-3.0 |
timhuanggithub/pox_load_balancing | pox/openflow/flow_table.py | 25 | 9857 | # Copyright 2011,2012,2013 Colin Scott
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Implementation of an OpenFlow flow table
"""
from libopenflow_01 import *
from pox.lib.revent import *
import time
# FlowTable Entries:
# match - ofp_match (13-tuple)
# counters - hash from name -> count. May be stale
# actions - ordered list of ofp_action_*s to apply for matching packets
class TableEntry (object):
    """
    Models a flow table entry, with a match, actions, and options/flags/counters.

    Note: the current time can either be specified explicitly with the optional
    'now' parameter or is taken from time.time().
    """
    def __init__(self, priority=OFP_DEFAULT_PRIORITY, cookie=0, idle_timeout=0,
                 hard_timeout=0, flags=0, match=None, actions=None,
                 buffer_id=None, now=None):
        # 'match' and 'actions' default to None instead of ofp_match() / []:
        # the old defaults were evaluated once at definition time, so every
        # entry created without them shared a single mutable ofp_match and a
        # single actions list.
        if now is None:
            now = time.time()
        if match is None:
            match = ofp_match()
        if actions is None:
            actions = []
        self.counters = {
            'created': now,
            'last_touched': now,
            'bytes': 0,
            'packets': 0,
        }
        self.priority = priority
        self.cookie = cookie
        self.idle_timeout = idle_timeout
        self.hard_timeout = hard_timeout
        self.flags = flags
        self.match = match
        self.actions = actions
        self.buffer_id = buffer_id

    @staticmethod
    def from_flow_mod(flow_mod):
        """Build a TableEntry mirroring the fields of an ofp_flow_mod."""
        return TableEntry(flow_mod.priority, flow_mod.cookie,
                          flow_mod.idle_timeout, flow_mod.hard_timeout,
                          flow_mod.flags, flow_mod.match, flow_mod.actions,
                          flow_mod.buffer_id)

    def to_flow_mod(self, flags=None, **kw):
        """Convert this entry back to an ofp_flow_mod (flags may be overridden)."""
        if flags is None:
            flags = self.flags
        return ofp_flow_mod(priority=self.priority, cookie=self.cookie,
                            match=self.match, idle_timeout=self.idle_timeout,
                            hard_timeout=self.hard_timeout,
                            actions=self.actions, buffer_id=self.buffer_id,
                            flags=flags, **kw)

    def is_matched_by(self, match, priority=None, strict=False, out_port=None):
        """Return whether /this/ entry is matched by some other entry
        (e.g., for FLOW_MOD updates)."""
        check_port = lambda: out_port is None or any(
            isinstance(a, ofp_action_output) and a.port == out_port
            for a in self.actions)
        if strict:
            return (self.match == match and self.priority == priority) and check_port()
        else:
            return match.matches_with_wildcards(self.match) and check_port()

    def touch_packet(self, byte_count, now=None):
        """Update the counters and expiry timer of this entry for a packet
        with the given byte count."""
        if now is None:
            now = time.time()
        self.counters["bytes"] += byte_count
        self.counters["packets"] += 1
        self.counters["last_touched"] = now

    def is_expired(self, now=None):
        """Return whether this flow entry is expired due to its idle timeout
        or hard timeout."""
        if now is None:
            now = time.time()
        return ((self.hard_timeout > 0 and
                 now - self.counters["created"] > self.hard_timeout) or
                (self.idle_timeout > 0 and
                 now - self.counters["last_touched"] > self.idle_timeout))

    def __str__(self):
        return self.__class__.__name__ + "\n " + self.show()

    def __repr__(self):
        return "TableEntry(" + self.show() + ")"

    def show(self):
        return ("priority=%s, cookie=%x, idle_timeoout=%d, hard_timeout=%d, "
                "match=%s, actions=%s buffer_id=%s" % (
                self.priority, self.cookie, self.idle_timeout,
                self.hard_timeout, self.match, repr(self.actions),
                str(self.buffer_id)))

    def flow_stats(self, now=None):
        if now is None:
            now = time.time()
        duration = now - self.counters["created"]
        return ofp_flow_stats(
            match=self.match,
            duration_sec=int(duration),
            duration_nsec=int(duration * 1e9),
            priority=self.priority,
            idle_timeout=self.idle_timeout,
            hard_timeout=self.hard_timeout,
            cookie=self.cookie,
            packet_count=self.counters["packets"],
            # BUG FIX: previously reported counters["last_touched"] (a
            # timestamp) as byte_count; use the byte counter.
            byte_count=self.counters["bytes"],
            actions=self.actions
        )
class FlowTableModification (Event):
    """
    Event raised by a FlowTable when entries are added or removed.
    """
    def __init__(self, added=None, removed=None):
        Event.__init__(self)
        # Fresh lists per event: the old '=[]' defaults were shared across
        # every event instance created without arguments.
        self.added = [] if added is None else added
        self.removed = [] if removed is None else removed
class FlowTable (EventMixin):
    """
    General model of a flow table. Maintains an ordered list of flow entries,
    and finds matching entries for packets and other entries. Supports
    expiration of flows.
    """
    _eventMixin_events = set([FlowTableModification])

    def __init__(self):
        EventMixin.__init__(self)
        # The table is a plain list of TableEntry objects kept sorted by
        # descending priority. Implies O(N) lookup for now. TODO: fix
        self._table = []

    @property
    def entries(self):
        return self._table

    def __len__(self):
        return len(self._table)

    def add_entry(self, entry):
        if not isinstance(entry, TableEntry):
            # BUG FIX: 'raise "Not an Entry type"' raised a TypeError about
            # string exceptions instead of reporting the real problem.
            raise TypeError("Not an Entry type")
        self._table.append(entry)
        # Keep the table sorted by descending priority, with exact matches
        # always going first. Python's sort is stable, so equal keys keep
        # their insertion order. (The py2-only 'lambda(e):' tuple-parameter
        # syntax is replaced with the portable form.)
        self._table.sort(key=lambda e: (e.priority if e.match.is_wildcarded
                                        else (1 << 16) + 1),
                         reverse=True)
        self.raiseEvent(FlowTableModification(added=[entry]))

    def remove_entry(self, entry):
        if not isinstance(entry, TableEntry):
            raise TypeError("Not an Entry type")
        self._table.remove(entry)
        self.raiseEvent(FlowTableModification(removed=[entry]))

    def entries_for_port(self, port_no):
        """Return entries whose final action outputs to the given port."""
        entries = []
        for entry in self._table:
            actions = entry.actions
            if len(actions) > 0:
                last_action = actions[-1]
                if type(last_action) == ofp_action_output:
                    outgoing_port = last_action.port
                    if outgoing_port == port_no:
                        entries.append(entry)
        return entries

    def matching_entries(self, match, priority=0, strict=False, out_port=None):
        return [entry for entry in self._table
                if entry.is_matched_by(match, priority, strict, out_port)]

    def flow_stats(self, match, out_port=None, now=None):
        # NOTE(review): 'now' is accepted but not forwarded to
        # entry.flow_stats() -- preserved as-is; confirm intent upstream.
        return (e.flow_stats() for e in
                self.matching_entries(match=match, strict=False, out_port=out_port))

    def expired_entries(self, now=None):
        return [entry for entry in self._table if entry.is_expired(now)]

    def remove_expired_entries(self, now=None):
        remove_flows = self.expired_entries(now)
        for entry in remove_flows:
            self._table.remove(entry)
        self.raiseEvent(FlowTableModification(removed=remove_flows))
        return remove_flows

    def remove_matching_entries(self, match, priority=0, strict=False):
        remove_flows = self.matching_entries(match, priority, strict)
        for entry in remove_flows:
            self._table.remove(entry)
        self.raiseEvent(FlowTableModification(removed=remove_flows))
        return remove_flows

    def entry_for_packet(self, packet, in_port):
        """Return the highest-priority flow table entry that matches the given
        packet on the given in_port, or None if no entry matches."""
        packet_match = ofp_match.from_packet(packet, in_port)
        for entry in self._table:
            if entry.match.matches_with_wildcards(
                    packet_match, consider_other_wildcards=False):
                return entry
        return None
class SwitchFlowTable(FlowTable):
    """
    Model a flow table for our switch implementation. Handles the behavior in
    response to the OF messages sent to the switch.
    """
    def process_flow_mod(self, flow_mod):
        """Process a flow mod sent to the switch.

        @return a tuple (added|modified|removed, [list of affected entries])
        """
        if flow_mod.flags & OFPFF_CHECK_OVERLAP:
            raise NotImplementedError("OFPFF_CHECK_OVERLAP checking not implemented")
        if (flow_mod.out_port != OFPP_NONE and
                flow_mod.command == ofp_flow_mod_command_rev_map['OFPFC_DELETE']):
            raise NotImplementedError("flow_mod outport checking not implemented")

        if flow_mod.command == OFPFC_ADD:
            # Exactly matching entries have to be removed first.
            self.remove_matching_entries(flow_mod.match, flow_mod.priority,
                                         strict=True)
            return ("added", self.add_entry(TableEntry.from_flow_mod(flow_mod)))
        elif flow_mod.command in (OFPFC_MODIFY, OFPFC_MODIFY_STRICT):
            is_strict = (flow_mod.command == OFPFC_MODIFY_STRICT)
            modified = []
            for entry in self._table:
                # Update the actions field in the matching flows.
                if entry.is_matched_by(flow_mod.match,
                                       priority=flow_mod.priority,
                                       strict=is_strict):
                    entry.actions = flow_mod.actions
                    modified.append(entry)
            if len(modified) == 0:
                # If no matching entry is found, modify acts as add.
                return ("added",
                        self.add_entry(TableEntry.from_flow_mod(flow_mod)))
            else:
                return ("modified", modified)
        elif flow_mod.command in (OFPFC_DELETE, OFPFC_DELETE_STRICT):
            is_strict = (flow_mod.command == OFPFC_DELETE_STRICT)
            # BUG FIX: strict was hard-coded to True, making a non-strict
            # OFPFC_DELETE behave like OFPFC_DELETE_STRICT (per OpenFlow 1.0,
            # non-strict delete must remove all entries the match subsumes).
            return ("removed",
                    self.remove_matching_entries(flow_mod.match,
                                                 flow_mod.priority,
                                                 strict=is_strict))
        else:
            raise AttributeError("Command not yet implemented: %s" % flow_mod.command)
| gpl-3.0 |
VcamX/grpc | src/python/grpcio/grpc/framework/alpha/_face_utilities.py | 1 | 7822 | # Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import collections
import six
# face_interfaces is referenced from specification in this module.
from grpc.framework.common import cardinality
from grpc.framework.face import interfaces as face_interfaces # pylint: disable=unused-import
from grpc.framework.face import utilities as face_utilities
from grpc.framework.alpha import _reexport
from grpc.framework.alpha import interfaces
def _qualified_name(service_name, method_name):
return '/%s/%s' % (service_name, method_name)
# TODO(nathaniel): This structure is getting bloated; it could be shrunk if
# implementations._Stub used a generic rather than a dynamic underlying
# face-layer stub.
class InvocationBreakdown(six.with_metaclass(abc.ABCMeta)):
    """An intermediate representation of invocation-side views of RPC methods.

    Attributes:
      cardinalities: A dictionary from RPC method name to interfaces.Cardinality
        value.
      qualified_names: A dictionary from unqualified RPC method name to
        service-qualified RPC method name.
      face_cardinalities: A dictionary from service-qualified RPC method name
        to cardinality.Cardinality value.
      request_serializers: A dictionary from service-qualified RPC method name
        to callable behavior to be used serializing request values for the RPC.
      response_deserializers: A dictionary from service-qualified RPC method
        name to callable behavior to be used deserializing response values for
        the RPC.
    """
class _EasyInvocationBreakdown(
    InvocationBreakdown,
    collections.namedtuple(
        '_EasyInvocationBreakdown',
        ('cardinalities', 'qualified_names', 'face_cardinalities',
         'request_serializers', 'response_deserializers'))):
    """Immutable namedtuple-backed implementation of InvocationBreakdown."""
    pass
class ServiceBreakdown(six.with_metaclass(abc.ABCMeta)):
    """An intermediate representation of service-side views of RPC methods.

    Attributes:
      implementations: A dictionary from service-qualified RPC method name to
        face_interfaces.MethodImplementation implementing the RPC method.
      request_deserializers: A dictionary from service-qualified RPC method
        name to callable behavior to be used deserializing request values for
        the RPC.
      response_serializers: A dictionary from service-qualified RPC method name
        to callable behavior to be used serializing response values for the RPC.
    """
class _EasyServiceBreakdown(
    ServiceBreakdown,
    collections.namedtuple(
        '_EasyServiceBreakdown',
        ('implementations', 'request_deserializers', 'response_serializers'))):
    """Immutable namedtuple-backed implementation of ServiceBreakdown."""
    pass
def break_down_invocation(service_name, method_descriptions):
    """Derives an InvocationBreakdown from several RPC method descriptions.

    Args:
      service_name: The package-qualified full name of the service.
      method_descriptions: A dictionary from RPC method name to
        interfaces.RpcMethodInvocationDescription describing the RPCs.

    Returns:
      An InvocationBreakdown corresponding to the given method descriptions.
    """
    cardinalities = {}
    qualified_names = {}
    face_cardinalities = {}
    request_serializers = {}
    response_deserializers = {}
    for name, method_description in six.iteritems(method_descriptions):
        qualified_name = _qualified_name(service_name, name)
        method_cardinality = method_description.cardinality()
        # Reuse the single cardinality() result instead of querying the
        # description twice (the old code called cardinality() again here).
        cardinalities[name] = method_cardinality
        qualified_names[name] = qualified_name
        face_cardinalities[qualified_name] = _reexport.common_cardinality(
            method_cardinality)
        request_serializers[qualified_name] = method_description.serialize_request
        response_deserializers[qualified_name] = (
            method_description.deserialize_response)
    return _EasyInvocationBreakdown(
        cardinalities, qualified_names, face_cardinalities, request_serializers,
        response_deserializers)
def break_down_service(service_name, method_descriptions):
    """Derives a ServiceBreakdown from several RPC method descriptions.

    Args:
      method_descriptions: A dictionary from RPC method name to
        interfaces.RpcMethodServiceDescription describing the RPCs.

    Returns:
      A ServiceBreakdown corresponding to the given method descriptions.
    """
    implementations = {}
    request_deserializers = {}
    response_serializers = {}
    for name, method_description in six.iteritems(method_descriptions):
        qualified_name = _qualified_name(service_name, name)
        method_cardinality = method_description.cardinality()
        # Each branch defines a 'service' adapter wrapping the user behavior.
        # The behavior is bound via a default argument (service_behavior=...)
        # so each closure captures the current iteration's value rather than
        # the loop variable (late-binding pitfall).
        if method_cardinality is interfaces.Cardinality.UNARY_UNARY:
            def service(
                    request, face_rpc_context,
                    service_behavior=method_description.service_unary_unary):
                return service_behavior(
                    request, _reexport.rpc_context(face_rpc_context))
            implementations[qualified_name] = face_utilities.unary_unary_inline(
                service)
        elif method_cardinality is interfaces.Cardinality.UNARY_STREAM:
            def service(
                    request, face_rpc_context,
                    service_behavior=method_description.service_unary_stream):
                return service_behavior(
                    request, _reexport.rpc_context(face_rpc_context))
            implementations[qualified_name] = face_utilities.unary_stream_inline(
                service)
        elif method_cardinality is interfaces.Cardinality.STREAM_UNARY:
            def service(
                    request_iterator, face_rpc_context,
                    service_behavior=method_description.service_stream_unary):
                return service_behavior(
                    request_iterator, _reexport.rpc_context(face_rpc_context))
            implementations[qualified_name] = face_utilities.stream_unary_inline(
                service)
        elif method_cardinality is interfaces.Cardinality.STREAM_STREAM:
            def service(
                    request_iterator, face_rpc_context,
                    service_behavior=method_description.service_stream_stream):
                return service_behavior(
                    request_iterator, _reexport.rpc_context(face_rpc_context))
            implementations[qualified_name] = face_utilities.stream_stream_inline(
                service)
        request_deserializers[qualified_name] = (
            method_description.deserialize_request)
        response_serializers[qualified_name] = (
            method_description.serialize_response)
    return _EasyServiceBreakdown(
        implementations, request_deserializers, response_serializers)
| bsd-3-clause |
SmithsonianEnterprises/django-threadedcomments | setup.py | 3 | 2570 | #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
# When creating the sdist, make sure the django.mo file also exists:
if 'sdist' in sys.argv or 'develop' in sys.argv:
    os.chdir('threadedcomments')
    try:
        from django.core import management
        # Compile .po -> .mo so compiled translations ship inside the sdist.
        management.call_command('compilemessages', stdout=sys.stderr, verbosity=1)
    finally:
        # Always restore the original working directory for the setup() call.
        os.chdir('..')
def read(*parts):
    """Return the UTF-8 decoded contents of a file path relative to this file.

    *parts* are joined onto the directory containing setup.py. The file is
    opened with a context manager so the handle is closed deterministically
    (the old code leaked it until garbage collection).
    """
    file_path = path.join(path.dirname(__file__), *parts)
    with codecs.open(file_path, encoding='utf-8') as handle:
        return handle.read()
def find_version(*parts):
    """Extract the package __version__ string from the given file path parts.

    Raises RuntimeError when no __version__ assignment is found.
    """
    source = read(*parts)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", source, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return str(match.group(1))
# Package metadata. FIX: the classifier list contained 'Framework :: Django'
# twice; the duplicate entry is removed (kept once, next to the
# version-specific Django classifiers).
setup(
    name='django-threadedcomments',
    version=find_version('threadedcomments', '__init__.py'),
    license='BSD',
    install_requires=[
        'django-contrib-comments>=1.5',
    ],
    description='A simple yet flexible threaded commenting system.',
    long_description=read('README.rst'),
    keywords='django,comments,threading',
    author='Eric Florenzano',
    author_email='floguy@gmail.com',
    maintainer='Diederik van der Boor',
    maintainer_email='vdboor@edoburu.nl',
    url='https://github.com/HonzaKral/django-threadedcomments',
    download_url='https://github.com/HonzaKral/django-threadedcomments/zipball/master',
    packages=find_packages(exclude=('example*',)),
    include_package_data=True,
    test_suite = 'runtests',
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Framework :: Django',
        'Framework :: Django :: 1.5',
        'Framework :: Django :: 1.6',
        'Framework :: Django :: 1.7',
        'Framework :: Django :: 1.8',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| bsd-3-clause |
earshel/PokeyPySnipe | POGOProtos/Data/Logs/CatchPokemonLogEntry_pb2.py | 5 | 4809 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Data/Logs/CatchPokemonLogEntry.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Enums import PokemonId_pb2 as POGOProtos_dot_Enums_dot_PokemonId__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Data/Logs/CatchPokemonLogEntry.proto',
package='POGOProtos.Data.Logs',
syntax='proto3',
serialized_pb=_b('\n/POGOProtos/Data/Logs/CatchPokemonLogEntry.proto\x12\x14POGOProtos.Data.Logs\x1a POGOProtos/Enums/PokemonId.proto\"\xf7\x01\n\x14\x43\x61tchPokemonLogEntry\x12\x41\n\x06result\x18\x01 \x01(\x0e\x32\x31.POGOProtos.Data.Logs.CatchPokemonLogEntry.Result\x12/\n\npokemon_id\x18\x02 \x01(\x0e\x32\x1b.POGOProtos.Enums.PokemonId\x12\x15\n\rcombat_points\x18\x03 \x01(\x05\x12\x17\n\x0fpokemon_data_id\x18\x04 \x01(\x04\";\n\x06Result\x12\t\n\x05UNSET\x10\x00\x12\x14\n\x10POKEMON_CAPTURED\x10\x01\x12\x10\n\x0cPOKEMON_FLED\x10\x02\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Enums_dot_PokemonId__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CATCHPOKEMONLOGENTRY_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='POGOProtos.Data.Logs.CatchPokemonLogEntry.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POKEMON_CAPTURED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POKEMON_FLED', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=296,
serialized_end=355,
)
_sym_db.RegisterEnumDescriptor(_CATCHPOKEMONLOGENTRY_RESULT)
_CATCHPOKEMONLOGENTRY = _descriptor.Descriptor(
name='CatchPokemonLogEntry',
full_name='POGOProtos.Data.Logs.CatchPokemonLogEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='POGOProtos.Data.Logs.CatchPokemonLogEntry.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='POGOProtos.Data.Logs.CatchPokemonLogEntry.pokemon_id', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='combat_points', full_name='POGOProtos.Data.Logs.CatchPokemonLogEntry.combat_points', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokemon_data_id', full_name='POGOProtos.Data.Logs.CatchPokemonLogEntry.pokemon_data_id', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CATCHPOKEMONLOGENTRY_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=355,
)
_CATCHPOKEMONLOGENTRY.fields_by_name['result'].enum_type = _CATCHPOKEMONLOGENTRY_RESULT
_CATCHPOKEMONLOGENTRY.fields_by_name['pokemon_id'].enum_type = POGOProtos_dot_Enums_dot_PokemonId__pb2._POKEMONID
_CATCHPOKEMONLOGENTRY_RESULT.containing_type = _CATCHPOKEMONLOGENTRY
DESCRIPTOR.message_types_by_name['CatchPokemonLogEntry'] = _CATCHPOKEMONLOGENTRY
CatchPokemonLogEntry = _reflection.GeneratedProtocolMessageType('CatchPokemonLogEntry', (_message.Message,), dict(
DESCRIPTOR = _CATCHPOKEMONLOGENTRY,
__module__ = 'POGOProtos.Data.Logs.CatchPokemonLogEntry_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Data.Logs.CatchPokemonLogEntry)
))
_sym_db.RegisterMessage(CatchPokemonLogEntry)
# @@protoc_insertion_point(module_scope)
| mit |
ovnicraft/openerp-server | openerp/addons/base/module/wizard/base_update_translations.py | 1 | 2901 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
import tools
import cStringIO
from tools.translate import _
class base_update_translations(osv.osv_memory):
    """Transient wizard that (re)loads all translations for a chosen language."""
    def _get_languages(self, cr, uid, context):
        # Selection helper: all active, translatable languages as
        # (code, name) pairs for the 'lang' field below.
        lang_obj = self.pool.get('res.lang')
        ids = lang_obj.search(cr, uid, ['&', ('active', '=', True), ('translatable', '=', True),])
        langs = lang_obj.browse(cr, uid, ids)
        return [(lang.code, lang.name) for lang in langs]
    def _get_lang_name(self, cr, uid, lang_code):
        # Resolve a language code to its display name; raise a user-facing
        # error if the code is unknown.
        lang_obj = self.pool.get('res.lang')
        ids = lang_obj.search(cr, uid, [('code', '=', lang_code)])
        if not ids:
            raise osv.except_osv(_('Error!'), _('No language with code "%s" exists') % lang_code)
        lang = lang_obj.browse(cr, uid, ids[0])
        return lang.name
    def act_update(self, cr, uid, ids, context=None):
        # Export every translatable term to an in-memory CSV, then re-import
        # it for the selected language to refresh stale translations.
        this = self.browse(cr, uid, ids)[0]
        lang_name = self._get_lang_name(cr, uid, this.lang)
        buf = cStringIO.StringIO()
        tools.trans_export(this.lang, ['all'], buf, 'csv', cr)
        tools.trans_load_data(cr, buf, 'csv', this.lang, lang_name=lang_name)
        buf.close()
        return {'type': 'ir.actions.act_window_close'}
    def default_get(self, cr, uid, fields, context=None):
        # Pre-select the language when the wizard is opened from a res.lang
        # record (active_model/active_id come from the action context).
        if context is None:
            context = {}
        res = super(base_update_translations, self).default_get(cr, uid, fields, context=context)
        if context.get('active_model') != "res.lang":
            return res
        record_id = context.get('active_id', False) or False
        if record_id:
            lang = self.pool.get('res.lang').browse(cr, uid, record_id).code
            res.update(lang=lang)
        return res
    _name = 'base.update.translations'
    _columns = {
        'lang': fields.selection(_get_languages, 'Language', required=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Lujeni/ansible | test/units/modules/network/fortios/test_fortios_wireless_controller_hotspot20_anqp_network_auth_type.py | 21 | 9129 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_wireless_controller_hotspot20_anqp_network_auth_type
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    # Auto-applied fixture: replace the Connection class imported by the
    # module under test so no real HTTPAPI connection is ever made.
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_wireless_controller_hotspot20_anqp_network_auth_type.Connection')
    return connection_class_mock


# NOTE(review): this passes the fixture *function* object (not a mocked
# connection instance) into FortiOSHandler. The handler's connection is never
# exercised in these tests because schema/set/delete are patched, so the
# tests still pass -- confirm before relying on fos_instance elsewhere.
fos_instance = FortiOSHandler(connection_mock)
def test_wireless_controller_hotspot20_anqp_network_auth_type_creation(mocker):
    """state=present with a successful POST must report changed, no error."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    module_args = {
        'username': 'admin',
        'state': 'present',
        'vdom': 'root',
        'wireless_controller_hotspot20_anqp_network_auth_type': {
            'auth_type': 'acceptance-of-terms',
            'name': 'default_name_4',
            'url': 'myurl_5.com'
        },
    }

    is_error, changed, response = fortios_wireless_controller_hotspot20_anqp_network_auth_type.fortios_wireless_controller_hotspot20(
        module_args, fos_instance)

    # Module keys use underscores; the FortiOS API payload uses dashes.
    set_mock.assert_called_with(
        'wireless-controller.hotspot20',
        'anqp-network-auth-type',
        data={
            'auth-type': 'acceptance-of-terms',
            'name': 'default_name_4',
            'url': 'myurl_5.com'
        },
        vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_wireless_controller_hotspot20_anqp_network_auth_type_creation_fails(mocker):
    """An error reply (HTTP 500) from set() must surface as is_error with no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'wireless_controller_hotspot20_anqp_network_auth_type': {
            'auth_type': 'acceptance-of-terms',
            'name': 'default_name_4',
            'url': 'myurl_5.com'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_wireless_controller_hotspot20_anqp_network_auth_type.fortios_wireless_controller_hotspot20(input_data, fos_instance)

    # Module keys use underscores; the FortiOS API payload uses dashes.
    expected_data = {
        'auth-type': 'acceptance-of-terms',
        'name': 'default_name_4',
        'url': 'myurl_5.com'
    }

    set_method_mock.assert_called_with('wireless-controller.hotspot20', 'anqp-network-auth-type', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_wireless_controller_hotspot20_anqp_network_auth_type_removal(mocker):
    """state=absent with a successful DELETE must report changed, no error."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    module_args = {
        'username': 'admin',
        'state': 'absent',
        'vdom': 'root',
        'wireless_controller_hotspot20_anqp_network_auth_type': {
            'auth_type': 'acceptance-of-terms',
            'name': 'default_name_4',
            'url': 'myurl_5.com'
        },
    }

    is_error, changed, response = fortios_wireless_controller_hotspot20_anqp_network_auth_type.fortios_wireless_controller_hotspot20(
        module_args, fos_instance)

    # The mkey (record identifier) is derived inside the module, so only
    # assert that one was passed.
    delete_mock.assert_called_with(
        'wireless-controller.hotspot20', 'anqp-network-auth-type',
        mkey=ANY, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_wireless_controller_hotspot20_anqp_network_auth_type_deletion_fails(mocker):
    """A failed DELETE (HTTP 500) must report an error and no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'wireless_controller_hotspot20_anqp_network_auth_type': {
            'auth_type': 'acceptance-of-terms',
            'name': 'default_name_4',
            'url': 'myurl_5.com'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_wireless_controller_hotspot20_anqp_network_auth_type.fortios_wireless_controller_hotspot20(input_data, fos_instance)

    # The mkey (record identifier) is derived inside the module, so only
    # assert that one was passed.
    delete_method_mock.assert_called_with('wireless-controller.hotspot20', 'anqp-network-auth-type', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_wireless_controller_hotspot20_anqp_network_auth_type_idempotent(mocker):
    """A 404 reply means the object was already in the desired state:
    neither an error nor a change is reported."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'wireless_controller_hotspot20_anqp_network_auth_type': {
            'auth_type': 'acceptance-of-terms',
            'name': 'default_name_4',
            'url': 'myurl_5.com'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_wireless_controller_hotspot20_anqp_network_auth_type.fortios_wireless_controller_hotspot20(input_data, fos_instance)

    # Module keys use underscores; the FortiOS API payload uses dashes.
    expected_data = {
        'auth-type': 'acceptance-of-terms',
        'name': 'default_name_4',
        'url': 'myurl_5.com'
    }

    set_method_mock.assert_called_with('wireless-controller.hotspot20', 'anqp-network-auth-type', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_wireless_controller_hotspot20_anqp_network_auth_type_filter_foreign_attributes(mocker):
    """Unknown keys in the module arguments must not reach the API payload."""
    schema_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    module_args = {
        'username': 'admin',
        'state': 'present',
        'vdom': 'root',
        'wireless_controller_hotspot20_anqp_network_auth_type': {
            'random_attribute_not_valid': 'tag',
            'auth_type': 'acceptance-of-terms',
            'name': 'default_name_4',
            'url': 'myurl_5.com'
        },
    }

    is_error, changed, response = fortios_wireless_controller_hotspot20_anqp_network_auth_type.fortios_wireless_controller_hotspot20(
        module_args, fos_instance)

    # 'random_attribute_not_valid' must have been filtered out of the payload.
    set_mock.assert_called_with(
        'wireless-controller.hotspot20',
        'anqp-network-auth-type',
        data={
            'auth-type': 'acceptance-of-terms',
            'name': 'default_name_4',
            'url': 'myurl_5.com'
        },
        vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
wscullin/spack | lib/spack/spack/platforms/bgq.py | 3 | 2163 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from spack.architecture import Platform, Target
from spack.operating_systems.linux_distro import LinuxDistro
from spack.operating_systems.cnk import Cnk
class Bgq(Platform):
    """IBM Blue Gene/Q system platform.

    Front-end (login) nodes run a Linux distribution on POWER7; back-end
    (compute) nodes run the CNK kernel on ppc64, which is the default
    compilation target.
    """

    priority = 30
    front_end = 'power7'
    back_end = 'ppc64'
    default = 'ppc64'

    def __init__(self):
        """Register the BG/Q targets and operating systems."""
        super(Bgq, self).__init__('bgq')

        # Either cross-compile for the compute nodes (default) or build
        # natively for the login nodes.
        self.add_target(self.front_end, Target(self.front_end))
        self.add_target(self.back_end, Target(self.back_end))

        front_distro = LinuxDistro()
        back_distro = Cnk()

        self.front_os = str(front_distro)
        self.back_os = str(back_distro)
        self.default_os = self.back_os

        self.add_operating_system(str(front_distro), front_distro)
        self.add_operating_system(str(back_distro), back_distro)

    @classmethod
    def detect(cls):
        """Return True when running on a Blue Gene/Q system.

        /bgsys is the BG/Q system software mount point, so its presence
        identifies the platform.
        """
        # Fix: PEP 8 names the first argument of a classmethod `cls`
        # (it was previously `self`, which misleadingly suggested an
        # instance method).
        return os.path.exists('/bgsys')
| lgpl-2.1 |
intgr/django | django/db/backends/mysql/base.py | 7 | 14150 | """
MySQL database backend for Django.
Requires mysqlclient: https://pypi.python.org/pypi/mysqlclient/
"""
import re
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils.functional import cached_property
try:
import MySQLdb as Database
except ImportError as err:
raise ImproperlyConfigured(
'Error loading MySQLdb module.\n'
'Did you install mysqlclient?'
) from err
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
# Reject driver versions too old for this backend up front, with a clear
# error instead of obscure failures later.
version = Database.version_info
if version < (1, 3, 3):
    raise ImproperlyConfigured("mysqlclient 1.3.3 or newer is required; you have %s" % Database.__version__)


# MySQLdb returns TIME columns as timedelta -- they are more like timedelta in
# terms of actual behavior as they are signed and include days -- and Django
# expects time.
django_conversions = conversions.copy()
django_conversions.update({
    FIELD_TYPE.TIME: backend_utils.typecast_time,
})

# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same).
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
class CursorWrapper:
    """
    A thin proxy around MySQLdb's normal cursor class that catches certain
    exception instances and reraises them with the right types.

    Implemented by delegation rather than subclassing so it works with
    whatever concrete cursor Connection.cursor() returns.
    """
    # OperationalError codes that Django treats as integrity violations
    # (1048: column cannot be null).
    codes_for_integrityerror = (1048,)

    def __init__(self, cursor):
        self.cursor = cursor

    def execute(self, query, args=None):
        # args=None disables parameter interpolation entirely.
        try:
            return self.cursor.execute(query, args)
        except Database.OperationalError as e:
            # MySQL misclassifies some integrity problems as operational
            # errors; remap those so callers catch the logical type.
            code = e.args[0]
            if code not in self.codes_for_integrityerror:
                raise
            raise utils.IntegrityError(*tuple(e.args))

    def executemany(self, query, args):
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError as e:
            # Same remapping as execute() above.
            code = e.args[0]
            if code not in self.codes_for_integrityerror:
                raise
            raise utils.IntegrityError(*tuple(e.args))

    def __getattr__(self, attr):
        # Anything not defined on the wrapper is delegated to the
        # underlying cursor. (Only reached when normal lookup fails.)
        try:
            return self.__dict__[attr]
        except KeyError:
            return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Close instead of passing through to avoid backend-specific behavior
        # (#17671).
        self.close()
class DatabaseWrapper(BaseDatabaseWrapper):
    """MySQL implementation of Django's database wrapper (mysqlclient driver)."""
    vendor = 'mysql'
    # This dictionary maps Field objects to their associated MySQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    _data_types = {
        'AutoField': 'integer AUTO_INCREMENT',
        'BigAutoField': 'bigint AUTO_INCREMENT',
        'BinaryField': 'longblob',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer UNSIGNED',
        'PositiveSmallIntegerField': 'smallint UNSIGNED',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'longtext',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }

    @cached_property
    def data_types(self):
        # Use fractional-second temporal columns when the features class
        # reports the server supports them.
        if self.features.supports_microsecond_precision:
            return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)')
        else:
            return self._data_types

    # SQL fragments for each ORM lookup; BINARY forces case-sensitive
    # matching for the non-"i" variants.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }

    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
        'icontains': "LIKE CONCAT('%%', {}, '%%')",
        'startswith': "LIKE BINARY CONCAT({}, '%%')",
        'istartswith': "LIKE CONCAT({}, '%%')",
        'endswith': "LIKE BINARY CONCAT('%%', {})",
        'iendswith': "LIKE CONCAT('%%', {})",
    }

    # Transaction isolation levels accepted in OPTIONS['isolation_level'].
    isolation_levels = {
        'read uncommitted',
        'read committed',
        'repeatable read',
        'serializable',
    }

    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    # Classes instantiated in __init__().
    client_class = DatabaseClient
    creation_class = DatabaseCreation
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    ops_class = DatabaseOperations
    validation_class = DatabaseValidation

    def get_connection_params(self):
        """Build the kwargs for MySQLdb.connect() from settings_dict."""
        kwargs = {
            'conv': django_conversions,
            'charset': 'utf8',
        }
        settings_dict = self.settings_dict
        if settings_dict['USER']:
            kwargs['user'] = settings_dict['USER']
        if settings_dict['NAME']:
            kwargs['db'] = settings_dict['NAME']
        if settings_dict['PASSWORD']:
            kwargs['passwd'] = settings_dict['PASSWORD']
        # A HOST starting with '/' is interpreted as a Unix socket path.
        if settings_dict['HOST'].startswith('/'):
            kwargs['unix_socket'] = settings_dict['HOST']
        elif settings_dict['HOST']:
            kwargs['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            kwargs['port'] = int(settings_dict['PORT'])
        # We need the number of potentially affected rows after an
        # "UPDATE", not the number of changed rows.
        kwargs['client_flag'] = CLIENT.FOUND_ROWS
        # Validate the transaction isolation level, if specified.
        options = settings_dict['OPTIONS'].copy()
        isolation_level = options.pop('isolation_level', 'read committed')
        if isolation_level:
            isolation_level = isolation_level.lower()
            if isolation_level not in self.isolation_levels:
                raise ImproperlyConfigured(
                    "Invalid transaction isolation level '%s' specified.\n"
                    "Use one of %s, or None." % (
                        isolation_level,
                        ', '.join("'%s'" % s for s in sorted(self.isolation_levels))
                    ))
            # The variable assignment form of setting transaction isolation
            # levels will be used, e.g. "set tx_isolation='repeatable-read'".
            isolation_level = isolation_level.replace(' ', '-')
        self.isolation_level = isolation_level
        # Remaining OPTIONS entries are passed straight to the driver.
        kwargs.update(options)
        return kwargs

    def get_new_connection(self, conn_params):
        return Database.connect(**conn_params)

    def init_connection_state(self):
        # Per-connection session setup, executed as a single SET statement.
        assignments = []
        if self.features.is_sql_auto_is_null_enabled:
            # SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on
            # a recently inserted row will return when the field is tested
            # for NULL. Disabling this brings this aspect of MySQL in line
            # with SQL standards.
            assignments.append('SQL_AUTO_IS_NULL = 0')

        if self.isolation_level:
            assignments.append("TX_ISOLATION = '%s'" % self.isolation_level)

        if assignments:
            with self.cursor() as cursor:
                cursor.execute('SET ' + ', '.join(assignments))

    def create_cursor(self, name=None):
        # `name` is accepted for API compatibility; MySQL has no named
        # (server-side) cursors.
        cursor = self.connection.cursor()
        return CursorWrapper(cursor)

    def _rollback(self):
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            # Rollback isn't supported on some storage engines; ignore.
            pass

    def _set_autocommit(self, autocommit):
        with self.wrap_database_errors:
            self.connection.autocommit(autocommit)

    def disable_constraint_checking(self):
        """
        Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
        to indicate constraint checks need to be re-enabled.
        """
        self.cursor().execute('SET foreign_key_checks=0')
        return True

    def enable_constraint_checking(self):
        """
        Re-enable foreign key checks after they have been disabled.
        """
        # Override needs_rollback in case constraint_checks_disabled is
        # nested inside transaction.atomic.
        self.needs_rollback, needs_rollback = False, self.needs_rollback
        try:
            self.cursor().execute('SET foreign_key_checks=1')
        finally:
            self.needs_rollback = needs_rollback

    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.

        Raises an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provides detailed information about the
        invalid reference in the error message.

        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            # Tables without a primary key can't be checked this way.
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # Find rows whose FK value has no matching referenced row.
                cursor.execute(
                    """
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
                    """ % (
                        primary_key_column_name, column_name, table_name,
                        referenced_table_name, column_name, referenced_column_name,
                        column_name, referenced_column_name,
                    )
                )
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError(
                        "The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (
                            table_name, bad_row[0], table_name, column_name,
                            bad_row[1], referenced_table_name, referenced_column_name,
                        )
                    )

    def is_usable(self):
        # ping() raises when the server connection has gone away.
        try:
            self.connection.ping()
        except Database.Error:
            return False
        else:
            return True

    @cached_property
    def mysql_version(self):
        """Server version as a tuple of ints, e.g. (5, 7, 19)."""
        with self.temporary_connection() as cursor:
            cursor.execute('SELECT VERSION()')
            server_info = cursor.fetchone()[0]
        match = server_version_re.match(server_info)
        if not match:
            raise Exception('Unable to determine MySQL version from version string %r' % server_info)
        return tuple(int(x) for x in match.groups())
| bsd-3-clause |
yrobla/nova | nova/api/openstack/compute/contrib/cells.py | 7 | 10153 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Make this cell's own name and capabilities options available on CONF.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')

# Policy check for every cells API action.
authorize = extensions.extension_authorizer('compute', 'cells')


def make_cell(elem):
    # Attach the serialized cell attributes and the <capabilities>
    # sub-element to an XML template element.
    elem.set('name')
    elem.set('username')
    elem.set('type')
    elem.set('rpc_host')
    elem.set('rpc_port')

    caps = xmlutil.SubTemplateElement(elem, 'capabilities',
                                      selector='capabilities')
    # One child element per capability entry: tag = key, text = value.
    cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0),
                                     selector=xmlutil.get_items)
    cap.text = 1
# Default XML namespace used by the cell serializer templates.
cell_nsmap = {None: wsgi.XMLNS_V10}


class CellTemplate(xmlutil.TemplateBuilder):
    # XML serializer template for a single cell.
    def construct(self):
        root = xmlutil.TemplateElement('cell', selector='cell')
        make_cell(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellsTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a list of cells."""

    def construct(self):
        # Emit one <cell> element per entry selected by 'cells'.
        cells_root = xmlutil.TemplateElement('cells')
        cell_elem = xmlutil.SubTemplateElement(cells_root, 'cell',
                                               selector='cells')
        make_cell(cell_elem)
        return xmlutil.MasterTemplate(cells_root, 1, nsmap=cell_nsmap)
class CellDeserializer(wsgi.XMLDeserializer):
    """Deserializer to handle xml-formatted cell create requests."""

    def _extract_capabilities(self, cap_node):
        # Each child element becomes one capability: tag name -> text value.
        caps = {}
        for cap in cap_node.childNodes:
            cap_name = cap.tagName
            caps[cap_name] = self.extract_text(cap)
        return caps

    def _extract_cell(self, node):
        # Flatten the <cell> element into a dict; <capabilities> gets its
        # own nested-dict extractor, everything else is plain text.
        cell = {}
        cell_node = self.find_first_child_named(node, 'cell')

        extract_fns = {'capabilities': self._extract_capabilities}

        for child in cell_node.childNodes:
            name = child.tagName
            extract_fn = extract_fns.get(name, self.extract_text)
            cell[name] = extract_fn(child)
        return cell

    def default(self, string):
        """Deserialize an xml-formatted cell create request."""
        node = xmlutil.safe_minidom_parse_string(string)

        return {'body': {'cell': self._extract_cell(node)}}
def _filter_keys(item, keys):
"""
Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _scrub_cell(cell, detail=False):
    """Reduce a cell record to the fields exposed through the API, adding a
    synthetic 'type' field derived from the is_parent flag."""
    wanted = ['name', 'username', 'rpc_host', 'rpc_port']
    if detail:
        # Detail views additionally expose the capabilities dict.
        wanted = wanted + ['capabilities']

    scrubbed = _filter_keys(cell, wanted)
    if cell['is_parent']:
        scrubbed['type'] = 'parent'
    else:
        scrubbed['type'] = 'child'
    return scrubbed
class Controller(object):
    """Controller for Cell resources."""

    # NOTE(review): the translation function `_` is not imported in this
    # module; presumably installed as a builtin by nova's gettext setup --
    # confirm.

    def __init__(self):
        self.compute_api = compute.API()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()

    def _get_cells(self, ctxt, req, detail=False):
        """Return all cells."""
        # Ask the CellsManager for the most recent data
        items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
        # Honor pagination/limit query parameters, then strip internal fields.
        items = common.limited(items, req)
        items = [_scrub_cell(item, detail=detail) for item in items]
        return dict(cells=items)

    @wsgi.serializers(xml=CellsTemplate)
    def index(self, req):
        """Return all cells in brief."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req)

    @wsgi.serializers(xml=CellsTemplate)
    def detail(self, req):
        """Return all cells in detail."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req, detail=True)

    @wsgi.serializers(xml=CellTemplate)
    def info(self, req):
        """Return name and capabilities for this cell."""
        context = req.environ['nova.context']
        authorize(context)
        cell_capabs = {}
        # CONF.cells.capabilities holds 'key=value' strings; split them
        # into a dict.
        my_caps = CONF.cells.capabilities
        for cap in my_caps:
            key, value = cap.split('=')
            cell_capabs[key] = value
        cell = {'name': CONF.cells.name,
                'type': 'self',
                'rpc_host': None,
                'rpc_port': 0,
                'username': None,
                'capabilities': cell_capabs}
        return dict(cell=cell)

    @wsgi.serializers(xml=CellTemplate)
    def show(self, req, id):
        """Return data about the given cell name.  'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            cell = db.cell_get(context, id)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        return dict(cell=_scrub_cell(cell))

    def delete(self, req, id):
        """Delete a child or parent cell entry.  'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        num_deleted = db.cell_delete(context, id)
        if num_deleted == 0:
            raise exc.HTTPNotFound()
        return {}

    def _validate_cell_name(self, cell_name):
        """Validate cell name is not empty and doesn't contain '!' or '.'."""
        # '!' and '.' appear to be reserved as cell-routing separators --
        # confirm against nova.cells routing code.
        if not cell_name:
            msg = _("Cell name cannot be empty")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        if '!' in cell_name or '.' in cell_name:
            msg = _("Cell name cannot contain '!' or '.'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)

    def _validate_cell_type(self, cell_type):
        """Validate cell_type is 'parent' or 'child'."""
        if cell_type not in ['parent', 'child']:
            msg = _("Cell type must be 'parent' or 'child'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)

    def _convert_cell_type(self, cell):
        """Convert cell['type'] to is_parent boolean."""
        # The API speaks 'type'; the DB model stores is_parent.  Missing
        # 'type' defaults to a child cell.
        if 'type' in cell:
            self._validate_cell_type(cell['type'])
            cell['is_parent'] = cell['type'] == 'parent'
            del cell['type']
        else:
            cell['is_parent'] = False

    @wsgi.serializers(xml=CellTemplate)
    @wsgi.deserializers(xml=CellDeserializer)
    def create(self, req, body):
        """Create a child cell entry."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        if 'name' not in cell:
            msg = _("No cell name in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        self._validate_cell_name(cell['name'])
        self._convert_cell_type(cell)
        cell = db.cell_create(context, cell)
        return dict(cell=_scrub_cell(cell))

    @wsgi.serializers(xml=CellTemplate)
    @wsgi.deserializers(xml=CellDeserializer)
    def update(self, req, id, body):
        """Update a child cell entry.  'id' is the cell name to update."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        # The primary key may not be changed through this API.
        cell.pop('id', None)
        if 'name' in cell:
            self._validate_cell_name(cell['name'])
        self._convert_cell_type(cell)
        try:
            cell = db.cell_update(context, id, cell)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        return dict(cell=_scrub_cell(cell))

    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']
        authorize(context)
        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        # NOTE(review): 'deleted' is accepted above but the error message
        # below doesn't mention it -- confirm which is intended.
        if body:
            msg = _("Only 'updated_since' and 'project_id' are understood.")
            raise exc.HTTPBadRequest(explanation=msg)
        if updated_since:
            try:
                timeutils.parse_isotime(updated_since)
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
class Cells(extensions.ExtensionDescriptor):
    """Enables cells-related functionality such as adding neighbor cells,
    listing neighbor cells, and getting the capabilities of the local cell.
    """

    name = "Cells"
    alias = "os-cells"
    namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1"
    updated = "2011-09-21T00:00:00+00:00"

    def get_resources(self):
        # Collection-level actions exposed in addition to the standard
        # CRUD routes (GET /os-cells/detail, /os-cells/info, POST
        # /os-cells/sync_instances).
        coll_actions = {
            'detail': 'GET',
            'info': 'GET',
            'sync_instances': 'POST',
        }

        res = extensions.ResourceExtension('os-cells',
                Controller(), collection_actions=coll_actions)
        return [res]
| apache-2.0 |
brian-yang/mozillians | vendor-local/lib/python/django_browserid/tests/__init__.py | 11 | 2935 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.utils.functional import wraps
from mock import patch
def fake_create_user(email):
    """No-op stand-in for a BrowserID user-creation hook; ignores ``email``."""
    pass
class mock_browserid(object):
    """Fake django_browserid verification; context manager or decorator.

    Patches ``django_browserid.base._verify_http_request`` so verification
    returns a canned Persona-style response instead of hitting the network:

    with mock_browserid('a@b.com'):
        django_browserid.verify('random-token')  # = {'status': 'okay',
                                                 #    'email': 'a@b.com',
                                                 #    ...}

    @mock_browserid(None)
    def browserid_test():
        django_browserid.verify('random-token')  # = False
    """
    def __init__(self, email=None, audience=None, unverified_email=None,
                 pass_mock=False):
        self.pass_mock = pass_mock
        self.patcher = patch('django_browserid.base._verify_http_request')
        # A None email simulates a failed verification.
        status = u'okay' if email is not None else u'failure'
        self.return_value = {
            u'audience': audience,
            u'email': email,
            u'issuer': u'login.persona.org:443',
            u'status': status,
            u'valid-until': 1311377222765
        }
        if unverified_email is not None:
            # Persona reports unverified addresses under a different key.
            self.return_value['unverified-email'] = unverified_email
            del self.return_value['email']

    def __enter__(self):
        verify_mock = self.patcher.start()
        verify_mock.return_value = self.return_value
        return verify_mock

    def __exit__(self, exc_type, exc_value, traceback):
        self.patcher.stop()

    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            with self as verify_mock:
                call_args = args
                if self.pass_mock:
                    # Hand the mock to the wrapped function as a last
                    # positional argument.
                    call_args = call_args + (verify_mock,)
                return func(*call_args, **kwargs)
        return wrapper
class patch_settings(object):
    """Patch Django settings for the duration of a test.

    Usable as both a context manager and a decorator.

    TODO: Remove when we drop support for Django 1.3 and use override_settings
    instead.
    """
    def __init__(self, **kwargs):
        # Import at runtime so we get the lazy settings object, and patch
        # the _wrapped settings to avoid deleting settings accidentally.
        from django.conf import settings
        target = settings._wrapped
        self.patches = [patch.object(target, name, value, create=True)
                        for name, value in kwargs.items()]

    def __enter__(self):
        for patcher in self.patches:
            patcher.start()

    def __exit__(self, exc_type, exc_value, traceback):
        for patcher in self.patches:
            patcher.stop()

    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapper
| bsd-3-clause |
citrix-openstack-build/python-openstackclient | openstackclient/tests/common/test_clientmanager.py | 1 | 1180 | # Copyright 2012-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from openstackclient.common import clientmanager
from openstackclient.tests import utils
class Container(object):
    # Fixture class: 'attr' uses the ClientCache descriptor with a factory
    # that builds a fresh object; the descriptor is expected to cache the
    # first result (see test_singleton below).
    attr = clientmanager.ClientCache(lambda x: object())
    def __init__(self):
        pass
class TestClientManager(utils.TestCase):
    """Exercises the ClientCache descriptor used by the client manager."""

    def setUp(self):
        super(TestClientManager, self).setUp()

    def test_singleton(self):
        # NOTE(dtroyer): Verify that the ClientCache descriptor only invokes
        # the factory one time and always returns the same value after that.
        container = Container()
        self.assertEqual(container.attr, container.attr)
| apache-2.0 |
ted-gould/nova | nova/api/openstack/compute/legacy_v2/contrib/aggregates.py | 51 | 11505 | # Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Aggregate admin API extension."""
import datetime
import six
from webob import exc
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import utils
authorize = extensions.extension_authorizer('compute', 'aggregates')
def _get_context(req):
return req.environ['nova.context']
def get_host_from_body(fn):
    """Decorator: validate a one-key {'host': ...} body and pass the host.

    Replaces the ``body`` argument of the wrapped method with the validated
    host name, raising HTTPBadRequest on malformed input.
    """
    def wrapped(self, req, id, body, *args, **kwargs):
        if len(body) != 1:
            raise exc.HTTPBadRequest(
                explanation=_('Only host parameter can be specified'))
        if 'host' not in body:
            raise exc.HTTPBadRequest(
                explanation=_('Host parameter must be specified'))
        host = body['host']
        try:
            # Host names must be non-empty and at most 255 characters.
            utils.check_string_length(host, 'host', 1, 255)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        return fn(self, req, id, host, *args, **kwargs)
    return wrapped
class AggregateController(object):
    """The Host Aggregates API controller for the OpenStack API.

    Translates webob requests into compute-layer AggregateAPI calls and maps
    nova exceptions onto HTTP error responses (400/404/409).
    """
    def __init__(self):
        # All aggregate operations are delegated to the compute-layer API.
        self.api = compute_api.AggregateAPI()
    def index(self, req):
        """Returns a list a host aggregate's id, name, availability_zone."""
        context = _get_context(req)
        authorize(context)
        aggregates = self.api.get_aggregate_list(context)
        return {'aggregates': [self._marshall_aggregate(a)['aggregate']
                for a in aggregates]}
    def create(self, req, body):
        """Creates an aggregate, given its name and
        optional availability zone.

        Raises HTTPBadRequest on malformed input and HTTPConflict when the
        aggregate name already exists.
        """
        context = _get_context(req)
        authorize(context)
        # The body must contain exactly one top-level key: "aggregate".
        if len(body) != 1:
            raise exc.HTTPBadRequest()
        try:
            host_aggregate = body["aggregate"]
            name = host_aggregate["name"]
        except KeyError:
            raise exc.HTTPBadRequest()
        avail_zone = host_aggregate.get("availability_zone")
        try:
            utils.check_string_length(name, "Aggregate name", 1, 255)
            if avail_zone is not None:
                utils.check_string_length(avail_zone, "Availability_zone", 1,
                                          255)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        try:
            aggregate = self.api.create_aggregate(context, name, avail_zone)
        except exception.AggregateNameExists as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InvalidAggregateAction as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        agg = self._marshall_aggregate(aggregate)
        # To maintain the same API result as before the changes for returning
        # nova objects were made.
        del agg['aggregate']['hosts']
        del agg['aggregate']['metadata']
        return agg
    def show(self, req, id):
        """Shows the details of an aggregate, hosts and metadata included."""
        context = _get_context(req)
        authorize(context)
        try:
            aggregate = self.api.get_aggregate(context, id)
        except exception.AggregateNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return self._marshall_aggregate(aggregate)
    def update(self, req, id, body):
        """Updates the name and/or availability_zone of given aggregate.

        Only the "name" and "availability_zone" keys may be updated.
        """
        context = _get_context(req)
        authorize(context)
        if len(body) != 1:
            raise exc.HTTPBadRequest()
        try:
            updates = body["aggregate"]
        except KeyError:
            raise exc.HTTPBadRequest()
        if len(updates) < 1:
            raise exc.HTTPBadRequest()
        for key in updates.keys():
            if key not in ["name", "availability_zone"]:
                raise exc.HTTPBadRequest()
        try:
            if 'name' in updates:
                utils.check_string_length(updates['name'], "Aggregate name", 1,
                                          255)
            if updates.get("availability_zone") is not None:
                utils.check_string_length(updates['availability_zone'],
                                          "Availability_zone", 1, 255)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        try:
            aggregate = self.api.update_aggregate(context, id, updates)
        except exception.AggregateNameExists as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.AggregateNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InvalidAggregateAction as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        return self._marshall_aggregate(aggregate)
    def delete(self, req, id):
        """Removes an aggregate by id."""
        context = _get_context(req)
        authorize(context)
        try:
            self.api.delete_aggregate(context, id)
        except exception.AggregateNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InvalidAggregateAction as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
    def action(self, req, id, body):
        """Dispatch a member POST action to the matching _-prefixed handler.

        The body holds a single {action_name: data} pair; only the first
        entry is consulted (the loop returns on it).
        """
        _actions = {
            'add_host': self._add_host,
            'remove_host': self._remove_host,
            'set_metadata': self._set_metadata,
        }
        for action, data in six.iteritems(body):
            if action not in _actions.keys():
                msg = _('Aggregates does not have %s action') % action
                raise exc.HTTPBadRequest(explanation=msg)
            return _actions[action](req, id, data)
        # Reached only when the body was empty.
        raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
    @get_host_from_body
    def _add_host(self, req, id, host):
        """Adds a host to the specified aggregate."""
        context = _get_context(req)
        authorize(context)
        # NOTE(alex_xu): back-compatible with db layer hard-code admin
        # permission checks. This has to be left only for API v2.0 because
        # this version has to be stable even if it means that only admins
        # can call this method while the policy could be changed.
        nova_context.require_admin_context(context)
        try:
            aggregate = self.api.add_host_to_aggregate(context, id, host)
        except (exception.AggregateNotFound, exception.ComputeHostNotFound):
            msg = _('Cannot add host %(host)s in aggregate'
                    ' %(id)s: not found') % {'host': host, 'id': id}
            raise exc.HTTPNotFound(explanation=msg)
        except (exception.AggregateHostExists,
                exception.InvalidAggregateAction):
            msg = _('Cannot add host %(host)s in aggregate'
                    ' %(id)s: host exists') % {'host': host, 'id': id}
            raise exc.HTTPConflict(explanation=msg)
        return self._marshall_aggregate(aggregate)
    @get_host_from_body
    def _remove_host(self, req, id, host):
        """Removes a host from the specified aggregate."""
        context = _get_context(req)
        authorize(context)
        # NOTE(alex_xu): back-compatible with db layer hard-code admin
        # permission checks. This has to be left only for API v2.0 because
        # this version has to be stable even if it means that only admins
        # can call this method while the policy could be changed.
        nova_context.require_admin_context(context)
        try:
            aggregate = self.api.remove_host_from_aggregate(context, id, host)
        except (exception.AggregateNotFound, exception.AggregateHostNotFound,
                exception.ComputeHostNotFound):
            msg = _('Cannot remove host %(host)s in aggregate'
                    ' %(id)s: not found') % {'host': host, 'id': id}
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InvalidAggregateAction:
            msg = _('Cannot remove host %(host)s in aggregate'
                    ' %(id)s: invalid') % {'host': host, 'id': id}
            raise exc.HTTPConflict(explanation=msg)
        return self._marshall_aggregate(aggregate)
    def _set_metadata(self, req, id, body):
        """Replaces the aggregate's existing metadata with new metadata."""
        context = _get_context(req)
        authorize(context)
        if len(body) != 1:
            raise exc.HTTPBadRequest()
        try:
            metadata = body["metadata"]
        except KeyError:
            raise exc.HTTPBadRequest()
        # The metadata should be a dict
        if not isinstance(metadata, dict):
            msg = _('The value of metadata must be a dict')
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            # Keys must be non-empty; a None value unsets the key, otherwise
            # the value may be an empty string (min length 0).
            for key, value in metadata.items():
                utils.check_string_length(key, "metadata.key", 1, 255)
                if value is not None:
                    utils.check_string_length(value, "metadata.value", 0, 255)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        try:
            aggregate = self.api.update_aggregate_metadata(context,
                                                           id, metadata)
        except exception.AggregateNotFound:
            msg = _('Cannot set metadata %(metadata)s in aggregate'
                    ' %(id)s') % {'metadata': metadata, 'id': id}
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InvalidAggregateAction as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        return self._marshall_aggregate(aggregate)
    def _marshall_aggregate(self, aggregate):
        """Wrap an aggregate's items in the {"aggregate": {...}} envelope."""
        _aggregate = {}
        for key, value in aggregate.items():
            # NOTE(danms): The original API specified non-TZ-aware timestamps
            if isinstance(value, datetime.datetime):
                value = value.replace(tzinfo=None)
            _aggregate[key] = value
        return {"aggregate": _aggregate}
class Aggregates(extensions.ExtensionDescriptor):
    """Admin-only aggregate administration."""
    name = "Aggregates"
    alias = "os-aggregates"
    namespace = "http://docs.openstack.org/compute/ext/aggregates/api/v1.1"
    updated = "2012-01-12T00:00:00Z"
    def get_resources(self):
        """Route os-aggregates requests, including the member POST action."""
        controller = AggregateController()
        extension = extensions.ResourceExtension(
            'os-aggregates', controller, member_actions={"action": "POST", })
        return [extension]
| apache-2.0 |
craneworks/debisogen | debisogen/iso.py | 3 | 4389 | """Utilities and shell command to bundle a preseed file inside a Debian ISO."""
import glob
import os
import re
import shutil
from utils import use_temp_dir, replace_in_file, execute_command, download_file
def download_iso_file(source, destination):
    """Download system installer image from source to destination.

    Thin wrapper over utils.download_file.
    """
    download_file(source, destination)
def iso_to_directory(iso_file, directory):
    """Extract the contents of *iso_file* into *directory*.

    The directory is created when missing, and the extracted tree is made
    user-writable so it can be edited afterwards.
    """
    if not os.path.exists(directory):
        os.makedirs(directory)
    params = {'directory': directory, 'iso_file': iso_file}
    execute_command('bsdtar -C %(directory)s -xf %(iso_file)s', params)
    # Files inside an ISO are read-only; restore write permission for edits.
    execute_command('chmod -R u+w %(directory)s', {'directory': directory})
def directory_to_iso(directory, iso_file):
    """Build a bootable (isolinux) ISO image at *iso_file* from *directory*.

    The output directory is created when missing.
    """
    output_dir = os.path.dirname(os.path.abspath(iso_file))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    command = ('genisoimage -o %(iso)s -r -J -no-emul-boot '
               ' -boot-load-size 4 -boot-info-table '
               '-b isolinux/isolinux.bin -c isolinux/boot.cat %(dir)s')
    execute_command(command, {'iso': iso_file, 'dir': directory})
def initrd_to_directory(initrd_file, directory):
    """Unpack a gzipped cpio initrd into the destination *directory*.

    NOTE(review): gunzips *initrd_file* in place, so the ``.gz`` file is
    replaced on disk by its uncompressed form.
    """
    assert(initrd_file.endswith('.gz'))
    execute_command('gunzip %(file)s', {'file': initrd_file})
    unpacked = initrd_file[:-3]  # Strip the ".gz" suffix.
    execute_command('cd %(dir)s ; cpio -id < %(file)s',
                    {'dir': directory, 'file': unpacked})
def directory_to_initrd(directory, initrd_file):
    """Pack *directory* into a gzipped cpio archive at *initrd_file*."""
    assert(initrd_file.endswith('.gz'))
    uncompressed = initrd_file[:-3]  # cpio first, gzip afterwards.
    execute_command("cd %(in)s && find . | cpio --create --format='newc' > "
                    "%(out)s", {'in': directory, 'out': uncompressed})
    execute_command('gzip %(file)s', {'file': uncompressed})
def toggle_boot_loader(directory, is_boot_loader_hidden=True):
    """In directory, alter isolinux.cfg file to hide (default) or show boot
    loader on startup.

    Hiding boot loader is required for a fully automated installation:
    a "timeout 1" line makes isolinux boot almost immediately, while
    "timeout 0" makes it wait for user input.
    """
    timeout = int(is_boot_loader_hidden)  # 1 to hide boot loader!
    # Replace "timeout" option in isolinux/isolinux.cfg.
    filename = os.path.join(directory, 'isolinux', 'isolinux.cfg')
    # BUG FIX: the previous pattern used [0-1], which only matched an
    # existing timeout value of exactly "0" or "1"; any other value
    # (e.g. "30") was silently left unchanged.  Match any integer instead.
    # Raw strings are also used so the regex escapes are not at the mercy
    # of Python string-escape rules.
    pattern = re.compile(r'^(\s*timeout\s*)\d+(\s*#|$)', re.IGNORECASE)
    replacement = r'\g<1>%d\g<2>' % timeout
    replace_in_file(pattern, replacement, filename)
def rebuild_md5sum(directory):
    """Rebuild md5sum.txt file in the given directory.

    Checksums every file except md5sum.txt itself and anything under
    isolinux/, matching the layout Debian installer images use.
    """
    execute_command('cd %(dir)s ; md5sum `find ! -name "md5sum.txt" ! '
                    '-path "./isolinux/*" -follow -type f` > md5sum.txt ;',
                    {'dir': directory})
def insert_preseed_into_iso(preseed_file, input_iso_file, output_iso_file,
                            is_boot_loader_hidden=True):
    """Alters input ISO file to create another ISO file which includes preseed
    file.

    The ISO is unpacked to a temp dir, the preseed is copied into the
    install.*/initrd.gz image as preseed.cfg, the boot loader visibility is
    toggled, md5sums are rebuilt and the tree is repacked as a new ISO.
    """
    input_iso_file = os.path.normpath(os.path.abspath(input_iso_file))
    with use_temp_dir() as iso_directory:
        iso_to_directory(input_iso_file, iso_directory)
        # Find adequate install directory, i.e. "install.amd" for amd64
        # architecture.
        install_dir = glob.glob(os.path.join(iso_directory, 'install.*'))
        if not install_dir:
            # BUG FIX: this message previously contained a literal,
            # unexpanded "%(iso)s" placeholder because no format arguments
            # were supplied.
            raise Exception('No install.* directory found in %(iso)s'
                            % {'iso': input_iso_file})
        if len(install_dir) > 1:
            raise Exception('Several install.* directories found in %(iso)s '
                            'ISO: %(dirs)s' % {'iso': input_iso_file,
                                               'dirs': install_dir})
        initrd_file = os.path.join(install_dir[0], 'initrd.gz')
        with use_temp_dir() as initrd_directory:
            initrd_to_directory(initrd_file, initrd_directory)
            output_preseed_file = os.path.join(initrd_directory, 'preseed.cfg')
            shutil.copy(preseed_file, output_preseed_file)
            directory_to_initrd(initrd_directory, initrd_file)
        toggle_boot_loader(iso_directory, is_boot_loader_hidden)
        rebuild_md5sum(iso_directory)
        directory_to_iso(iso_directory, output_iso_file)
| bsd-3-clause |
plxaye/chromium | src/tools/valgrind/suppressions.py | 61 | 34532 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# suppressions.py
"""Post-process Valgrind suppression matcher.
Suppressions are defined as follows:
# optional one-line comments anywhere in the suppressions file.
{
<Short description of the error>
Toolname:Errortype
fun:function_name
obj:object_filename
fun:wildcarded_fun*_name
# an ellipsis wildcards zero or more functions in a stack.
...
fun:some_other_function_name
}
If ran from the command line, suppressions.py does a self-test
of the Suppression class.
"""
import os
import re
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
'..', 'python', 'google'))
import path_utils
ELLIPSIS = '...'
def GetSuppressions():
  """Read every suppression file and bucket the results by platform/tool.

  Returns a dict with the keys 'common_suppressions', 'linux_suppressions',
  'mac_suppressions', 'win_suppressions', 'heapcheck_suppressions',
  'drmem_suppressions' and 'drmem_full_suppressions', each mapping to a
  list of parsed suppressions.
  """
  root = path_utils.ScriptDir()

  def load(*parts):
    # Read one suppression file relative to this script's directory.
    return ReadSuppressionsFromFile(os.path.join(root, *parts))

  result = {}
  result['common_suppressions'] = (load("memcheck", "suppressions.txt") +
                                   load("tsan", "suppressions.txt"))
  result['linux_suppressions'] = (
      load("memcheck", "suppressions_linux.txt") +
      load("tsan", "suppressions_linux.txt"))
  result['mac_suppressions'] = (load("memcheck", "suppressions_mac.txt") +
                                load("tsan", "suppressions_mac.txt"))
  result['win_suppressions'] = load("tsan", "suppressions_win32.txt")
  result['heapcheck_suppressions'] = load("..", "heapcheck",
                                          "suppressions.txt")
  result['drmem_suppressions'] = load("drmemory", "suppressions.txt")
  result['drmem_full_suppressions'] = load("drmemory",
                                           "suppressions_full.txt")
  return result
def GlobToRegex(glob_pattern, ignore_case=False):
  """Translate glob wildcards (*?) into regex syntax.  Escape the rest.

  Args:
    glob_pattern: pattern in which '*' matches any run of characters and
        '?' matches a single character.
    ignore_case: if True, each letter is emitted as a [xX] character class.

  Returns:
    The equivalent regular expression as a string.
  """
  # Collect the pieces in a list and join once at the end; the old code
  # concatenated onto a string and then called ''.join() on that string,
  # which is a no-op.
  pieces = []
  for char in glob_pattern:
    if char == '*':
      pieces.append('.*')
    elif char == '?':
      pieces.append('.')
    elif ignore_case and char.isalpha():
      pieces.append('[%s%s]' % (char.lower(), char.upper()))
    else:
      pieces.append(re.escape(char))
  return ''.join(pieces)
def StripAndSkipCommentsIterator(lines):
  """Generator of (line_no, line) pairs that strips comments and whitespace.

  Line numbers are 1-based (most editors use 1-based numbering).  Comment
  lines are dropped entirely, but empty lines are kept because they mark
  the end of a suppression.
  """
  for (line_no, raw_line) in enumerate(lines, start=1):
    stripped = raw_line.strip()  # Drop \n and surrounding whitespace.
    if stripped.startswith('#'):
      continue  # Comments
    yield (line_no, stripped)
class Suppression(object):
  """This class represents a single stack trace suppression.

  Attributes:
    description: A string representing the error description.
    type: A string representing the error type, e.g. Memcheck:Leak.
    stack: The lines comprising the stack trace for the suppression.
    regex: The actual regex used to match against scraped reports.
  """

  def __init__(self, description, type, stack, defined_at, regex):
    """Inits Suppression.

    description, type, stack, regex: same as class attributes
    defined_at: file:line identifying where the suppression was defined
    """
    self.description = description
    self.type = type
    self.stack = stack
    self.defined_at = defined_at
    # Compile once; generated suppressions are matched as multi-line blobs.
    self.regex = re.compile(regex, re.MULTILINE)

  def Match(self, suppression_from_report):
    """Returns bool indicating whether this suppression matches
    the suppression generated from Valgrind error report.

    We match our suppressions against generated suppressions (not against
    raw reports) since both sides then share the same textual format,
    whereas the XML reports contain filenames, are demangled, and are
    generally more difficult to parse.

    Args:
      suppression_from_report: list of strings (function names).
    Returns:
      True if the suppression is not empty and matches the report.
    """
    if not self.stack:
      # An empty suppression would match everything; treat it as no match.
      return False
    report_blob = '\n'.join(frame.strip()
                            for frame in suppression_from_report) + '\n'
    return self.regex.match(report_blob) is not None
def FilenameToTool(filename):
  """Return the name of the tool that a file is related to, or None.

  The tool name is taken from the directory immediately containing the file.

  Example mappings:
    tools/heapcheck/suppressions.txt -> heapcheck
    tools/valgrind/tsan/suppressions.txt -> tsan
    tools/valgrind/drmemory/suppressions.txt -> drmemory
    tools/valgrind/drmemory/suppressions_full.txt -> drmemory
    tools/valgrind/memcheck/suppressions.txt -> memcheck
    tools/valgrind/memcheck/suppressions_mac.txt -> memcheck
  """
  parent_dir = os.path.abspath(filename).split(os.sep)[-2]
  if parent_dir in ('heapcheck', 'drmemory', 'memcheck', 'tsan'):
    return parent_dir
  return None
def ReadSuppressionsFromFile(filename):
  """Read suppressions from the given file and return them as a list.

  The parser is selected from the tool the file belongs to (see
  FilenameToTool).  Non-existent files are treated as empty.
  """
  tool_to_parser = {
    "drmemory": ReadDrMemorySuppressions,
    "memcheck": ReadValgrindStyleSuppressions,
    "tsan": ReadValgrindStyleSuppressions,
    "heapcheck": ReadValgrindStyleSuppressions,
  }
  tool = FilenameToTool(filename)
  assert tool in tool_to_parser, (
      "unknown tool %s for filename %s" % (tool, filename))
  parse_func = tool_to_parser[tool]

  # Consider non-existent files to be empty.
  if not os.path.exists(filename):
    return []

  # BUG FIX: the file object was previously leaked on the success path (and
  # opened with the Python-2-only file() builtin); a with-block closes it on
  # every path, including when the parser raises SuppressionError.
  with open(filename, 'r') as input_file:
    return parse_func(input_file, filename)
class ValgrindStyleSuppression(Suppression):
  """A suppression using the Valgrind syntax.

  Most tools, even ones that are not Valgrind-based, use this syntax, ie
  Heapcheck, TSan, etc.

  Attributes:
    Same as Suppression.
  """
  def __init__(self, description, type, stack, defined_at):
    """Creates a suppression using the Memcheck, TSan, and Heapcheck syntax."""
    # Build one multi-line regex matching a whole generated suppression
    # block: "{", any name line, the type line, then the stack frames.
    regex = '{\n.*\n%s\n' % type
    for line in stack:
      if line == ELLIPSIS:
        # "..." wildcards zero or more whole frames.
        regex += '(.*\n)*'
      else:
        regex += GlobToRegex(line)
        regex += '\n'
    # Frames below the ones listed in the suppression are irrelevant.
    regex += '(.*\n)*'
    regex += '}'
    # In the recent version of valgrind-variant we've switched
    # from memcheck's default Addr[1248]/Value[1248]/Cond suppression types
    # to simply Unaddressable/Uninitialized.
    # The suppression generator no longer gives us "old" types thus
    # for the "new-type" suppressions:
    #  * Memcheck:Unaddressable should also match Addr* reports,
    #  * Memcheck:Uninitialized should also match Cond and Value reports,
    #
    # We also want to support legacy suppressions (e.g. copied from
    # upstream bugs etc), so:
    #  * Memcheck:Addr[1248] suppressions should match Unaddressable reports,
    #  * Memcheck:Cond and Memcheck:Value[1248] should match Uninitialized.
    # Please note the latest two rules only apply to the
    # tools/valgrind/waterfall.sh suppression matcher and the real
    # valgrind-variant Memcheck will not suppress
    # e.g. Addr1 printed as Unaddressable with Addr4 suppression.
    # Be careful to check the access size while copying legacy suppressions!
    for sz in [1, 2, 4, 8]:
      regex = regex.replace("\nMemcheck:Addr%d\n" % sz,
                            "\nMemcheck:(Addr%d|Unaddressable)\n" % sz)
      regex = regex.replace("\nMemcheck:Value%d\n" % sz,
                            "\nMemcheck:(Value%d|Uninitialized)\n" % sz)
    regex = regex.replace("\nMemcheck:Cond\n",
                          "\nMemcheck:(Cond|Uninitialized)\n")
    regex = regex.replace("\nMemcheck:Unaddressable\n",
                          "\nMemcheck:(Addr.|Unaddressable)\n")
    regex = regex.replace("\nMemcheck:Uninitialized\n",
                          "\nMemcheck:(Cond|Value.|Uninitialized)\n")
    return super(ValgrindStyleSuppression, self).__init__(
        description, type, stack, defined_at, regex)
  def __str__(self):
    """Stringify."""
    lines = [self.description, self.type] + self.stack
    return "{\n %s\n}\n" % "\n ".join(lines)
class SuppressionError(Exception):
  """Raised when a suppression file cannot be parsed.

  Carries the offending location ("file:line") so the rendered message
  points the user at the exact spot.
  """

  def __init__(self, message, happened_at):
    self._msg = message
    self._where = happened_at

  def __str__(self):
    return 'Error reading suppressions at %s!\n%s' % (self._where, self._msg)
def ReadValgrindStyleSuppressions(lines, supp_descriptor):
  """Given a list of lines, returns a list of suppressions.

  Implemented as a small state machine: outside a suppression we only
  accept blank lines or "{"; inside, the first line is the description,
  the second the type, then stack frames until "}".

  Args:
    lines: a list of lines containing suppressions.
    supp_descriptor: should typically be a filename.
        Used only when printing errors.

  Raises:
    SuppressionError: on any syntax violation, annotated with file:line.
  """
  result = []
  cur_descr = ''
  cur_type = ''
  cur_stack = []
  in_suppression = False
  nline = 0
  for line in lines:
    nline += 1
    line = line.strip()
    if line.startswith('#'):
      continue
    if not in_suppression:
      if not line:
        # empty lines between suppressions
        pass
      elif line.startswith('{'):
        in_suppression = True
        pass
      else:
        raise SuppressionError('Expected: "{"',
                               "%s:%d" % (supp_descriptor, nline))
    elif line.startswith('}'):
      result.append(
          ValgrindStyleSuppression(cur_descr, cur_type, cur_stack,
                                   "%s:%d" % (supp_descriptor, nline)))
      cur_descr = ''
      cur_type = ''
      cur_stack = []
      in_suppression = False
    elif not cur_descr:
      # First line inside "{...}": free-form description/name.
      cur_descr = line
      continue
    elif not cur_type:
      # Second line: the tool:type tag, validated against the known set.
      if (not line.startswith("Memcheck:") and
          not line.startswith("ThreadSanitizer:") and
          (line != "Heapcheck:Leak")):
        raise SuppressionError(
            'Expected "Memcheck:TYPE", "ThreadSanitizer:TYPE" '
            'or "Heapcheck:Leak", got "%s"' % line,
            "%s:%d" % (supp_descriptor, nline))
      supp_type = line.split(':')[1]
      if not supp_type in ["Addr1", "Addr2", "Addr4", "Addr8",
                           "Cond", "Free", "Jump", "Leak", "Overlap", "Param",
                           "Value1", "Value2", "Value4", "Value8",
                           "Race", "UnlockNonLocked", "InvalidLock",
                           "Unaddressable", "Uninitialized"]:
        raise SuppressionError('Unknown suppression type "%s"' % supp_type,
                               "%s:%d" % (supp_descriptor, nline))
      cur_type = line
      continue
    elif re.match("^fun:.*|^obj:.*|^\.\.\.$", line):
      cur_stack.append(line.strip())
    elif len(cur_stack) == 0 and cur_type == "Memcheck:Param":
      # Memcheck:Param suppressions carry a free-form system-call line
      # before the stack frames.
      cur_stack.append(line.strip())
    else:
      raise SuppressionError(
          '"fun:function_name" or "obj:object_file" or "..." expected',
          "%s:%d" % (supp_descriptor, nline))
  return result
def PresubmitCheckSuppressions(supps):
  """Check a list of suppressions and return a list of SuppressionErrors.

  Flags placeholder suppression names and duplicate names.  Mostly useful
  for separating the checking logic from the Presubmit API for testing.
  """
  seen = {}  # Maps suppression name -> first suppression with that name.
  errors = []
  for supp in supps:
    if re.search("<.*suppression.name.here>", supp.description):
      # Suppression name line is
      # <insert_a_suppression_name_here> for Memcheck,
      # <Put your suppression name here> for TSan,
      # name=<insert_a_suppression_name_here> for DrMemory
      errors.append(
          SuppressionError(
              "You've forgotten to put a suppression name like bug_XXX",
              supp.defined_at))
      continue
    previous = seen.get(supp.description)
    if previous is None:
      seen[supp.description] = supp
    else:
      errors.append(
          SuppressionError(
              'Suppression named "%s" is defined more than once, '
              'see %s' % (supp.description, previous.defined_at),
              supp.defined_at))
  return errors
def PresubmitCheck(input_api, output_api):
  """A helper function useful in PRESUBMIT.py

  Parses every affected suppressions*.txt file and runs the suppression
  checks over it.  Returns a list of presubmit errors or [].
  """
  is_suppression_file = re.compile('suppressions.*\.txt$').search
  filenames = [f.AbsoluteLocalPath() for f in input_api.AffectedFiles()
               if is_suppression_file(f.LocalPath())]

  errors = []

  # TODO(timurrrr): warn on putting suppressions into a wrong file,
  # e.g. TSan suppression in a memcheck file.

  for filename in filenames:
    try:
      errors.extend(
          PresubmitCheckSuppressions(ReadSuppressionsFromFile(filename)))
    except SuppressionError as e:
      errors.append(e)

  return [output_api.PresubmitError(str(e)) for e in errors]
class DrMemorySuppression(Suppression):
  """A suppression using the DrMemory syntax.

  Attributes:
    instr: The instruction to match.
    Rest inherited from Suppression.
  """
  def __init__(self, name, report_type, instr, stack, defined_at):
    """Constructor."""
    self.instr = instr
    # Construct the regex.
    regex = '{\n'
    if report_type == 'LEAK':
      # LEAK suppressions also silence "POSSIBLE LEAK" reports.
      regex += '(POSSIBLE )?LEAK'
    else:
      regex += report_type
    regex += '\nname=.*\n'
    # TODO(rnk): Implement http://crbug.com/107416#c5 .
    # drmemory_analyze.py doesn't generate suppressions with an instruction in
    # them, so these suppressions will always fail to match. We should override
    # Match to fetch the instruction from the report and try to match against
    # that.
    if instr:
      regex += 'instruction=%s\n' % GlobToRegex(instr)
    for line in stack:
      if line == ELLIPSIS:
        # "..." wildcards zero or more whole frames.
        regex += '(.*\n)*'
      elif '!' in line:
        (mod, func) = line.split('!')
        if func == ELLIPSIS:  # mod!ellipsis frame
          regex += '(%s\!.*\n)+' % GlobToRegex(mod, ignore_case=True)
        else:  # mod!func frame
          # Ignore case for the module match, but not the function match.
          regex += '%s\!%s\n' % (GlobToRegex(mod, ignore_case=True),
                                 GlobToRegex(func, ignore_case=False))
      else:
        regex += GlobToRegex(line)
        regex += '\n'
    regex += '(.*\n)*'  # Match anything left in the stack.
    regex += '}'
    return super(DrMemorySuppression, self).__init__(name, report_type, stack,
                                                     defined_at, regex)
  def __str__(self):
    """Stringify."""
    text = self.type + "\n"
    if self.description:
      text += "name=%s\n" % self.description
    if self.instr:
      text += "instruction=%s\n" % self.instr
    text += "\n".join(self.stack)
    text += "\n"
    return text
# Possible DrMemory error report types. Keep consistent with suppress_name
# array in drmemory/drmemory/report.c.
DRMEMORY_ERROR_TYPES = [
    'UNADDRESSABLE ACCESS',
    'UNINITIALIZED READ',
    'INVALID HEAP ARGUMENT',
    'GDI USAGE ERROR',
    'HANDLE LEAK',
    'LEAK',
    'POSSIBLE LEAK',
    'WARNING',
    ]
# Regexes to match valid drmemory frames; a suppression stack line must
# match one of these (see ReadDrMemorySuppressions).
DRMEMORY_FRAME_PATTERNS = [
    re.compile(r"^.*\!.*$"),              # mod!func
    re.compile(r"^.*!\.\.\.$"),           # mod!ellipsis
    re.compile(r"^\<.*\+0x.*\>$"),        # <mod+0xoffs>
    re.compile(r"^\<not in a module\>$"),
    re.compile(r"^system call .*$"),
    re.compile(r"^\*$"),                  # wildcard
    re.compile(r"^\.\.\.$"),              # ellipsis
    ]
def ReadDrMemorySuppressions(lines, supp_descriptor):
  """Given a list of lines, returns a list of DrMemory suppressions.

  Args:
    lines: a list of lines containing suppressions.
    supp_descriptor: should typically be a filename.
        Used only when parsing errors happen.

  Raises:
    SuppressionError: on unknown error types, malformed frames, or
        suppressions with no (or ellipsis-terminated) stacks.
  """
  lines = StripAndSkipCommentsIterator(lines)
  suppressions = []
  for (line_no, line) in lines:
    if not line:
      continue
    if line not in DRMEMORY_ERROR_TYPES:
      raise SuppressionError('Expected a DrMemory error type, '
                             'found %r instead\n Valid error types: %s' %
                             (line, ' '.join(DRMEMORY_ERROR_TYPES)),
                             "%s:%d" % (supp_descriptor, line_no))
    # Suppression starts here.
    report_type = line
    name = ''
    instr = None
    stack = []
    defined_at = "%s:%d" % (supp_descriptor, line_no)
    found_stack = False
    # NOTE: this inner loop advances the *same* iterator as the outer loop,
    # consuming the suppression's optional header lines and stack frames.
    for (line_no, line) in lines:
      if not found_stack and line.startswith('name='):
        name = line.replace('name=', '')
      elif not found_stack and line.startswith('instruction='):
        instr = line.replace('instruction=', '')
      else:
        # Unrecognized prefix indicates start of stack trace.
        found_stack = True
        if not line:
          # Blank line means end of suppression.
          break
        if not any([regex.match(line) for regex in DRMEMORY_FRAME_PATTERNS]):
          raise SuppressionError(
              ('Unexpected stack frame pattern at line %d\n' +
               'Frames should be one of the following:\n' +
               ' module!function\n' +
               ' module!...\n' +
               ' <module+0xhexoffset>\n' +
               ' <not in a module>\n' +
               ' system call Name\n' +
               ' *\n' +
               ' ...\n') % line_no, defined_at)
        stack.append(line)
    if len(stack) == 0:  # In case we hit EOF or blank without any stack frames.
      raise SuppressionError('Suppression "%s" has no stack frames, ends at %d'
                             % (name, line_no), defined_at)
    if stack[-1] == ELLIPSIS:
      raise SuppressionError('Suppression "%s" ends in an ellipsis on line %d' %
                             (name, line_no), defined_at)
    suppressions.append(
        DrMemorySuppression(name, report_type, instr, stack, defined_at))
  return suppressions
def ParseSuppressionOfType(lines, supp_descriptor, def_line_no, report_type):
  """Parse the suppression starting on this line.

  Suppressions start with a type, have an optional name and instruction, and a
  stack trace that ends in a blank line.
  """
  # NOTE(review): as visible here the function body is only this docstring and
  # therefore returns None; the implementation appears to have been lost in
  # extraction — TODO confirm against the upstream source.
def TestStack(stack, positive, negative, suppression_parser=None):
  """Helper for SelfTest() that checks one stack against suppression lists.

  Args:
    stack: the stack to match the suppressions.
    positive: the list of suppressions that must match the given stack.
    negative: the list of suppressions that should not match.
    suppression_parser: optional arg for the suppression parser, default is
      ReadValgrindStyleSuppressions.
  """
  parser = suppression_parser or ReadValgrindStyleSuppressions
  stack_lines = stack.split("\n")
  # Every "positive" suppression is required to match the stack.
  for supp in positive:
    matcher = parser(supp.split("\n"), "positive_suppression")[0]
    assert matcher.Match(stack_lines), (
        "Suppression:\n%s\ndidn't match stack:\n%s" % (supp, stack))
  # Every "negative" suppression must fail to match it.
  for supp in negative:
    matcher = parser(supp.split("\n"), "negative_suppression")[0]
    assert not matcher.Match(stack_lines), (
        "Suppression:\n%s\ndid match stack:\n%s" % (supp, stack))
def TestFailPresubmit(supp_text, error_text, suppression_parser=None):
  """A helper function for SelfTest() that verifies a presubmit check fires.

  Args:
    supp_text: suppression text to parse.
    error_text: text of the presubmit error we expect to find.
    suppression_parser: optional arg for the suppression parser, default is
      ReadValgrindStyleSuppressions.
  """
  if not suppression_parser:
    suppression_parser = ReadValgrindStyleSuppressions
  try:
    supps = suppression_parser(supp_text.split("\n"), "<presubmit suppression>")
  # FIX: use the 'except ... as e' form — 'except SuppressionError, e' is a
  # SyntaxError on Python 3, while 'as' is accepted from Python 2.6 onwards.
  except SuppressionError as e:
    # If parsing raised an exception, match the error text here.
    assert error_text in str(e), (
        "presubmit text %r not in SuppressionError:\n%r" %
        (error_text, str(e)))
  else:
    # Otherwise, run the presubmit checks over the supps. We expect a single
    # error that has text matching error_text.
    errors = PresubmitCheckSuppressions(supps)
    assert len(errors) == 1, (
        "expected exactly one presubmit error, got:\n%s" % errors)
    assert error_text in str(errors[0]), (
        "presubmit text %r not in SuppressionError:\n%r" %
        (error_text, str(errors[0])))
def SelfTest():
  """Tests the Suppression.Match() capabilities.

  Exercises valgrind-style (memcheck/heapcheck/tsan) and DrMemory-style
  suppression matching, the presubmit checks, FilenameToTool, and the
  __str__ round-trips.  Raises AssertionError on any failure.
  """
  # --- Valgrind-style test stacks, one per tool/error type. ---
  test_memcheck_stack_1 = """{
test
Memcheck:Leak
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
  test_memcheck_stack_2 = """{
test
Memcheck:Uninitialized
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
  test_memcheck_stack_3 = """{
test
Memcheck:Unaddressable
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
  test_memcheck_stack_4 = """{
test
Memcheck:Addr4
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
  test_heapcheck_stack = """{
test
Heapcheck:Leak
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
  test_tsan_stack = """{
test
ThreadSanitizer:Race
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
  positive_memcheck_suppressions_1 = [
      "{\nzzz\nMemcheck:Leak\nfun:absolutly\n}",
      "{\nzzz\nMemcheck:Leak\nfun:ab*ly\n}",
      "{\nzzz\nMemcheck:Leak\nfun:absolutly\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Leak\n...\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Leak\n...\nfun:detection\n}",
      "{\nzzz\nMemcheck:Leak\nfun:absolutly\n...\nfun:detection\n}",
      "{\nzzz\nMemcheck:Leak\nfun:ab*ly\n...\nfun:detection\n}",
      "{\nzzz\nMemcheck:Leak\n...\nobj:condition\n}",
      "{\nzzz\nMemcheck:Leak\n...\nobj:condition\nfun:detection\n}",
      "{\nzzz\nMemcheck:Leak\n...\nfun:brilliant\nobj:condition\n}",
  ]
  positive_memcheck_suppressions_2 = [
      "{\nzzz\nMemcheck:Uninitialized\nfun:absolutly\n}",
      "{\nzzz\nMemcheck:Uninitialized\nfun:ab*ly\n}",
      "{\nzzz\nMemcheck:Uninitialized\nfun:absolutly\nfun:brilliant\n}",
      # Legacy suppression types
      "{\nzzz\nMemcheck:Value1\n...\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Cond\n...\nfun:detection\n}",
      "{\nzzz\nMemcheck:Value8\nfun:absolutly\nfun:brilliant\n}",
  ]
  positive_memcheck_suppressions_3 = [
      "{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\n}",
      "{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\nfun:brilliant\n}",
      # Legacy suppression types
      "{\nzzz\nMemcheck:Addr1\n...\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Addr8\n...\nfun:detection\n}",
  ]
  positive_memcheck_suppressions_4 = [
      "{\nzzz\nMemcheck:Addr4\nfun:absolutly\n}",
      "{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\n}",
      "{\nzzz\nMemcheck:Addr4\nfun:absolutly\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Unaddressable\n...\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Addr4\n...\nfun:detection\n}",
  ]
  positive_heapcheck_suppressions = [
      "{\nzzz\nHeapcheck:Leak\n...\nobj:condition\n}",
      "{\nzzz\nHeapcheck:Leak\nfun:absolutly\n}",
  ]
  positive_tsan_suppressions = [
      "{\nzzz\nThreadSanitizer:Race\n...\nobj:condition\n}",
      "{\nzzz\nThreadSanitizer:Race\nfun:absolutly\n}",
  ]
  negative_memcheck_suppressions_1 = [
      "{\nzzz\nMemcheck:Leak\nfun:abnormal\n}",
      "{\nzzz\nMemcheck:Leak\nfun:ab*liant\n}",
      "{\nzzz\nMemcheck:Leak\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Leak\nobj:condition\n}",
      "{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
  ]
  negative_memcheck_suppressions_2 = [
      "{\nzzz\nMemcheck:Cond\nfun:abnormal\n}",
      "{\nzzz\nMemcheck:Value2\nfun:abnormal\n}",
      "{\nzzz\nMemcheck:Uninitialized\nfun:ab*liant\n}",
      "{\nzzz\nMemcheck:Value4\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Leak\nobj:condition\n}",
      "{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Unaddressable\nfun:brilliant\n}",
  ]
  negative_memcheck_suppressions_3 = [
      "{\nzzz\nMemcheck:Addr1\nfun:abnormal\n}",
      "{\nzzz\nMemcheck:Uninitialized\nfun:absolutly\n}",
      "{\nzzz\nMemcheck:Addr2\nfun:ab*liant\n}",
      "{\nzzz\nMemcheck:Value4\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Leak\nobj:condition\n}",
      "{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
  ]
  negative_memcheck_suppressions_4 = [
      "{\nzzz\nMemcheck:Addr1\nfun:abnormal\n}",
      "{\nzzz\nMemcheck:Addr4\nfun:abnormal\n}",
      "{\nzzz\nMemcheck:Unaddressable\nfun:abnormal\n}",
      "{\nzzz\nMemcheck:Addr1\nfun:absolutly\n}",
      "{\nzzz\nMemcheck:Addr2\nfun:ab*liant\n}",
      "{\nzzz\nMemcheck:Value4\nfun:brilliant\n}",
      "{\nzzz\nMemcheck:Leak\nobj:condition\n}",
      "{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
  ]
  negative_heapcheck_suppressions = [
      "{\nzzz\nMemcheck:Leak\nfun:absolutly\n}",
      "{\nzzz\nHeapcheck:Leak\nfun:brilliant\n}",
  ]
  negative_tsan_suppressions = [
      "{\nzzz\nThreadSanitizer:Leak\nfun:absolutly\n}",
      "{\nzzz\nThreadSanitizer:Race\nfun:brilliant\n}",
  ]
  TestStack(test_memcheck_stack_1,
            positive_memcheck_suppressions_1,
            negative_memcheck_suppressions_1)
  TestStack(test_memcheck_stack_2,
            positive_memcheck_suppressions_2,
            negative_memcheck_suppressions_2)
  TestStack(test_memcheck_stack_3,
            positive_memcheck_suppressions_3,
            negative_memcheck_suppressions_3)
  TestStack(test_memcheck_stack_4,
            positive_memcheck_suppressions_4,
            negative_memcheck_suppressions_4)
  TestStack(test_heapcheck_stack, positive_heapcheck_suppressions,
            negative_heapcheck_suppressions)
  TestStack(test_tsan_stack, positive_tsan_suppressions,
            negative_tsan_suppressions)
  # TODO(timurrrr): add TestFailPresubmit tests.
  ### DrMemory self tests.
  # http://crbug.com/96010 suppression.
  stack_96010 = """{
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
*!TestingProfile::FinishInit
*!TestingProfile::TestingProfile
*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody
*!testing::Test::Run
}"""
  suppress_96010 = [
      "UNADDRESSABLE ACCESS\nname=zzz\n...\n*!testing::Test::Run\n",
      ("UNADDRESSABLE ACCESS\nname=zzz\n...\n" +
       "*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody\n"),
      "UNADDRESSABLE ACCESS\nname=zzz\n...\n*!BrowserAboutHandlerTest*\n",
      "UNADDRESSABLE ACCESS\nname=zzz\n*!TestingProfile::FinishInit\n",
      # No name should be needed
      "UNADDRESSABLE ACCESS\n*!TestingProfile::FinishInit\n",
      # Whole trace
      ("UNADDRESSABLE ACCESS\n" +
       "*!TestingProfile::FinishInit\n" +
       "*!TestingProfile::TestingProfile\n" +
       "*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody\n" +
       "*!testing::Test::Run\n"),
  ]
  negative_96010 = [
      # Wrong type
      "UNINITIALIZED READ\nname=zzz\n*!TestingProfile::FinishInit\n",
      # No ellipsis
      "UNADDRESSABLE ACCESS\nname=zzz\n*!BrowserAboutHandlerTest*\n",
  ]
  TestStack(stack_96010, suppress_96010, negative_96010,
            suppression_parser=ReadDrMemorySuppressions)
  # Invalid heap arg
  stack_invalid = """{
INVALID HEAP ARGUMENT
name=asdf
*!foo
}"""
  suppress_invalid = [
      "INVALID HEAP ARGUMENT\n*!foo\n",
  ]
  negative_invalid = [
      "UNADDRESSABLE ACCESS\n*!foo\n",
  ]
  TestStack(stack_invalid, suppress_invalid, negative_invalid,
            suppression_parser=ReadDrMemorySuppressions)
  # Suppress only ntdll
  stack_in_ntdll = """{
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
ntdll.dll!RtlTryEnterCriticalSection
}"""
  stack_not_ntdll = """{
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
notntdll.dll!RtlTryEnterCriticalSection
}"""
  suppress_in_ntdll = [
      "UNADDRESSABLE ACCESS\nntdll.dll!RtlTryEnterCriticalSection\n",
  ]
  suppress_in_any = [
      "UNADDRESSABLE ACCESS\n*!RtlTryEnterCriticalSection\n",
  ]
  TestStack(stack_in_ntdll, suppress_in_ntdll + suppress_in_any, [],
            suppression_parser=ReadDrMemorySuppressions)
  # Make sure we don't wildcard away the "not" part and match ntdll.dll by
  # accident.
  TestStack(stack_not_ntdll, suppress_in_any, suppress_in_ntdll,
            suppression_parser=ReadDrMemorySuppressions)
  # Suppress a POSSIBLE LEAK with LEAK.
  stack_foo_possible = """{
POSSIBLE LEAK
name=foo possible
*!foo
}"""
  suppress_foo_possible = [ "POSSIBLE LEAK\n*!foo\n" ]
  suppress_foo_leak = [ "LEAK\n*!foo\n" ]
  TestStack(stack_foo_possible, suppress_foo_possible + suppress_foo_leak, [],
            suppression_parser=ReadDrMemorySuppressions)
  # Don't suppress LEAK with POSSIBLE LEAK.
  stack_foo_leak = """{
LEAK
name=foo leak
*!foo
}"""
  TestStack(stack_foo_leak, suppress_foo_leak, suppress_foo_possible,
            suppression_parser=ReadDrMemorySuppressions)
  # Test case insensitivity of module names.
  stack_user32_mixed_case = """{
LEAK
name=<insert>
USER32.dll!foo
user32.DLL!bar
user32.dll!baz
}"""
  suppress_user32 = [  # Module name case doesn't matter.
      "LEAK\nuser32.dll!foo\nuser32.dll!bar\nuser32.dll!baz\n",
      "LEAK\nUSER32.DLL!foo\nUSER32.DLL!bar\nUSER32.DLL!baz\n",
  ]
  no_suppress_user32 = [  # Function name case matters.
      "LEAK\nuser32.dll!FOO\nuser32.dll!BAR\nuser32.dll!BAZ\n",
      "LEAK\nUSER32.DLL!FOO\nUSER32.DLL!BAR\nUSER32.DLL!BAZ\n",
  ]
  TestStack(stack_user32_mixed_case, suppress_user32, no_suppress_user32,
            suppression_parser=ReadDrMemorySuppressions)
  # Test mod!... frames.
  stack_kernel32_through_ntdll = """{
LEAK
name=<insert>
kernel32.dll!foo
KERNEL32.dll!bar
kernel32.DLL!baz
ntdll.dll!quux
}"""
  suppress_mod_ellipsis = [
      "LEAK\nkernel32.dll!...\nntdll.dll!quux\n",
      "LEAK\nKERNEL32.DLL!...\nntdll.dll!quux\n",
  ]
  no_suppress_mod_ellipsis = [
      # Need one or more matching frames, not zero, unlike regular ellipsis.
      "LEAK\nuser32.dll!...\nkernel32.dll!...\nntdll.dll!quux\n",
  ]
  TestStack(stack_kernel32_through_ntdll, suppress_mod_ellipsis,
            no_suppress_mod_ellipsis,
            suppression_parser=ReadDrMemorySuppressions)
  # Test that the presubmit checks work.
  forgot_to_name = """
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
ntdll.dll!RtlTryEnterCriticalSection
"""
  TestFailPresubmit(forgot_to_name, 'forgotten to put a suppression',
                    suppression_parser=ReadDrMemorySuppressions)
  named_twice = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
*!foo
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
*!bar
"""
  TestFailPresubmit(named_twice, 'defined more than once',
                    suppression_parser=ReadDrMemorySuppressions)
  forgot_stack = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
"""
  TestFailPresubmit(forgot_stack, 'has no stack frames',
                    suppression_parser=ReadDrMemorySuppressions)
  ends_in_ellipsis = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
ntdll.dll!RtlTryEnterCriticalSection
...
"""
  TestFailPresubmit(ends_in_ellipsis, 'ends in an ellipsis',
                    suppression_parser=ReadDrMemorySuppressions)
  bad_stack_frame = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
fun:memcheck_style_frame
"""
  TestFailPresubmit(bad_stack_frame, 'Unexpected stack frame pattern',
                    suppression_parser=ReadDrMemorySuppressions)
  # Test FilenameToTool.
  filenames_to_tools = {
      "tools/heapcheck/suppressions.txt": "heapcheck",
      "tools/valgrind/tsan/suppressions.txt": "tsan",
      "tools/valgrind/drmemory/suppressions.txt": "drmemory",
      "tools/valgrind/drmemory/suppressions_full.txt": "drmemory",
      "tools/valgrind/memcheck/suppressions.txt": "memcheck",
      "tools/valgrind/memcheck/suppressions_mac.txt": "memcheck",
      "asdf/tools/valgrind/memcheck/suppressions_mac.txt": "memcheck",
      "foo/bar/baz/tools/valgrind/memcheck/suppressions_mac.txt": "memcheck",
      "foo/bar/baz/tools/valgrind/suppressions.txt": None,
      "tools/valgrind/suppressions.txt": None,
  }
  for (filename, expected_tool) in filenames_to_tools.items():
    # BUG FIX: str.replace() returns a new string; the original discarded the
    # result, so the path was never actually made native as intended.
    filename = filename.replace('/', os.sep)  # Make the path look native.
    tool = FilenameToTool(filename)
    assert tool == expected_tool, (
        "failed to get expected tool for filename %r, expected %s, got %s" %
        (filename, expected_tool, tool))
  # Test ValgrindStyleSuppression.__str__.
  supp = ValgrindStyleSuppression("http://crbug.com/1234", "Memcheck:Leak",
                                  ["...", "fun:foo"], "supp.txt:1")
  # Intentional 3-space indent. =/
  supp_str = ("{\n"
              "   http://crbug.com/1234\n"
              "   Memcheck:Leak\n"
              "   ...\n"
              "   fun:foo\n"
              "}\n")
  assert str(supp) == supp_str, (
      "str(supp) != supp_str:\nleft: %s\nright: %s" % (str(supp), supp_str))
  # Test DrMemorySuppression.__str__.
  supp = DrMemorySuppression(
      "http://crbug.com/1234", "LEAK", None, ["...", "*!foo"], "supp.txt:1")
  supp_str = ("LEAK\n"
              "name=http://crbug.com/1234\n"
              "...\n"
              "*!foo\n")
  assert str(supp) == supp_str, (
      "str(supp) != supp_str:\nleft: %s\nright: %s" % (str(supp), supp_str))
  supp = DrMemorySuppression(
      "http://crbug.com/1234", "UNINITIALIZED READ", "test 0x08(%eax) $0x01",
      ["ntdll.dll!*", "*!foo"], "supp.txt:1")
  supp_str = ("UNINITIALIZED READ\n"
              "name=http://crbug.com/1234\n"
              "instruction=test 0x08(%eax) $0x01\n"
              "ntdll.dll!*\n"
              "*!foo\n")
  assert str(supp) == supp_str, (
      "str(supp) != supp_str:\nleft: %s\nright: %s" % (str(supp), supp_str))
if __name__ == '__main__':
  # Run the self-tests when executed directly.
  SelfTest()
  # FIX: print() call form works on both Python 2 and Python 3; the bare
  # 'print' statement is a SyntaxError on Python 3.
  print('PASS')
from steve.backend.sqlitedb import SDB
from steve.system import System
class Constellation(object):
    """A single EVE constellation, lazily resolving its solar systems.

    Wraps one row of the static map export; ``data`` is indexed positionally
    in column order.
    """

    def __init__(self, universe, data):
        """Build a constellation from a raw DB row.

        Args:
            universe: parent universe object; must expose ``regions``.
            data: sequence of 14 column values (regionID, constellationID,
                name, x, y, z, bounding-box min/max per axis, factionID,
                radius).
        """
        # BUG FIX: the original never stored ``universe``, so the ``system``
        # and ``region`` properties raised AttributeError when accessed.
        self.universe = universe
        self.regionID = data[0]
        self.uid = data[1]
        self.name = data[2]
        self.x = data[3]
        self.y = data[4]
        self.z = data[5]
        self.xMin = data[6]
        self.xMax = data[7]
        self.yMin = data[8]
        self.yMax = data[9]
        self.zMin = data[10]
        self.zMax = data[11]
        self.factionID = data[12]
        self.radius = data[13]
        # Lazy cache of System objects, keyed by both name and uid.
        self._systems = {}

    @property
    def system(self):
        """Dict of this constellation's systems, keyed by name AND uid.

        Loaded from the database on first access and cached afterwards.
        """
        # BUG FIX: the original tested ``self._constellations`` (an attribute
        # that was never defined) and used a bare '%' in the format string,
        # which raises ValueError; '%s' is the correct placeholder.
        if len(self._systems) == 0:
            query = ('SELECT * from mapSolarSystems '
                     'WHERE constellationID = %s' % self.uid)
            for entry in SDB.queryAll(query):
                system = System(self.universe, entry)
                self._systems[system.name] = system
                self._systems[system.uid] = system
        return self._systems

    @property
    def region(self):
        """The Region object this constellation belongs to."""
        return self.universe.regions[self.regionID]
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/lineplots.py
__version__=''' $Id$ '''
__doc__="""This module defines a very preliminary Line Plot example."""
import string, time
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
from reportlab.graphics.shapes import Drawing, Group, Rect, Line, PolyLine, Polygon, _SetKeyWordArgs
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.charts.axes import XValueAxis, YValueAxis, AdjYValueAxis, NormalDateXValueAxis
from reportlab.graphics.charts.utils import *
from reportlab.graphics.widgets.markers import uSymbol2Symbol, isSymbol, makeMarker
from reportlab.graphics.widgets.grids import Grid, DoubleGrid, ShadedRect, ShadedPolygon
from reportlab.pdfbase.pdfmetrics import stringWidth, getFont
from reportlab.graphics.charts.areas import PlotArea
# This might be moved again from here...
class LinePlotProperties(PropHolder):
    """Per-series style properties for one line in a LinePlot.

    Instances live inside the plot's ``lines`` TypedPropertyCollection;
    the _attrMap below declares the validated public attributes.
    """
    _attrMap = AttrMap(
        strokeWidth = AttrMapValue(isNumber, desc='Width of a line.'),
        strokeColor = AttrMapValue(isColorOrNone, desc='Color of a line.'),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array of a line.'),
        symbol = AttrMapValue(None, desc='Widget placed at data points.',advancedUsage=1),
        shader = AttrMapValue(None, desc='Shader Class.',advancedUsage=1),
        filler = AttrMapValue(None, desc='Filler Class.',advancedUsage=1),
        name = AttrMapValue(isStringOrNone, desc='Name of the line.'),
        inFill = AttrMapValue(isBoolean, desc='If true flood fill to x axis',advancedUsage=1),
        )
class Shader(_SetKeyWordArgs):
    """Base class for line-plot shaders (attached via a line's ``shader``)."""
    _attrMap = AttrMap(BASE=PlotArea,
        vertical = AttrMapValue(isBoolean, desc='If true shade to x axis'),
        colors = AttrMapValue(SequenceOf(isColorOrNone,lo=2,hi=2), desc='(AxisColor, LineColor)'),
        )

    def shade(self, lp, g, rowNo, rowColor, row):
        # Resolve the (axis colour, line colour) pair: explicit ``colors``
        # wins, otherwise fall back to the plot's fillColor / the row colour.
        c = [None,None]
        c = getattr(self,'colors',c) or c
        if not c[0]: c[0] = getattr(lp,'fillColor',colors.white)
        if not c[1]: c[1] = rowColor
        # NOTE(review): the base class computes the colours but adds nothing
        # to ``g``; presumably concrete shaders override shade() — TODO confirm.
class NoFiller:
    """Null filler: satisfies the filler interface but draws nothing."""
    def fill(self, lp, g, rowNo, rowColor, points):
        # Intentionally a no-op.
        pass
class Filler:
    '''mixin providing simple polygon fill'''
    _attrMap = AttrMap(
        fillColor = AttrMapValue(isColorOrNone, desc='filler interior color'),
        strokeColor = AttrMapValue(isColorOrNone, desc='filler edge color'),
        strokeWidth = AttrMapValue(isNumberOrNone, desc='filler edge width'),
        )

    def __init__(self,**kw):
        # Replaces the instance __dict__ wholesale with the keyword args;
        # any attribute not passed here simply does not exist on the instance.
        self.__dict__ = kw

    def fill(self, lp, g, rowNo, rowColor, points):
        # Draw the closed fill polygon; per-instance attributes override the
        # row colour, edge width defaults to a hairline 0.1.
        g.add(Polygon(points,
            fillColor=getattr(self,'fillColor',rowColor),
            strokeColor=getattr(self,'strokeColor',rowColor),
            strokeWidth=getattr(self,'strokeWidth',0.1)))
class ShadedPolyFiller(Filler,ShadedPolygon):
    """Filler whose polygon is drawn shaded (behaviour from ShadedPolygon)."""
    pass
class PolyFiller(Filler,Polygon):
    """Filler whose polygon is drawn plain (behaviour from Polygon)."""
    pass
from reportlab.graphics.charts.linecharts import AbstractLineChart
class LinePlot(AbstractLineChart):
    """Line plot with multiple lines.

    Both x- and y-axis are value axis (so there are no seperate
    X and Y versions of this class).
    """
    _attrMap = AttrMap(BASE=PlotArea,
        reversePlotOrder = AttrMapValue(isBoolean, desc='If true reverse plot order.',advancedUsage=1),
        lineLabelNudge = AttrMapValue(isNumber, desc='Distance between a data point and its label.',advancedUsage=1),
        lineLabels = AttrMapValue(None, desc='Handle to the list of data point labels.'),
        lineLabelFormat = AttrMapValue(None, desc='Formatting string or function used for data point labels.'),
        lineLabelArray = AttrMapValue(None, desc='explicit array of line label values, must match size of data if present.'),
        joinedLines = AttrMapValue(isNumber, desc='Display data points joined with lines if true.'),
        strokeColor = AttrMapValue(isColorOrNone, desc='Color used for background border of plot area.'),
        fillColor = AttrMapValue(isColorOrNone, desc='Color used for background interior of plot area.'),
        lines = AttrMapValue(None, desc='Handle of the lines.'),
        xValueAxis = AttrMapValue(None, desc='Handle of the x axis.'),
        yValueAxis = AttrMapValue(None, desc='Handle of the y axis.'),
        data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) x/y tuples.'),
        annotations = AttrMapValue(None, desc='list of callables, will be called with self, xscale, yscale.',advancedUsage=1),
        behindAxes = AttrMapValue(isBoolean, desc='If true use separate line group.',advancedUsage=1),
        gridFirst = AttrMapValue(isBoolean, desc='If true use draw grids before axes.',advancedUsage=1),
        )

    def __init__(self):
        PlotArea.__init__(self)
        self.reversePlotOrder = 0
        self.xValueAxis = XValueAxis()
        self.yValueAxis = YValueAxis()
        # this defines two series of 3 points. Just an example.
        self.data = [
            ((1,1), (2,2), (2.5,1), (3,3), (4,5)),
            ((1,2), (2,3), (2.5,2), (3,4), (4,6))
            ]
        self.lines = TypedPropertyCollection(LinePlotProperties)
        self.lines.strokeWidth = 1
        self.lines[0].strokeColor = colors.red
        self.lines[1].strokeColor = colors.blue
        self.lineLabels = TypedPropertyCollection(Label)
        self.lineLabelFormat = None
        self.lineLabelArray = None
        # this says whether the origin is inside or outside
        # the bar - +10 means put the origin ten points
        # above the tip of the bar if value > 0, or ten
        # points inside if bar value < 0. This is different
        # to label dx/dy which are not dependent on the
        # sign of the data.
        self.lineLabelNudge = 10
        # if you have multiple series, by default they butt
        # together.
        # New line chart attributes.
        self.joinedLines = 1 # Connect items with straight lines.
        #private attributes
        self._inFill = None
        self.annotations = []
        self.behindAxes = 0
        self.gridFirst = 0

    def demo(self):
        """Shows basic use of a line chart."""
        drawing = Drawing(400, 200)
        data = [
            ((1,1), (2,2), (2.5,1), (3,3), (4,5)),
            ((1,2), (2,3), (2.5,2), (3.5,5), (4,6))
            ]
        lp = LinePlot()
        lp.x = 50
        lp.y = 50
        lp.height = 125
        lp.width = 300
        lp.data = data
        lp.joinedLines = 1
        lp.lineLabelFormat = '%2.0f'
        lp.strokeColor = colors.black
        lp.lines[0].strokeColor = colors.red
        lp.lines[0].symbol = makeMarker('FilledCircle')
        lp.lines[1].strokeColor = colors.blue
        lp.lines[1].symbol = makeMarker('FilledDiamond')
        lp.xValueAxis.valueMin = 0
        lp.xValueAxis.valueMax = 5
        lp.xValueAxis.valueStep = 1
        lp.yValueAxis.valueMin = 0
        lp.yValueAxis.valueMax = 7
        lp.yValueAxis.valueStep = 1
        drawing.add(lp)
        return drawing

    def calcPositions(self):
        """Works out where they go.

        Sets an attribute _positions which is a list of
        lists of (x, y) matching the data.
        """
        self._seriesCount = len(self.data)
        self._rowLength = max(list(map(len,self.data)))
        self._positions = []
        for rowNo in range(len(self.data)):
            line = []
            for colNo in range(len(self.data[rowNo])):
                datum = self.data[rowNo][colNo] # x,y value
                if isinstance(datum[0],str):
                    # String x-values are parsed as dates and scaled via
                    # their epoch-seconds representation.
                    x = self.xValueAxis.scale(mktime(mkTimeTuple(datum[0])))
                else:
                    x = self.xValueAxis.scale(datum[0])
                y = self.yValueAxis.scale(datum[1])
                line.append((x, y))
            self._positions.append(line)

    def _innerDrawLabel(self, rowNo, colNo, x, y):
        "Draw a label for a given item in the list; returns the Label or None."
        labelFmt = self.lineLabelFormat
        labelValue = self.data[rowNo][colNo][1]  # label always shows the y value
        if labelFmt is None:
            labelText = None
        elif isinstance(labelFmt,str):
            # The magic string 'values' pulls labels from lineLabelArray.
            if labelFmt == 'values':
                labelText = self.lineLabelArray[rowNo][colNo]
            else:
                labelText = labelFmt % labelValue
        elif hasattr(labelFmt,'__call__'):
            # Callable formatters: plain callables get the value, ones marked
            # __labelFmtEX__ get full positional context.
            if not hasattr(labelFmt,'__labelFmtEX__'):
                labelText = labelFmt(labelValue)
            else:
                labelText = labelFmt(self,rowNo,colNo,x,y)
        else:
            raise ValueError("Unknown formatter type %s, expected string or function"% labelFmt)
        if labelText:
            label = self.lineLabels[(rowNo, colNo)]
            if not label.visible: return
            #hack to make sure labels are outside the bar
            if y > 0:
                label.setOrigin(x, y + self.lineLabelNudge)
            else:
                label.setOrigin(x, y - self.lineLabelNudge)
            label.setText(labelText)
        else:
            label = None
        return label

    def drawLabel(self, G, rowNo, colNo, x, y):
        '''Draw a label for a given item in the list.
        G must have an add method'''
        G.add(self._innerDrawLabel(rowNo,colNo,x,y))

    def makeLines(self):
        """Build and return a Group holding lines, fills, symbols and labels."""
        g = Group()
        bubblePlot = getattr(self,'_bubblePlot',None)
        if bubblePlot:
            yA = self.yValueAxis
            xA = self.xValueAxis
            # Bubble radii are scaled relative to the largest bubble value.
            bubbleR = min(yA._bubbleRadius,xA._bubbleRadius)
            bubbleMax = xA._bubbleMax
        labelFmt = self.lineLabelFormat
        P = list(range(len(self._positions)))
        if self.reversePlotOrder: P.reverse()
        inFill = getattr(self,'_inFill',None)
        styleCount = len(self.lines)
        # Precompute flood-fill extent if any series (or the whole plot)
        # requests filling down to the x axis.
        if inFill or [rowNo for rowNo in P if getattr(self.lines[rowNo%styleCount],'inFill',False)]:
            inFillY = self.xValueAxis._y
            inFillX0 = self.yValueAxis._x
            inFillX1 = inFillX0 + self.xValueAxis._length
            inFillG = getattr(self,'_inFillG',g)
        lG = getattr(self,'_lineG',g)
        # Iterate over data rows.
        for rowNo in P:
            row = self._positions[rowNo]
            rowStyle = self.lines[rowNo % styleCount]
            rowColor = getattr(rowStyle,'strokeColor',None)
            dash = getattr(rowStyle, 'strokeDashArray', None)
            # Per-row width wins over the collection-wide default.
            if hasattr(rowStyle, 'strokeWidth'):
                width = rowStyle.strokeWidth
            elif hasattr(self.lines, 'strokeWidth'):
                width = self.lines.strokeWidth
            else:
                width = None
            # Iterate over data columns.
            if self.joinedLines:
                points = []
                for xy in row:
                    points += [xy[0], xy[1]]
                if inFill or getattr(rowStyle,'inFill',False):
                    # Close the polygon down to the x axis on both ends.
                    fpoints = [inFillX0,inFillY] + points + [inFillX1,inFillY]
                    filler = getattr(rowStyle, 'filler', None)
                    if filler:
                        filler.fill(self,inFillG,rowNo,rowColor,fpoints)
                    else:
                        inFillG.add(Polygon(fpoints,fillColor=rowColor,strokeColor=rowColor,strokeWidth=width or 0.1))
                if inFill in (None,0,2):
                    line = PolyLine(points,strokeColor=rowColor,strokeLineCap=0,strokeLineJoin=1)
                    if width:
                        line.strokeWidth = width
                    if dash:
                        line.strokeDashArray = dash
                    lG.add(line)
            # Resolve the marker symbol: per-row, then collection-wide.
            if hasattr(rowStyle, 'symbol'):
                uSymbol = rowStyle.symbol
            elif hasattr(self.lines, 'symbol'):
                uSymbol = self.lines.symbol
            else:
                uSymbol = None
            if uSymbol:
                if bubblePlot: drow = self.data[rowNo]
                for j,xy in enumerate(row):
                    symbol = uSymbol2Symbol(uSymbol,xy[0],xy[1],rowColor)
                    if symbol:
                        if bubblePlot:
                            # Area-proportional sizing: radius scales with sqrt.
                            symbol.size = bubbleR*(drow[j][2]/bubbleMax)**0.5
                        g.add(symbol)
            else:
                # Fall back to per-point symbols declared on individual
                # (row, col) line properties.
                if bubblePlot: drow = self.data[rowNo]
                for j,xy in enumerate(row):
                    usymbol = getattr(self.lines[rowNo,j],'symbol',None)
                    if not usymbol: continue
                    # NOTE(review): this passes uSymbol (None in this branch)
                    # rather than the per-point usymbol just fetched; looks
                    # like it should be usymbol — TODO confirm upstream.
                    symbol = uSymbol2Symbol(uSymbol,xy[0],xy[1],rowColor)
                    if symbol:
                        if bubblePlot:
                            symbol.size = bubbleR*(drow[j][2]/bubbleMax)**0.5
                        g.add(symbol)
            # Draw data labels.
            for colNo in range(len(row)):
                x1, y1 = row[colNo]
                self.drawLabel(g, rowNo, colNo, x1, y1)
            shader = getattr(rowStyle, 'shader', None)
            if shader: shader.shade(self,g,rowNo,rowColor,row)
        return g

    def draw(self):
        """Configure both axes, then assemble background, grids, axes,
        annotations and the plotted lines into a single Group."""
        yA = self.yValueAxis
        xA = self.xValueAxis
        if getattr(self,'_bubblePlot',None):
            yA._bubblePlot = xA._bubblePlot = 1
        yA.setPosition(self.x, self.y, self.height)
        if yA: yA.joinAxis = xA
        if xA: xA.joinAxis = yA
        yA.configure(self.data)
        # if zero is in chart, put x axis there, otherwise use bottom.
        xAxisCrossesAt = yA.scale(0)
        if ((xAxisCrossesAt > self.y + self.height) or (xAxisCrossesAt < self.y)):
            y = self.y
        else:
            y = xAxisCrossesAt
        xA.setPosition(self.x, y, self.width)
        xA.configure(self.data)
        self.calcPositions()
        g = Group()
        g.add(self.makeBackground())
        # Optional separate groups let fills / lines render behind the axes.
        if self._inFill or self.behindAxes:
            xA._joinToAxis()
            if self._inFill:
                self._inFillG = Group()
                g.add(self._inFillG)
            if self.behindAxes:
                self._lineG = Group()
                g.add(self._lineG)
        xA._joinToAxis()
        yA._joinToAxis()
        # Collect coordinates the grids should skip (axis lines and,
        # depending on skipGrid, the top/bottom edges).
        xAex = xA.visibleAxis and [xA._y] or []
        yAex = yA.visibleAxis and [yA._x] or []
        skipGrid = getattr(xA,'skipGrid','none')
        if skipGrid!=None:
            if skipGrid in ('both','top'):
                yAex.append(xA._x+xA._length)
            if skipGrid in ('both','bottom'):
                yAex.append(xA._x)
        skipGrid = getattr(yA,'skipGrid','none')
        if skipGrid!=None:
            if skipGrid in ('both','top'):
                xAex.append(yA._y+yA._length)
            if skipGrid in ('both','bottom'):
                xAex.append(yA._y)
        if self.gridFirst:
            xA.makeGrid(g,parent=self,dim=yA.getGridDims,exclude=yAex)
            yA.makeGrid(g,parent=self,dim=xA.getGridDims,exclude=xAex)
        g.add(xA.draw())
        g.add(yA.draw())
        if not self.gridFirst:
            xAdgl = getattr(xA,'drawGridLast',False)
            yAdgl = getattr(yA,'drawGridLast',False)
            if not xAdgl: xA.makeGrid(g,parent=self,dim=yA.getGridDims,exclude=yAex)
            if not yAdgl: yA.makeGrid(g,parent=self,dim=xA.getGridDims,exclude=xAex)
        # Annotations flagged beforeLines render under the data lines.
        annotations = getattr(self,'annotations',[])
        for a in annotations:
            if getattr(a,'beforeLines',None):
                g.add(a(self,xA.scale,yA.scale))
        g.add(self.makeLines())
        if not self.gridFirst:
            if xAdgl: xA.makeGrid(g,parent=self,dim=yA.getGridDims,exclude=yAex)
            if yAdgl: yA.makeGrid(g,parent=self,dim=xA.getGridDims,exclude=xAex)
        for a in annotations:
            if not getattr(a,'beforeLines',None):
                g.add(a(self,xA.scale,yA.scale))
        return g

    def addCrossHair(self,name,xv,yv,strokeColor=colors.black,strokeWidth=1,beforeLines=True):
        """Register (replacing any same-named one) an annotation that draws a
        full-width/full-height cross hair through data point (xv, yv)."""
        from reportlab.graphics.shapes import Group, Line
        # Drop any previously-registered annotation with the same name.
        annotations = [a for a in getattr(self,'annotations',[]) if getattr(a,'name',None)!=name]
        def annotation(self,xScale,yScale):
            x = xScale(xv)
            y = yScale(yv)
            g = Group()
            xA = xScale.__self__ #the x axis
            g.add(Line(xA._x,y,xA._x+xA._length,y,strokeColor=strokeColor,strokeWidth=strokeWidth))
            yA = yScale.__self__ #the y axis
            g.add(Line(x,yA._y,x,yA._y+yA._length,strokeColor=strokeColor,strokeWidth=strokeWidth))
            return g
        annotation.beforeLines = beforeLines
        annotations.append(annotation)
        self.annotations = annotations
class LinePlot3D(LinePlot):
    """LinePlot variant rendered with a 3-d oblique projection; each series
    is extruded to a slab of depth zDepth and offset by (theta_x, theta_y)."""
    _attrMap = AttrMap(BASE=LinePlot,
        theta_x = AttrMapValue(isNumber, desc='dx/dz'),
        theta_y = AttrMapValue(isNumber, desc='dy/dz'),
        zDepth = AttrMapValue(isNumber, desc='depth of an individual series'),
        zSpace = AttrMapValue(isNumber, desc='z gap around series'),
        )
    theta_x = .5
    theta_y = .5
    zDepth = 10
    zSpace = 3

    def calcPositions(self):
        # Base 2-d positions, then the overall projected depth offsets.
        LinePlot.calcPositions(self)
        nSeries = self._seriesCount
        zSpace = self.zSpace
        zDepth = self.zDepth
        # parallel_3d stacks each series at its own depth; otherwise all
        # series share one slab.
        if self.xValueAxis.style=='parallel_3d':
            _3d_depth = nSeries*zDepth+(nSeries+1)*zSpace
        else:
            _3d_depth = zDepth + 2*zSpace
        self._3d_dx = self.theta_x*_3d_depth
        self._3d_dy = self.theta_y*_3d_depth

    def _calc_z0(self,rowNo):
        """Return the near-face z coordinate for series rowNo."""
        zSpace = self.zSpace
        if self.xValueAxis.style=='parallel_3d':
            z0 = rowNo*(self.zDepth+zSpace)+zSpace
        else:
            z0 = zSpace
        return z0

    def _zadjust(self,x,y,z):
        """Project (x, y, z) onto the drawing plane."""
        return x+z*self.theta_x, y+z*self.theta_y

    def makeLines(self):
        bubblePlot = getattr(self,'_bubblePlot',None)
        assert not bubblePlot, "_bubblePlot not supported for 3d yet"
        #if bubblePlot:
        # yA = self.yValueAxis
        # xA = self.xValueAxis
        # bubbleR = min(yA._bubbleRadius,xA._bubbleRadius)
        # bubbleMax = xA._bubbleMax
        labelFmt = self.lineLabelFormat
        positions = self._positions
        P = list(range(len(positions)))
        if self.reversePlotOrder: P.reverse()
        inFill = getattr(self,'_inFill',None)
        assert not inFill, "inFill not supported for 3d yet"
        #if inFill:
        # inFillY = self.xValueAxis._y
        # inFillX0 = self.yValueAxis._x
        # inFillX1 = inFillX0 + self.xValueAxis._length
        # inFillG = getattr(self,'_inFillG',g)
        zDepth = self.zDepth
        _zadjust = self._zadjust
        theta_x = self.theta_x
        theta_y = self.theta_y
        # _FakeGroup collects (sort-key..., shape) tuples so the projected
        # pieces can be depth-sorted before being emitted.
        from reportlab.graphics.charts.linecharts import _FakeGroup
        F = _FakeGroup()
        from reportlab.graphics.charts.utils3d import _make_3d_line_info, find_intersections
        if self.xValueAxis.style!='parallel_3d':
            tileWidth = getattr(self,'_3d_tilewidth',1)
            # Optionally insert the crossing points of overlapping series so
            # depth sorting can switch at intersections.
            if getattr(self,'_find_intersections',None):
                from copy import copy
                fpositions = list(map(copy,positions))
                I = find_intersections(fpositions,small=tileWidth)
                ic = None
                for i,j,x,y in I:
                    if ic!=i:
                        ic = i
                        jc = 0
                    else:
                        jc+=1
                    fpositions[i].insert(j+jc,(x,y))
                tileWidth = None
            else:
                fpositions = positions
        else:
            tileWidth = None
            fpositions = positions
        # Iterate over data rows.
        styleCount = len(self.lines)
        for rowNo in P:
            row = positions[rowNo]
            n = len(row)
            rowStyle = self.lines[rowNo % styleCount]
            rowColor = rowStyle.strokeColor
            dash = getattr(rowStyle, 'strokeDashArray', None)
            z0 = self._calc_z0(rowNo)
            z1 = z0 + zDepth
            if hasattr(rowStyle, 'strokeWidth'):
                width = rowStyle.strokeWidth
            elif hasattr(self.lines, 'strokeWidth'):
                width = self.lines.strokeWidth
            else:
                width = None
            # Iterate over data columns.
            if self.joinedLines:
                if n:
                    frow = fpositions[rowNo]
                    x0, y0 = frow[0]
                    for colNo in range(1,len(frow)):
                        x1, y1 = frow[colNo]
                        _make_3d_line_info( F, x0, x1, y0, y1, z0, z1,
                                theta_x, theta_y,
                                rowColor, fillColorShaded=None, tileWidth=tileWidth,
                                strokeColor=None, strokeWidth=None, strokeDashArray=None,
                                shading=0.1)
                        x0, y0 = x1, y1
            if hasattr(rowStyle, 'symbol'):
                uSymbol = rowStyle.symbol
            elif hasattr(self.lines, 'symbol'):
                uSymbol = self.lines.symbol
            else:
                uSymbol = None
            if uSymbol:
                for xy in row:
                    # NOTE(review): row[colNo] reuses the stale colNo from the
                    # joinedLines loop instead of xy; looks like a bug — TODO
                    # confirm against upstream.
                    x1, y1 = row[colNo]
                    x1, y1 = _zadjust(x1,y1,z0)
                    symbol = uSymbol2Symbol(uSymbol,xy[0],xy[1],rowColor)
                    if symbol: F.add((1,z0,z0,x1,y1,symbol))
            # Draw data labels.
            for colNo in range(n):
                x1, y1 = row[colNo]
                x1, y1 = _zadjust(x1,y1,z0)
                L = self._innerDrawLabel(rowNo, colNo, x1, y1)
                if L: F.add((2,z0,z0,x1,y1,L))
        F.sort()
        g = Group()
        # Emit the depth-sorted shapes into a real Group.
        for v in F.value(): g.add(v[-1])
        return g
# Two parallel monthly index time series of (YYYYMMDD, value) pairs spanning
# Dec 1997 - Jun 2000; used as the default data for SimpleTimeSeriesPlot.
_monthlyIndexData = [[(19971202, 100.0),
                      (19971231, 100.1704367),
                      (19980131, 101.5639577),
                      (19980228, 102.1879927),
                      (19980331, 101.6337257),
                      (19980430, 102.7640446),
                      (19980531, 102.9198038),
                      (19980630, 103.25938789999999),
                      (19980731, 103.2516421),
                      (19980831, 105.4744329),
                      (19980930, 109.3242705),
                      (19981031, 111.9859291),
                      (19981130, 110.9184642),
                      (19981231, 110.9184642),
                      (19990131, 111.9882532),
                      (19990228, 109.7912614),
                      (19990331, 110.24189629999999),
                      (19990430, 110.4279321),
                      (19990531, 109.33955469999999),
                      (19990630, 108.2341748),
                      (19990731, 110.21294469999999),
                      (19990831, 110.9683062),
                      (19990930, 112.4425371),
                      (19991031, 112.7314032),
                      (19991130, 112.3509645),
                      (19991231, 112.3660659),
                      (20000131, 110.9255248),
                      (20000229, 110.5266306),
                      (20000331, 113.3116101),
                      (20000430, 111.0449133),
                      (20000531, 111.702717),
                      (20000630, 113.5832178)],
                     [(19971202, 100.0),
                      (19971231, 100.0),
                      (19980131, 100.8),
                      (19980228, 102.0),
                      (19980331, 101.9),
                      (19980430, 103.0),
                      (19980531, 103.0),
                      (19980630, 103.1),
                      (19980731, 103.1),
                      (19980831, 102.8),
                      (19980930, 105.6),
                      (19981031, 108.3),
                      (19981130, 108.1),
                      (19981231, 111.9),
                      (19990131, 113.1),
                      (19990228, 110.2),
                      (19990331, 111.8),
                      (19990430, 112.3),
                      (19990531, 110.1),
                      (19990630, 109.3),
                      (19990731, 111.2),
                      (19990831, 111.7),
                      (19990930, 112.6),
                      (19991031, 113.2),
                      (19991130, 113.9),
                      (19991231, 115.4),
                      (20000131, 112.7),
                      (20000229, 113.9),
                      (20000331, 115.8),
                      (20000430, 112.2),
                      (20000531, 112.6),
                      (20000630, 114.6)]]
class SimpleTimeSeriesPlot(LinePlot):
    """A customized version of LinePlot.
    It uses NormalDateXValueAxis() and YValueAxis() for the X and Y axes,
    and defaults its data to the module-level _monthlyIndexData sample.
    """
    def __init__(self):
        LinePlot.__init__(self)
        # Replace the stock x axis with one that understands yyyymmdd dates.
        self.xValueAxis = NormalDateXValueAxis()
        self.yValueAxis = YValueAxis()
        self.data = _monthlyIndexData
class GridLinePlot(SimpleTimeSeriesPlot):
    """A customized version of SimpleTimeSeriesPlot.
    It uses NormalDateXValueAxis() and YValueAxis() for the X and Y axes.
    The chart has a default grid background with thin horizontal lines
    aligned with the tickmarks (and labels). You can change the back-
    ground to be any Grid or ShadedRect, or scale the whole chart.
    If you do provide a background, you can specify the colours of the
    stripes with 'background.stripeColors'.
    """

    _attrMap = AttrMap(BASE=LinePlot,
        background = AttrMapValue(None, desc='Background for chart area (now Grid or ShadedRect).'),
        scaleFactor = AttrMapValue(isNumberOrNone, desc='Scalefactor to apply to whole drawing.'),
        )

    def __init__(self):
        from reportlab.lib import colors
        SimpleTimeSeriesPlot.__init__(self)
        self.scaleFactor = None   # None or 1 means "no scaling"
        # Default background: thin horizontal rules aligned with y ticks.
        self.background = Grid()
        self.background.orientation = 'horizontal'
        self.background.useRects = 0
        self.background.useLines = 1
        self.background.strokeWidth = 0.5
        self.background.strokeColor = colors.black

    def demo(self, drawing=None):
        """Build a two-series sample chart; returns the Drawing."""
        from reportlab.lib import colors
        if not drawing:
            drawing = Drawing(400, 200)
        lp = GridLinePlot()
        lp.x = 50
        lp.y = 50
        lp.height = 125
        lp.width = 300
        lp.data = _monthlyIndexData
        lp.joinedLines = 1
        lp.strokeColor = colors.black
        c0 = colors.PCMYKColor(100,65,0,30, spotName='PANTONE 288 CV', density=100)
        lp.lines[0].strokeColor = c0
        lp.lines[0].strokeWidth = 2
        lp.lines[0].strokeDashArray = None
        c1 = colors.PCMYKColor(0,79,91,0, spotName='PANTONE Wm Red CV', density=100)
        lp.lines[1].strokeColor = c1
        lp.lines[1].strokeWidth = 1
        lp.lines[1].strokeDashArray = [3,1]
        lp.xValueAxis.labels.fontSize = 10
        lp.xValueAxis.labels.textAnchor = 'start'
        lp.xValueAxis.labels.boxAnchor = 'w'
        lp.xValueAxis.labels.angle = -45
        lp.xValueAxis.labels.dx = 0
        lp.xValueAxis.labels.dy = -8
        lp.xValueAxis.xLabelFormat = '{mm}/{yy}'
        lp.yValueAxis.labelTextFormat = '%5d%% '
        lp.yValueAxis.tickLeft = 5
        lp.yValueAxis.labels.fontSize = 10
        lp.background = Grid()
        lp.background.stripeColors = [colors.pink, colors.lightblue]
        lp.background.orientation = 'vertical'
        drawing.add(lp, 'plot')
        return drawing

    def _deltaSteps(self, axis):
        """Return the gaps between consecutive scaled tick positions on *axis*.

        Used to make background grid stripes line up with the tickmarks.
        """
        pos = list(map(axis.scale, [axis._valueMin] + axis._tickValues))
        return [pos[i + 1] - pos[i] for i in range(len(pos) - 1)]

    def draw(self):
        """Configure axes and background, then assemble the chart Group."""
        xva, yva = self.xValueAxis, self.yValueAxis
        if xva: xva.joinAxis = yva
        if yva: yva.joinAxis = xva

        yva.setPosition(self.x, self.y, self.height)
        yva.configure(self.data)

        # If zero is in chart, put x axis there, otherwise use bottom.
        xAxisCrossesAt = yva.scale(0)
        if (xAxisCrossesAt > self.y + self.height) or (xAxisCrossesAt < self.y):
            y = self.y
        else:
            y = xAxisCrossesAt

        xva.setPosition(self.x, y, self.width)
        xva.configure(self.data)

        back = self.background
        if isinstance(back, Grid):
            if back.orientation == 'vertical' and xva._tickValues:
                back.deltaSteps = self._deltaSteps(xva)
            elif back.orientation == 'horizontal' and yva._tickValues:
                back.deltaSteps = self._deltaSteps(yva)
        elif isinstance(back, DoubleGrid):
            # Ideally, these lines would not be needed...
            for grid in (back.grid0, back.grid1):
                grid.x = self.x
                grid.y = self.y
                grid.width = self.width
                grid.height = self.height
                if grid.orientation == 'vertical' and xva._tickValues:
                    grid.deltaSteps = self._deltaSteps(xva)
                elif grid.orientation == 'horizontal' and yva._tickValues:
                    grid.deltaSteps = self._deltaSteps(yva)

        self.calcPositions()

        g = Group()
        # BUG FIX: the original referenced 'g' before assignment when
        # scaleFactor was set (the Drawing() creation was commented out),
        # raising NameError.  Scale the group about the origin instead.
        if self.scaleFactor and self.scaleFactor != 1:
            g.transform = (self.scaleFactor, 0, 0, self.scaleFactor, 0, 0)
        g.add(self.makeBackground())
        g.add(self.xValueAxis)
        g.add(self.yValueAxis)
        g.add(self.makeLines())

        return g
class AreaLinePlot(LinePlot):
    '''we're given data in the form [(X1,Y11,..Y1M)....(Xn,Yn1,...YnM)]'''#'
    def __init__(self):
        LinePlot.__init__(self)
        self._inFill = 1
        self.reversePlotOrder = 1
        self.data = [(1,20,100,30),(2,11,50,15),(3,15,70,40)]

    def draw(self):
        """Stack the Y columns cumulatively, draw, then restore the data."""
        original = self.data
        try:
            nrows = len(original)
            ncols = len(original[0])
            running = nrows * [0]
            stacked = []
            for col in range(1, ncols):
                series = []
                for row in range(nrows):
                    running[row] = running[row] + original[row][col]
                    series.append((original[row][0], running[row]))
                stacked.append(series)
            self.data = stacked
            return LinePlot.draw(self)
        finally:
            # Always restore the caller's data, even if drawing failed.
            self.data = original
class SplitLinePlot(AreaLinePlot):
    """AreaLinePlot variant with a NormalDate x axis and adjusting y axis.

    Ships with a long three-series sample dataset of
    (yyyymmdd, frac1, frac2, frac3) rows.
    """
    def __init__(self):
        AreaLinePlot.__init__(self)
        self.xValueAxis = NormalDateXValueAxis()
        self.yValueAxis = AdjYValueAxis()
        # Sample data: one (date, f1, f2, f3) tuple per month.
        self.data=[(20030601,0.95,0.05,0.0),(20030701,0.95,0.05,0.0),(20030801,0.95,0.05,0.0),(20030901,0.95,0.05,0.0),(20031001,0.95,0.05,0.0),(20031101,0.95,0.05,0.0),(20031201,0.95,0.05,0.0),(20040101,0.95,0.05,0.0),(20040201,0.95,0.05,0.0),(20040301,0.95,0.05,0.0),(20040401,0.95,0.05,0.0),(20040501,0.95,0.05,0.0),(20040601,0.95,0.05,0.0),(20040701,0.95,0.05,0.0),(20040801,0.95,0.05,0.0),(20040901,0.95,0.05,0.0),(20041001,0.95,0.05,0.0),(20041101,0.95,0.05,0.0),(20041201,0.95,0.05,0.0),(20050101,0.95,0.05,0.0),(20050201,0.95,0.05,0.0),(20050301,0.95,0.05,0.0),(20050401,0.95,0.05,0.0),(20050501,0.95,0.05,0.0),(20050601,0.95,0.05,0.0),(20050701,0.95,0.05,0.0),(20050801,0.95,0.05,0.0),(20050901,0.95,0.05,0.0),(20051001,0.95,0.05,0.0),(20051101,0.95,0.05,0.0),(20051201,0.95,0.05,0.0),(20060101,0.95,0.05,0.0),(20060201,0.95,0.05,0.0),(20060301,0.95,0.05,0.0),(20060401,0.95,0.05,0.0),(20060501,0.95,0.05,0.0),(20060601,0.95,0.05,0.0),(20060701,0.95,0.05,0.0),(20060801,0.95,0.05,0.0),(20060901,0.95,0.05,0.0),(20061001,0.95,0.05,0.0),(20061101,0.95,0.05,0.0),(20061201,0.95,0.05,0.0),(20070101,0.95,0.05,0.0),(20070201,0.95,0.05,0.0),(20070301,0.95,0.05,0.0),(20070401,0.95,0.05,0.0),(20070501,0.95,0.05,0.0),(20070601,0.95,0.05,0.0),(20070701,0.95,0.05,0.0),(20070801,0.95,0.05,0.0),(20070901,0.95,0.05,0.0),(20071001,0.95,0.05,0.0),(20071101,0.95,0.05,0.0),(20071201,0.95,0.05,0.0),(20080101,0.95,0.05,0.0),(20080201,0.95,0.05,0.0),(20080301,0.95,0.05,0.0),(20080401,0.95,0.05,0.0),(20080501,0.95,0.05,0.0),(20080601,0.95,0.05,0.0),(20080701,0.95,0.05,0.0),(20080801,0.95,0.05,0.0),(20080901,0.95,0.05,0.0),(20081001,0.95,0.05,0.0),(20081101,0.95,0.05,0.0),(20081201,0.95,0.05,0.0),(20090101,0.95,0.05,0.0),(20090201,0.91,0.09,0.0),(20090301,0.91,0.09,0.0),(20090401,0.91,0.09,0.0),(20090501,0.91,0.09,0.0),(20090601,0.91,0.09,0.0),(20090701,0.91,0.09,0.0),(20090801,0.91,0.09,0.0),(20090901,0.91,0.09,0.0),(20091001,0.91,0.09,0.0),(20091101,0.91,0.09,0.0),(20091201,0.91,0.09,0.0),(20100101,0.91
            ,0.09,0.0),(20100201,0.81,0.19,0.0),(20100301,0.81,0.19,0.0),(20100401,0.81,0.19,0.0),(20100501,0.81,0.19,0.0),(20100601,0.81,0.19,0.0),(20100701,0.81,0.19,0.0),(20100801,0.81,0.19,0.0),(20100901,0.81,0.19,0.0),(20101001,0.81,0.19,0.0),(20101101,0.81,0.19,0.0),(20101201,0.81,0.19,0.0),(20110101,0.81,0.19,0.0),(20110201,0.72,0.28,0.0),(20110301,0.72,0.28,0.0),(20110401,0.72,0.28,0.0),(20110501,0.72,0.28,0.0),(20110601,0.72,0.28,0.0),(20110701,0.72,0.28,0.0),(20110801,0.72,0.28,0.0),(20110901,0.72,0.28,0.0),(20111001,0.72,0.28,0.0),(20111101,0.72,0.28,0.0),(20111201,0.72,0.28,0.0),(20120101,0.72,0.28,0.0),(20120201,0.53,0.47,0.0),(20120301,0.53,0.47,0.0),(20120401,0.53,0.47,0.0),(20120501,0.53,0.47,0.0),(20120601,0.53,0.47,0.0),(20120701,0.53,0.47,0.0),(20120801,0.53,0.47,0.0),(20120901,0.53,0.47,0.0),(20121001,0.53,0.47,0.0),(20121101,0.53,0.47,0.0),(20121201,0.53,0.47,0.0),(20130101,0.53,0.47,0.0),(20130201,0.44,0.56,0.0),(20130301,0.44,0.56,0.0),(20130401,0.44,0.56,0.0),(20130501,0.44,0.56,0.0),(20130601,0.44,0.56,0.0),(20130701,0.44,0.56,0.0),(20130801,0.44,0.56,0.0),(20130901,0.44,0.56,0.0),(20131001,0.44,0.56,0.0),(20131101,0.44,0.56,0.0),(20131201,0.44,0.56,0.0),(20140101,0.44,0.56,0.0),(20140201,0.36,0.5,0.14),(20140301,0.36,0.5,0.14),(20140401,0.36,0.5,0.14),(20140501,0.36,0.5,0.14),(20140601,0.36,0.5,0.14),(20140701,0.36,0.5,0.14),(20140801,0.36,0.5,0.14),(20140901,0.36,0.5,0.14),(20141001,0.36,0.5,0.14),(20141101,0.36,0.5,0.14),(20141201,0.36,0.5,0.14),(20150101,0.36,0.5,0.14),(20150201,0.3,0.41,0.29),(20150301,0.3,0.41,0.29),(20150401,0.3,0.41,0.29),(20150501,0.3,0.41,0.29),(20150601,0.3,0.41,0.29),(20150701,0.3,0.41,0.29),(20150801,0.3,0.41,0.29),(20150901,0.3,0.41,0.29),(20151001,0.3,0.41,0.29),(20151101,0.3,0.41,0.29),(20151201,0.3,0.41,0.29),(20160101,0.3,0.41,0.29),(20160201,0.26,0.36,0.38),(20160301,0.26,0.36,0.38),(20160401,0.26,0.36,0.38),(20160501,0.26,0.36,0.38),(20160601,0.26,0.36,0.38),(20160701,0.26,0.36,0.38),(20160801,0.26,0.36,0.38),(201609
            01,0.26,0.36,0.38),(20161001,0.26,0.36,0.38),(20161101,0.26,0.36,0.38),(20161201,0.26,0.36,0.38),(20170101,0.26,0.36,0.38),(20170201,0.2,0.3,0.5),(20170301,0.2,0.3,0.5),(20170401,0.2,0.3,0.5),(20170501,0.2,0.3,0.5),(20170601,0.2,0.3,0.5),(20170701,0.2,0.3,0.5),(20170801,0.2,0.3,0.5),(20170901,0.2,0.3,0.5),(20171001,0.2,0.3,0.5),(20171101,0.2,0.3,0.5),(20171201,0.2,0.3,0.5),(20180101,0.2,0.3,0.5),(20180201,0.13,0.37,0.5),(20180301,0.13,0.37,0.5),(20180401,0.13,0.37,0.5),(20180501,0.13,0.37,0.5),(20180601,0.13,0.37,0.5),(20180701,0.13,0.37,0.5),(20180801,0.13,0.37,0.5),(20180901,0.13,0.37,0.5),(20181001,0.13,0.37,0.5),(20181101,0.13,0.37,0.5),(20181201,0.13,0.37,0.5),(20190101,0.13,0.37,0.5),(20190201,0.1,0.4,0.5),(20190301,0.1,0.4,0.5),(20190401,0.1,0.4,0.5),(20190501,0.1,0.4,0.5),(20190601,0.1,0.4,0.5),(20190701,0.1,0.4,0.5),(20190801,0.1,0.4,0.5),(20190901,0.1,0.4,0.5),(20191001,0.1,0.4,0.5),(20191101,0.1,0.4,0.5),(20191201,0.1,0.4,0.5),(20200101,0.1,0.4,0.5)]
        self.yValueAxis.requiredRange = None
        self.yValueAxis.leftAxisPercent = 0
        self.yValueAxis.leftAxisOrigShiftMin = 0
        self.yValueAxis.leftAxisOrigShiftIPC = 0
        # One fixed colour per stacked series.
        self.lines[0].strokeColor = colors.toColor(0x0033cc)
        self.lines[1].strokeColor = colors.toColor(0x99c3ff)
        self.lines[2].strokeColor = colors.toColor(0xCC0033)
def _maxWidth(T, fontName, fontSize):
    '''return max stringWidth for the list of strings T'''
    if not isinstance(T, (tuple, list)):
        T = (T,)
    # Measure only non-empty strings; empty input yields 0.
    widths = [stringWidth(t, fontName, fontSize) for t in T if t]
    return max(widths) if widths else 0
class ScatterPlot(LinePlot):
    """A scatter plot widget"""
    _attrMap = AttrMap(BASE=LinePlot,
        width = AttrMapValue(isNumber, desc="Width of the area inside the axes"),
        height = AttrMapValue(isNumber, desc="Height of the area inside the axes"),
        outerBorderOn = AttrMapValue(isBoolean, desc="Is there an outer border (continuation of axes)"),
        outerBorderColor = AttrMapValue(isColorOrNone, desc="Color of outer border (if any)"),
        labelOffset = AttrMapValue(isNumber, desc="Space between label and Axis (or other labels)",advancedUsage=1),
        # NOTE(review): "Lenth" below is a typo for "Length" (string data, left as-is).
        axisTickLengths = AttrMapValue(isNumber, desc="Lenth of the ticks on both axes"),
        axisStrokeWidth = AttrMapValue(isNumber, desc="Stroke width for both axes"),
        xLabel = AttrMapValue(isString, desc="Label for the whole X-Axis"),
        yLabel = AttrMapValue(isString, desc="Label for the whole Y-Axis"),
        data = AttrMapValue(isAnything, desc='Data points - a list of x/y tuples.'),
        strokeColor = AttrMapValue(isColorOrNone, desc='Color used for border of plot area.'),
        fillColor = AttrMapValue(isColorOrNone, desc='Color used for background interior of plot area.'),
        leftPadding = AttrMapValue(isNumber, desc='Padding on left of drawing'),
        rightPadding = AttrMapValue(isNumber, desc='Padding on right of drawing'),
        topPadding = AttrMapValue(isNumber, desc='Padding at top of drawing'),
        bottomPadding = AttrMapValue(isNumber, desc='Padding at bottom of drawing'),
        )

    def __init__(self):
        """Set up defaults: axes, sample data, markers and paddings."""
        LinePlot.__init__(self)
        self.width = 142
        self.height = 77
        self.outerBorderOn = 1
        self.outerBorderColor = colors.black
        self.background = None

        # Local layout constants used to derive self.x / self.y below.
        _labelOffset = 3
        _axisTickLengths = 2
        _axisStrokeWidth = 0.5

        self.yValueAxis.valueMin = None
        self.yValueAxis.valueMax = None
        self.yValueAxis.valueStep = None
        self.yValueAxis.labelTextFormat  = '%s'

        # NOTE(review): "Lable" looks like a typo for "Label" — these are
        # user-visible default strings, kept byte-identical here.
        self.xLabel="X Lable"
        self.xValueAxis.labels.fontSize = 6

        self.yLabel="Y Lable"
        self.yValueAxis.labels.fontSize = 6

        # Three sample series of (x, y) points.
        self.data =[((0.030, 62.73),
                     (0.074, 54.363),
                     (1.216, 17.964)),

                    ((1.360, 11.621),
                     (1.387, 50.011),
                     (1.428, 68.953)),

                    ((1.444, 86.888),
                     (1.754, 35.58),
                     (1.766, 36.05))]

        #values for lineplot
        self.joinedLines = 0
        self.leftPadding=5
        self.rightPadding=10
        self.topPadding=5
        self.bottomPadding=5

        # Inset the plot area so tick marks, labels and the widest y label fit.
        self.x = self.leftPadding+_axisTickLengths+(_labelOffset*2)
        self.x=self.x+_maxWidth(str(self.yValueAxis.valueMax), self.yValueAxis.labels.fontName, self.yValueAxis.labels.fontSize)
        self.y = self.bottomPadding+_axisTickLengths+_labelOffset+self.xValueAxis.labels.fontSize

        self.xValueAxis.labels.dy = -_labelOffset
        self.xValueAxis.tickDown = _axisTickLengths
        self.xValueAxis.strokeWidth = _axisStrokeWidth
        self.xValueAxis.rangeRound='both'
        self.yValueAxis.labels.dx = -_labelOffset
        self.yValueAxis.tickLeft = _axisTickLengths
        self.yValueAxis.strokeWidth = _axisStrokeWidth
        self.yValueAxis.rangeRound='both'

        self.lineLabelFormat="%.2f"
        self.lineLabels.fontSize = 5
        self.lineLabels.boxAnchor = 'e'
        self.lineLabels.dx             = -2
        self.lineLabelNudge = 0
        self.lines.symbol=makeMarker('FilledCircle',size=3)
        self.lines[1].symbol=makeMarker('FilledDiamond',size=3)
        self.lines[2].symbol=makeMarker('FilledSquare',size=3)
        self.lines[2].strokeColor = colors.green

    def _getDrawingDimensions(self):
        """Estimate (width, height) of a Drawing big enough for this plot."""
        tx = self.leftPadding+self.yValueAxis.tickLeft+(self.yValueAxis.labels.dx*2)+self.xValueAxis.labels.fontSize
        tx=tx+(5*_maxWidth(str(self.yValueAxis.valueMax), self.yValueAxis.labels.fontName, self.yValueAxis.labels.fontSize))
        tx=tx+self.width+self.rightPadding
        t=('%.2f%%'%self.xValueAxis.valueMax)
        tx=tx+(_maxWidth(t, self.yValueAxis.labels.fontName, self.yValueAxis.labels.fontSize))
        ty = self.bottomPadding+self.xValueAxis.tickDown+(self.xValueAxis.labels.dy*2)+(self.xValueAxis.labels.fontSize*2)
        ty=ty+self.yValueAxis.labels.fontSize+self.height+self.topPadding
        #print (tx, ty)
        return (tx,ty)

    def demo(self,drawing=None):
        """Return a Drawing containing this plot (sized to fit if none given)."""
        if not drawing:
            tx,ty=self._getDrawingDimensions()
            drawing = Drawing(tx,ty)
        drawing.add(self.draw())
        return drawing

    def draw(self):
        """Render the plot plus axis titles and optional bounding box."""
        ascent=getFont(self.xValueAxis.labels.fontName).face.ascent
        if ascent==0:
            ascent=0.718 # default (from helvetica)
        ascent=ascent*self.xValueAxis.labels.fontSize # normalize

        #basic LinePlot - does the Axes, Ticks etc
        lp = LinePlot.draw(self)

        xLabel = self.xLabel
        if xLabel: #Overall label for the X-axis
            xl=Label()
            xl.x = (self.x+self.width)/2.0
            xl.y = 0
            xl.fontName = self.xValueAxis.labels.fontName
            xl.fontSize = self.xValueAxis.labels.fontSize
            xl.setText(xLabel)
            lp.add(xl)

        yLabel = self.yLabel
        if yLabel: #Overall label for the Y-axis
            yl=Label()
            yl.angle = 90
            yl.x = 0
            yl.y = (self.y+self.height/2.0)
            yl.fontName = self.yValueAxis.labels.fontName
            yl.fontSize = self.yValueAxis.labels.fontSize
            yl.setText(yLabel)
            lp.add(yl)

        # do a bounding box - in the same style as the axes
        if self.outerBorderOn:
            lp.add(Rect(self.x, self.y, self.width, self.height,
                       strokeColor = self.outerBorderColor,
                       strokeWidth = self.yValueAxis.strokeWidth,
                       fillColor = None))

        lp.shift(self.leftPadding, self.bottomPadding)

        return lp
def sample1a():
    "A line plot with non-equidistant points in x-axis."
    drawing = Drawing(400, 200)

    series = [
        ((1,1), (2,2), (2.5,1), (3,3), (4,5)),
        ((1,2), (2,3), (2.5,2), (3.5,5), (4,6)),
    ]

    plot = LinePlot()
    plot.x = 50
    plot.y = 50
    plot.height = 125
    plot.width = 300
    plot.data = series
    plot.joinedLines = 1
    plot.strokeColor = colors.black
    plot.lines.symbol = makeMarker('UK_Flag')
    plot.lines[0].strokeWidth = 2
    plot.lines[1].strokeWidth = 4

    # Same fixed ranges/steps on both axes except the y maximum.
    for axis, top in ((plot.xValueAxis, 5), (plot.yValueAxis, 7)):
        axis.valueMin = 0
        axis.valueMax = top
        axis.valueStep = 1

    drawing.add(plot)
    return drawing
def sample1b():
    "A line plot with non-equidistant points in x-axis."
    drawing = Drawing(400, 200)

    series = [
        ((1,1), (2,2), (2.5,1), (3,3), (4,5)),
        ((1,2), (2,3), (2.5,2), (3.5,5), (4,6)),
    ]

    plot = LinePlot()
    plot.x = 50
    plot.y = 50
    plot.height = 125
    plot.width = 300
    plot.data = series
    plot.joinedLines = 1
    plot.lines.symbol = makeMarker('Circle')
    plot.lineLabelFormat = '%2.0f'
    plot.strokeColor = colors.black

    xAxis = plot.xValueAxis
    xAxis.valueMin = 0
    xAxis.valueMax = 5
    xAxis.valueSteps = [1, 2, 2.5, 3, 4, 5]   # explicit, non-equidistant ticks
    xAxis.labelTextFormat = '%2.1f'

    yAxis = plot.yValueAxis
    yAxis.valueMin = 0
    yAxis.valueMax = 7
    yAxis.valueStep = 1

    drawing.add(plot)
    return drawing
def sample1c():
    "A line plot with non-equidistant points in x-axis."
    drawing = Drawing(400, 200)

    series = [
        ((1,1), (2,2), (2.5,1), (3,3), (4,5)),
        ((1,2), (2,3), (2.5,2), (3.5,5), (4,6)),
    ]

    plot = LinePlot()
    plot.x = 50
    plot.y = 50
    plot.height = 125
    plot.width = 300
    plot.data = series
    plot.joinedLines = 1
    plot.lines[0].symbol = makeMarker('FilledCircle')
    plot.lines[1].symbol = makeMarker('Circle')
    plot.lineLabelFormat = '%2.0f'
    plot.strokeColor = colors.black

    xAxis = plot.xValueAxis
    xAxis.valueMin = 0
    xAxis.valueMax = 5
    xAxis.valueSteps = [1, 2, 2.5, 3, 4, 5]   # explicit, non-equidistant ticks
    xAxis.labelTextFormat = '%2.1f'

    yAxis = plot.yValueAxis
    yAxis.valueMin = 0
    yAxis.valueMax = 7
    yAxis.valueSteps = [1, 2, 3, 5, 6]        # irregular y ticks too

    drawing.add(plot)
    return drawing
def preprocessData(series):
    "Convert date strings into seconds and multiply values by 100."
    converted = []
    for point in series:
        converted.append((str2seconds(point[0]), point[1] * 100))
    return converted
def sample2():
    "A line plot with non-equidistant points in x-axis."
    drawing = Drawing(400, 200)

    data = [
        (('25/11/1991',1),
         ('30/11/1991',1.000933333),
         ('31/12/1991',1.0062),
         ('31/01/1992',1.0112),
         ('29/02/1992',1.0158),
         ('31/03/1992',1.020733333),
         ('30/04/1992',1.026133333),
         ('31/05/1992',1.030266667),
         ('30/06/1992',1.034466667),
         ('31/07/1992',1.038733333),
         ('31/08/1992',1.0422),
         ('30/09/1992',1.045533333),
         ('31/10/1992',1.049866667),
         ('30/11/1992',1.054733333),
         ('31/12/1992',1.061),
         ),
        ]

    data[0] = preprocessData(data[0])

    plot = LinePlot()
    plot.x = 50
    plot.y = 50
    plot.height = 125
    plot.width = 300
    plot.data = data
    plot.joinedLines = 1
    plot.lines.symbol = makeMarker('FilledDiamond')
    plot.strokeColor = colors.black

    # Tick positions: a chosen subset of the dates, converted to seconds.
    tick_dates = ('25/11/1991', '30/11/1991', '31/12/1991', '31/03/1992',
                  '30/06/1992', '30/09/1992', '31/12/1992')
    ticks = [mktime(mkTimeTuple(d)) for d in tick_dates]

    plot.xValueAxis.valueMin = ticks[0]
    plot.xValueAxis.valueMax = ticks[-1]
    plot.xValueAxis.valueSteps = ticks
    plot.xValueAxis.labelTextFormat = seconds2str
    # Drop two labels down so they don't collide.
    plot.xValueAxis.labels[1].dy = -20
    plot.xValueAxis.labels[2].dy = -35

    plot.yValueAxis.labelTextFormat = '%4.2f'
    plot.yValueAxis.valueMin = 100
    plot.yValueAxis.valueMax = 110
    plot.yValueAxis.valueStep = 2

    drawing.add(plot)
    return drawing
| agpl-3.0 |
jsteemann/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/command/build_scripts.py | 50 | 4692 | """distutils.command.build_scripts
Implements the Distutils 'build_scripts' command."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_scripts.py 69599 2009-02-13 23:02:44Z tarek.ziade $"
import os, re
from stat import ST_MODE
from distutils import sysconfig
from distutils.core import Command
from distutils.dep_util import newer
from distutils.util import convert_path
from distutils import log
# Regex matching a "#!" line that invokes Python; group 1 captures any
# interpreter arguments after the name (e.g. " -u").  Raw string so the
# character class is written literally rather than via an escape.
first_line_re = re.compile(r'^#!.*python[0-9.]*([ \t].*)?$')
class build_scripts (Command):
    """Distutils command: copy scripts into the build directory.

    Scripts whose first line is a "#!...python..." shebang get that line
    rewritten to point at the target interpreter.  NOTE: this is Python 2
    source (octal literals like 07777); kept compatible with Python 2.1.
    """

    description = "\"build\" scripts (copy and fixup #! line)"

    user_options = [
        ('build-dir=', 'd', "directory to \"build\" (copy) to"),
        ('force', 'f', "forcibly build everything (ignore file timestamps"),
        ('executable=', 'e', "specify final destination interpreter path"),
        ]

    boolean_options = ['force']


    def initialize_options (self):
        """Set every option to its undetermined default (None)."""
        self.build_dir = None
        self.scripts = None
        self.force = None
        self.executable = None
        self.outfiles = None

    def finalize_options (self):
        """Inherit build_dir/force/executable from the parent 'build' command."""
        self.set_undefined_options('build',
                                   ('build_scripts', 'build_dir'),
                                   ('force', 'force'),
                                   ('executable', 'executable'))
        self.scripts = self.distribution.scripts

    def get_source_files(self):
        """Return the script source files (used e.g. by sdist)."""
        return self.scripts

    def run (self):
        """Entry point: copy all scripts, or do nothing if there are none."""
        if not self.scripts:
            return
        self.copy_scripts()


    def copy_scripts (self):
        """Copy each script listed in 'self.scripts'; if it's marked as a
        Python script in the Unix way (first line matches 'first_line_re',
        ie. starts with "\#!" and contains "python"), then adjust the first
        line to refer to the current Python interpreter as we copy.
        """
        self.mkpath(self.build_dir)
        outfiles = []
        for script in self.scripts:
            adjust = 0
            script = convert_path(script)
            outfile = os.path.join(self.build_dir, os.path.basename(script))
            outfiles.append(outfile)

            if not self.force and not newer(script, outfile):
                log.debug("not copying %s (up-to-date)", script)
                continue

            # Always open the file, but ignore failures in dry-run mode --
            # that way, we'll get accurate feedback if we can read the
            # script.
            try:
                f = open(script, "r")
            except IOError:
                if not self.dry_run:
                    raise
                f = None
            else:
                first_line = f.readline()
                if not first_line:
                    self.warn("%s is an empty file (skipping)" % script)
                    continue

                match = first_line_re.match(first_line)
                if match:
                    adjust = 1
                    post_interp = match.group(1) or ''

            if adjust:
                log.info("copying and adjusting %s -> %s", script,
                         self.build_dir)
                if not self.dry_run:
                    outf = open(outfile, "w")
                    # Installed Python: shebang points at self.executable.
                    if not sysconfig.python_build:
                        outf.write("#!%s%s\n" %
                                   (self.executable,
                                    post_interp))
                    else:
                        # Building Python itself: compose the interpreter
                        # path from the build's config variables.
                        outf.write("#!%s%s\n" %
                                   (os.path.join(
                            sysconfig.get_config_var("BINDIR"),
                            "python%s%s" % (sysconfig.get_config_var("VERSION"),
                                            sysconfig.get_config_var("EXE"))),
                                    post_interp))
                    outf.writelines(f.readlines())
                    outf.close()
                if f:
                    f.close()
            else:
                if f:
                    f.close()
                self.copy_file(script, outfile)

        if os.name == 'posix':
            # Make every copied script world-readable/executable.
            for file in outfiles:
                if self.dry_run:
                    log.info("changing mode of %s", file)
                else:
                    # 07777 / 0555 are Python 2 octal literals.
                    oldmode = os.stat(file)[ST_MODE] & 07777
                    newmode = (oldmode | 0555) & 07777
                    if newmode != oldmode:
                        log.info("changing mode of %s from %o to %o",
                                 file, oldmode, newmode)
                        os.chmod(file, newmode)

    # copy_scripts ()

# class build_scripts
| apache-2.0 |
hcarvalhoalves/duplicity | duplicity/path.py | 2 | 26431 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Wrapper class around a file like "/usr/bin/env"
This class makes certain file operations more convenient and
associates stat information with filenames
"""
import stat, errno, socket, time, re, gzip
from duplicity import file_naming
from duplicity import globals
from duplicity import gpg
from duplicity import tarfile
from duplicity import util
from duplicity import librsync
from duplicity import log #@UnusedImport
from duplicity import dup_time
from duplicity.lazy import * #@UnusedWildImport
_copy_blocksize = 64 * 1024   # chunk size (bytes) for reading/comparing file data
_tmp_path_counter = 1         # presumably used to number temporary paths — usage not in this chunk
class StatResult:
    """Used to emulate the output of os.stat() and related.

    Attributes such as st_uid/st_gid/st_mtime/st_size are filled in by
    callers (see ROPath.init_from_tarinfo) as needed.
    """
    # st_mode is required by the TarInfo class, but it's unclear how
    # to generate it from file permissions.
    st_mode = 0
class PathException(Exception):
    """Raised for path errors: unknown file types, tar-unsupported entries."""
    pass
class ROPath:
"""Read only Path
Objects of this class doesn't represent real files, so they don't
have a name. They are required to be indexed though.
"""
    def __init__(self, index, stat = None):
        """ROPath initializer

        index -- tuple of path components identifying this path
        stat -- accepted but currently ignored; self.stat always starts
                as None (NOTE(review): confirm this is intentional —
                get_ropath() passes a stat here and then re-assigns it)
        """
        self.opened, self.fileobj = None, None
        self.index = index
        self.stat, self.type = None, None
        self.mode, self.devnums = None, None
def set_from_stat(self):
"""Set the value of self.type, self.mode from self.stat"""
if not self.stat:
self.type = None
st_mode = self.stat.st_mode
if stat.S_ISREG(st_mode):
self.type = "reg"
elif stat.S_ISDIR(st_mode):
self.type = "dir"
elif stat.S_ISLNK(st_mode):
self.type = "sym"
elif stat.S_ISFIFO(st_mode):
self.type = "fifo"
elif stat.S_ISSOCK(st_mode):
raise PathException(self.get_relative_path() +
"is a socket, unsupported by tar")
self.type = "sock"
elif stat.S_ISCHR(st_mode):
self.type = "chr"
elif stat.S_ISBLK(st_mode):
self.type = "blk"
else:
raise PathException("Unknown type")
self.mode = stat.S_IMODE(st_mode)
if self.type in ("chr", "blk"):
self.devnums = (os.major(self.stat.st_rdev),
os.minor(self.stat.st_rdev))
def blank(self):
"""Black out self - set type and stat to None"""
self.type, self.stat = None, None
    def exists(self):
        """True if corresponding file exists"""
        # self.type is None for nonexistent paths, so the type string itself
        # serves as the truthy/falsy result (callers test it in boolean context).
        return self.type
    def isreg(self):
        """True if self corresponds to regular file"""
        return self.type == "reg"
    def isdir(self):
        """True if self is a directory"""
        return self.type == "dir"
    def issym(self):
        """True if self is a symbolic link"""
        return self.type == "sym"
    def isfifo(self):
        """True if self is a fifo (named pipe)"""
        return self.type == "fifo"
    def issock(self):
        """True if self is a socket"""
        # Note set_from_stat() raises on sockets, so this is normally only
        # true for paths typed by other means.
        return self.type == "sock"
    def isdev(self):
        """True if self is a device file (character or block)"""
        return self.type == "chr" or self.type == "blk"
    def getdevloc(self):
        """Return device number (st_dev) of the filesystem the path resides on"""
        return self.stat.st_dev
    def getsize(self):
        """Return length in bytes from stat object"""
        return self.stat.st_size
    def getmtime(self):
        """Return modification time of the path, truncated to whole seconds"""
        return int(self.stat.st_mtime)
def get_relative_path(self):
"""Return relative path, created from index"""
if self.index:
return "/".join(self.index)
else:
return "."
def getperms(self):
"""Return permissions mode, owner and group"""
s1 = self.stat
return '%s:%s %o' % (s1.st_uid, s1.st_gid, self.mode)
    def open(self, mode):
        """Return the fileobj previously recorded via setfileobj().

        mode -- must be "rb"; only a single binary-read open is supported,
                enforced by the assertion (self.opened marks reuse).
        """
        assert mode == "rb" and self.fileobj and not self.opened, \
               "%s %s %s" % (mode, self.fileobj, self.opened)
        self.opened = 1
        return self.fileobj
def get_data(self):
"""Return contents of associated fileobj in string"""
fin = self.open("rb")
buf = fin.read()
assert not fin.close()
return buf
def setfileobj(self, fileobj):
"""Set file object returned by open()"""
assert not self.fileobj
self.fileobj = fileobj
self.opened = None
    def init_from_tarinfo(self, tarinfo):
        """Set data from tarinfo object (part of tarfile module)

        Derives self.type/mode/devnums and builds an emulated stat object
        (StatResult) carrying uid/gid/mtime/size taken from the tar header.
        Raises PathException for hard links and unknown entry types.
        """
        # Set the type
        type = tarinfo.type
        if type == tarfile.REGTYPE or type == tarfile.AREGTYPE:
            self.type = "reg"
        elif type == tarfile.LNKTYPE:
            raise PathException("Hard links not supported yet")
        elif type == tarfile.SYMTYPE:
            self.type = "sym"
            self.symtext = tarinfo.linkname
        elif type == tarfile.CHRTYPE:
            self.type = "chr"
            self.devnums = (tarinfo.devmajor, tarinfo.devminor)
        elif type == tarfile.BLKTYPE:
            self.type = "blk"
            self.devnums = (tarinfo.devmajor, tarinfo.devminor)
        elif type == tarfile.DIRTYPE:
            self.type = "dir"
        elif type == tarfile.FIFOTYPE:
            self.type = "fifo"
        else:
            raise PathException("Unknown tarinfo type %s" % (type,))

        self.mode = tarinfo.mode
        self.stat = StatResult()

        """ Set user and group id
        use numeric id if name lookup fails
        OR
        --numeric-owner is set
        """
        try:
            if globals.numeric_owner:
                raise KeyError
            self.stat.st_uid = tarfile.uname2uid(tarinfo.uname)
        except KeyError:
            self.stat.st_uid = tarinfo.uid
        try:
            if globals.numeric_owner:
                raise KeyError
            self.stat.st_gid = tarfile.gname2gid(tarinfo.gname)
        except KeyError:
            self.stat.st_gid = tarinfo.gid

        # Negative mtimes (seen in malformed archives) are clamped to 0.
        self.stat.st_mtime = int(tarinfo.mtime)
        if self.stat.st_mtime < 0:
            log.Warn(_("Warning: %s has negative mtime, treating as 0.")
                     % (tarinfo.name,))
            self.stat.st_mtime = 0
        self.stat.st_size = tarinfo.size
def get_ropath(self):
"""Return ropath copy of self"""
new_ropath = ROPath(self.index, self.stat)
new_ropath.type, new_ropath.mode = self.type, self.mode
if self.issym():
new_ropath.symtext = self.symtext
elif self.isdev():
new_ropath.devnums = self.devnums
if self.exists():
new_ropath.stat = self.stat
return new_ropath
    def get_tarinfo(self):
        """Generate a tarfile.TarInfo object based on self

        Doesn't set size based on stat, because we may want to replace
        data with another stream.  Size should be set separately by the
        calling function.
        """
        ti = tarfile.TarInfo()
        if self.index:
            ti.name = "/".join(self.index)
        else:
            ti.name = "."
        if self.isdir():
            ti.name += "/" # tar dir naming convention

        ti.size = 0
        if self.type:
            # Lots of this is specific to tarfile.py, hope it doesn't
            # change much...
            if self.isreg():
                ti.type = tarfile.REGTYPE
                ti.size = self.stat.st_size
            elif self.isdir():
                ti.type = tarfile.DIRTYPE
            elif self.isfifo():
                ti.type = tarfile.FIFOTYPE
            elif self.issym():
                ti.type = tarfile.SYMTYPE
                ti.linkname = self.symtext
            elif self.isdev():
                if self.type == "chr":
                    ti.type = tarfile.CHRTYPE
                else:
                    ti.type = tarfile.BLKTYPE
                ti.devmajor, ti.devminor = self.devnums
            else:
                raise PathException("Unrecognized type " + str(self.type))

            ti.mode = self.mode
            ti.uid, ti.gid = self.stat.st_uid, self.stat.st_gid
            # Negative mtimes cannot be stored; clamp to 0 with a warning.
            if self.stat.st_mtime < 0:
                log.Warn(_("Warning: %s has negative mtime, treating as 0.")
                         % (self.get_relative_path(),))
                ti.mtime = 0
            else:
                ti.mtime = int(self.stat.st_mtime)

            # Name lookups are best-effort: fall back to numeric ids.
            try:
                ti.uname = tarfile.uid2uname(ti.uid)
            except KeyError:
                pass
            try:
                ti.gname = tarfile.gid2gname(ti.gid)
            except KeyError:
                pass

            if ti.type in (tarfile.CHRTYPE, tarfile.BLKTYPE):
                if hasattr(os, "major") and hasattr(os, "minor"):
                    ti.devmajor, ti.devminor = self.devnums
        else:
            # Currently we depend on an uninitialized tarinfo file to
            # already have appropriate headers.  Still, might as well
            # make sure mode and size set.
            ti.mode, ti.size = 0, 0
        return ti
    def __eq__(self, other):
        """Used to compare two ROPaths.  Doesn't look at fileobjs.

        Returns 1/0 (ints) rather than True/False; callers rely only
        on truthiness.
        """
        if not self.type and not other.type:
            return 1 # neither exists
        if not self.stat and other.stat or not other.stat and self.stat:
            return 0
        if self.type != other.type:
            return 0

        if self.isreg() or self.isdir() or self.isfifo():
            # Don't compare sizes, because we might be comparing
            # signature size to size of file.
            if not self.perms_equal(other):
                return 0
            if int(self.stat.st_mtime) == int(other.stat.st_mtime):
                return 1
            # Below, treat negative mtimes as equal to 0
            return self.stat.st_mtime <= 0 and other.stat.st_mtime <= 0
        elif self.issym():
            # here only symtext matters
            return self.symtext == other.symtext
        elif self.isdev():
            return self.perms_equal(other) and self.devnums == other.devnums
        assert 0
def __ne__(self, other):
return not self.__eq__(other)
    def compare_verbose(self, other, include_data = 0):
        """Compare ROPaths like __eq__, but log reason if different

        This is placed in a separate function from __eq__ because
        __eq__ should be very time sensitive, and logging statements
        would slow it down.  Used when verifying.

        If include_data is true, also read all the data of regular
        files and see if they differ.

        Returns 1 when equal, 0 otherwise (matching __eq__'s int results).
        """
        def log_diff(log_string):
            # Every log_string has a %s placeholder for this path.
            log_str = _("Difference found:") + " " + log_string
            log.Notice(log_str % (self.get_relative_path(),))

        if not self.type and not other.type:
            return 1
        if not self.stat and other.stat:
            log_diff(_("New file %s"))
            return 0
        if not other.stat and self.stat:
            log_diff(_("File %s is missing"))
            return 0
        if self.type != other.type:
            log_diff(_("File %%s has type %s, expected %s") %
                     (other.type, self.type))
            return 0

        if self.isreg() or self.isdir() or self.isfifo():
            if not self.perms_equal(other):
                log_diff(_("File %%s has permissions %s, expected %s") %
                         (other.getperms(), self.getperms()))
                return 0
            # Mtimes differ, unless both are non-positive (treated as 0).
            if ((int(self.stat.st_mtime) != int(other.stat.st_mtime)) and
                (self.stat.st_mtime > 0 or other.stat.st_mtime > 0)):
                log_diff(_("File %%s has mtime %s, expected %s") %
                         (dup_time.timetopretty(int(other.stat.st_mtime)),
                          dup_time.timetopretty(int(self.stat.st_mtime))))
                return 0
            if self.isreg() and include_data:
                if self.compare_data(other):
                    return 1
                else:
                    log_diff(_("Data for file %s is different"))
                    return 0
            else:
                return 1
        elif self.issym():
            if self.symtext == other.symtext:
                return 1
            else:
                log_diff(_("Symlink %%s points to %s, expected %s") %
                         (other.symtext, self.symtext))
                return 0
        elif self.isdev():
            if not self.perms_equal(other):
                log_diff(_("File %%s has permissions %s, expected %s") %
                         (other.getperms(), self.getperms()))
                return 0
            if self.devnums != other.devnums:
                log_diff(_("Device file %%s has numbers %s, expected %s")
                         % (other.devnums, self.devnums))
                return 0
            return 1
        assert 0
    def compare_data(self, other):
        """Compare data from two regular files, return true if same.

        Both files are read in _copy_blocksize chunks and both file
        objects are closed before returning.  The asserts rely on
        close() returning a falsy value on success.
        """
        f1 = self.open("rb")
        f2 = other.open("rb")

        def close():
            assert not f1.close()
            assert not f2.close()

        while 1:
            buf1 = f1.read(_copy_blocksize)
            buf2 = f2.read(_copy_blocksize)
            if buf1 != buf2:
                close()
                return 0
            if not buf1:
                # Both hit EOF at the same point with identical contents.
                close()
                return 1
    def perms_equal(self, other):
        """True if self and other have same permissions and ownership.

        Compares the cached mode plus uid/gid from the stat results.
        """
        s1, s2 = self.stat, other.stat
        return (self.mode == other.mode and
                s1.st_gid == s2.st_gid and s1.st_uid == s2.st_uid)
    def copy(self, other):
        """Copy self to other.  Also copies data.  Other must be Path.

        Recreates self's file-system object at other's location according
        to self's type, then copies attributes (mode/ownership/mtime).
        Symlinks return early: only ownership is set, via lchown.
        """
        if self.isreg():
            other.writefileobj(self.open("rb"))
        elif self.isdir():
            os.mkdir(other.name)
        elif self.issym():
            os.symlink(self.symtext, other.name)
            os.lchown(other.name, self.stat.st_uid, self.stat.st_gid)
            other.setdata()
            return # no need to copy symlink attributes
        elif self.isfifo():
            os.mkfifo(other.name)
        elif self.issock():
            # Binding a unix socket creates the socket file on disk.
            socket.socket(socket.AF_UNIX).bind(other.name)
        elif self.isdev():
            if self.type == "chr":
                devtype = "c"
            else:
                devtype = "b"
            other.makedev(devtype, *self.devnums)
        self.copy_attribs(other)
    def copy_attribs(self, other):
        """Only copy attributes from self to other.

        For a real Path, apply ownership/mode/mtime to the file system,
        ignoring per-call errors (e.g. chown as non-root); for any other
        ROPath, record them on a synthetic StatResult instead.
        """
        if isinstance(other, Path):
            util.maybe_ignore_errors(lambda: os.chown(other.name, self.stat.st_uid, self.stat.st_gid))
            util.maybe_ignore_errors(lambda: os.chmod(other.name, self.mode))
            # atime is set to "now"; only mtime is preserved.
            util.maybe_ignore_errors(lambda: os.utime(other.name, (time.time(), self.stat.st_mtime)))
            other.setdata()
        else:
            # write results to fake stat object
            assert isinstance(other, ROPath)
            stat = StatResult()
            stat.st_uid, stat.st_gid = self.stat.st_uid, self.stat.st_gid
            stat.st_mtime = int(self.stat.st_mtime)
            other.stat = stat
            other.mode = self.mode
    def __repr__(self):
        """Return string representation: (index, type) tuple-ish form."""
        return "(%s %s)" % (self.index, self.type)
class Path(ROPath):
    """
    Path class - wrapper around ordinary local files

    Besides caching stat() results, this class organizes various file
    code.
    """
    # Characters that must be backslash-escaped by quote() so the result
    # is safe inside double quotes for a shell: \ " $ `
    regex_chars_to_quote = re.compile("[\\\\\\\"\\$`]")

    def rename_index(self, index):
        """Apply the globals.rename prefix mapping to an index tuple.

        Walks up the joined index path until a prefix present in
        globals.rename is found; the mapped replacement is spliced in
        front of the remaining components.  Returns index unchanged if
        renaming is disabled or no prefix matches.
        """
        if not globals.rename or not index:
            return index # early exit
        path = os.path.normcase(os.path.join(*index))
        tail = []
        while path and path not in globals.rename:
            path, extra = os.path.split(path)
            tail.insert(0, extra)
        if path:
            return globals.rename[path].split(os.sep) + tail
        else:
            return index # no rename found

    def __init__(self, base, index = ()):
        """Path initializer"""
        # self.opened should be true if the file has been opened, and
        # self.fileobj can override returned fileobj
        self.opened, self.fileobj = None, None
        self.base = base
        self.index = self.rename_index(index)
        self.name = os.path.join(base, *self.index)
        self.setdata()

    def setdata(self):
        """Refresh stat cache"""
        try:
            # lstat so symlinks are described, not followed.
            self.stat = os.lstat(self.name)
        except OSError, e:
            err_string = errno.errorcode[e[0]]
            if err_string in ["ENOENT", "ENOTDIR", "ELOOP", "ENOTCONN"]:
                self.stat, self.type = None, None # file doesn't exist
                self.mode = None
            else:
                raise
        else:
            self.set_from_stat()
            if self.issym():
                self.symtext = os.readlink(self.name)

    def append(self, ext):
        """Return new Path with ext added to index"""
        return self.__class__(self.base, self.index + (ext,))

    def new_index(self, index):
        """Return new Path with index index"""
        return self.__class__(self.base, index)

    def listdir(self):
        """Return list generated by os.listdir"""
        return os.listdir(self.name)

    def isemptydir(self):
        """Return true if path is a directory and is empty"""
        return self.isdir() and not self.listdir()

    def open(self, mode = "rb"):
        """
        Return fileobj associated with self

        Usually this is just the file data on disk, but can be
        replaced with arbitrary data using the setfileobj method.
        """
        assert not self.opened
        if self.fileobj:
            result = self.fileobj
        else:
            result = open(self.name, mode)
        return result

    def makedev(self, type, major, minor):
        """Make a device file with specified type, major/minor nums

        Shells out to mknod rather than calling os.mknod, presumably
        for portability -- TODO confirm.
        """
        cmdlist = ['mknod', self.name, type, str(major), str(minor)]
        if os.spawnvp(os.P_WAIT, 'mknod', cmdlist) != 0:
            raise PathException("Error running %s" % cmdlist)
        self.setdata()

    def mkdir(self):
        """Make directory(s) at specified path"""
        log.Info(_("Making directory %s") % (self.name,))
        try:
            os.makedirs(self.name)
        except OSError:
            # With --force an existing directory is tolerated silently.
            if (not globals.force):
                raise PathException("Error creating directory %s" % (self.name,), 7)
        self.setdata()

    def delete(self):
        """Remove this file"""
        log.Info(_("Deleting %s") % (self.name,))
        if self.isdir():
            util.ignore_missing(os.rmdir, self.name)
        else:
            util.ignore_missing(os.unlink, self.name)
        self.setdata()

    def touch(self):
        """Open the file, write 0 bytes, close"""
        log.Info(_("Touching %s") % (self.name,))
        fp = self.open("wb")
        fp.close()

    def deltree(self):
        """Remove self by recursively deleting files under it"""
        from duplicity import selection # todo: avoid circ. dep. issue
        log.Info(_("Deleting tree %s") % (self.name,))
        # The iterator visits children before parents so rmdir succeeds.
        itr = IterTreeReducer(PathDeleter, [])
        for path in selection.Select(self).set_iter():
            itr(path.index, path)
        itr.Finish()
        self.setdata()

    def get_parent_dir(self):
        """Return directory that self is in"""
        if self.index:
            return Path(self.base, self.index[:-1])
        else:
            components = self.base.split("/")
            if len(components) == 2 and not components[0]:
                return Path("/") # already in root directory
            else:
                return Path("/".join(components[:-1]))

    def writefileobj(self, fin):
        """Copy file object fin to self.  Close both when done."""
        fout = self.open("wb")
        while 1:
            buf = fin.read(_copy_blocksize)
            if not buf:
                break
            fout.write(buf)
        # close() returning truthy indicates an error for these fileobjs.
        if fin.close() or fout.close():
            raise PathException("Error closing file object")
        self.setdata()

    def rename(self, new_path):
        """Rename file at current path to new_path."""
        os.rename(self.name, new_path.name)
        self.setdata()
        new_path.setdata()

    def move(self, new_path):
        """Like rename but destination may be on different file system"""
        self.copy(new_path)
        self.delete()

    def chmod(self, mode):
        """Change permissions of the path"""
        os.chmod(self.name, mode)
        self.setdata()

    def patch_with_attribs(self, diff_ropath):
        """Patch self with diff and then copy attributes over

        The patched output is written to a temp file in the same
        directory and then renamed over self, so the update is atomic.
        """
        assert self.isreg() and diff_ropath.isreg()
        temp_path = self.get_temp_in_same_dir()
        patch_fileobj = librsync.PatchedFile(self.open("rb"),
                                             diff_ropath.open("rb"))
        temp_path.writefileobj(patch_fileobj)
        diff_ropath.copy_attribs(temp_path)
        temp_path.rename(self)

    def get_temp_in_same_dir(self):
        """Return temp non existent path in same directory as self"""
        global _tmp_path_counter
        parent_dir = self.get_parent_dir()
        while 1:
            temp_path = parent_dir.append("duplicity_temp." +
                                          str(_tmp_path_counter))
            if not temp_path.type:
                return temp_path
            _tmp_path_counter += 1
            assert _tmp_path_counter < 10000, \
                   "Warning too many temp files created for " + self.name

    def compare_recursive(self, other, verbose = None):
        """Compare self to other Path, descending down directories"""
        from duplicity import selection # todo: avoid circ. dep. issue
        selfsel = selection.Select(self).set_iter()
        othersel = selection.Select(other).set_iter()
        return Iter.equal(selfsel, othersel, verbose)

    def __repr__(self):
        """Return string representation"""
        return "(%s %s %s)" % (self.index, self.name, self.type)

    def quote(self, s = None):
        """
        Return quoted version of s (defaults to self.name)

        The output is meant to be interpreted with shells, so can be
        used with os.system.
        """
        if not s:
            s = self.name
        return '"%s"' % self.regex_chars_to_quote.sub(lambda m: "\\"+m.group(0), s)

    def unquote(self, s):
        """Return unquoted version of string s, as quoted by above quote()"""
        assert s[0] == s[-1] == "\"" # string must be quoted by above
        result = ""; i = 1
        while i < len(s)-1:
            if s[i] == "\\":
                # Backslash escapes the next character; emit it verbatim.
                result += s[i+1]
                i += 2
            else:
                result += s[i]
                i += 1
        return result

    def get_filename(self):
        """Return filename of last component"""
        components = self.name.split("/")
        assert components and components[-1]
        return components[-1]

    def get_canonical(self):
        """
        Return string of canonical version of path

        Remove ".", and trailing slashes where possible.  Note that
        it's harder to remove "..", as "foo/bar/.." is not necessarily
        "foo", so we can't use path.normpath()
        """
        newpath = "/".join(filter(lambda x: x and x != ".",
                                  self.name.split("/")))
        if self.name[0] == "/":
            return "/" + newpath
        elif newpath:
            return newpath
        else:
            return "."
class DupPath(Path):
    """
    Represent duplicity data files

    Based on the file name, files that are compressed or encrypted
    will have different open() methods.
    """
    def __init__(self, base, index = (), parseresults = None):
        """
        DupPath initializer

        The actual filename (no directory) must be the single element
        of the index, unless parseresults is given.
        """
        if parseresults:
            self.pr = parseresults
        else:
            assert len(index) == 1
            # Derive compression/encryption info from the filename.
            self.pr = file_naming.parse(index[0])
            assert self.pr, "must be a recognizable duplicity file"
        Path.__init__(self, base, index)

    def filtered_open(self, mode = "rb", gpg_profile = None):
        """
        Return fileobj with appropriate encryption/compression

        If encryption is specified but no gpg_profile, use
        globals.default_profile.
        """
        assert not self.opened and not self.fileobj
        # A file cannot be both gzip-compressed and gpg-encrypted.
        assert not (self.pr.encrypted and self.pr.compressed)
        if gpg_profile:
            assert self.pr.encrypted

        if self.pr.compressed:
            return gzip.GzipFile(self.name, mode)
        elif self.pr.encrypted:
            if not gpg_profile:
                gpg_profile = globals.gpg_profile
            if mode == "rb":
                return gpg.GPGFile(False, self, gpg_profile)
            elif mode == "wb":
                return gpg.GPGFile(True, self, gpg_profile)
        else:
            # Neither compressed nor encrypted: plain file access.
            return self.open(mode)
class PathDeleter(ITRBranch):
    """Delete a directory.  Called by Path.deltree"""
    def start_process(self, index, path):
        # Remember the directory; it is deleted in end_process, after
        # the tree reducer has handled everything beneath it.
        self.path = path

    def end_process(self):
        self.path.delete()

    def can_fast_process(self, index, path):
        # Non-directories have no children, so they can be deleted at once.
        return not path.isdir()

    def fast_process(self, index, path):
        path.delete()
| gpl-2.0 |
blueyed/coveragepy | tests/test_templite.py | 1 | 10970 | # coding: utf-8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.templite."""
import re
from coverage.templite import Templite, TempliteSyntaxError, TempliteValueError
from tests.coveragetest import CoverageTest
# pylint: disable=unused-variable
class AnyOldObject(object):
    """Simple testing object.

    Every keyword argument passed to the constructor becomes an
    attribute of the same name on the new instance.
    """
    def __init__(self, **attrs):
        for attr_name, attr_value in attrs.items():
            setattr(self, attr_name, attr_value)
class TempliteTest(CoverageTest):
    """Tests for Templite."""

    # These tests never touch the file system.
    run_in_temp_dir = False

    def try_render(self, text, ctx=None, result=None):
        """Render `text` through `ctx`, and it had better be `result`.

        Result defaults to None so we can shorten the calls where we expect
        an exception and never get to the result comparison.
        """
        actual = Templite(text).render(ctx or {})
        # If result is None, then an exception should have prevented us getting
        # to here.
        assert result is not None
        self.assertEqual(actual, result)

    def assertSynErr(self, msg):
        """Assert that a `TempliteSyntaxError` will happen.

        A context manager, and the message should be `msg`.
        """
        pat = "^" + re.escape(msg) + "$"
        return self.assertRaisesRegex(TempliteSyntaxError, pat)

    def test_passthrough(self):
        """Strings without variables are passed through unchanged."""
        self.assertEqual(Templite("Hello").render(), "Hello")
        self.assertEqual(
            Templite("Hello, 20% fun time!").render(),
            "Hello, 20% fun time!"
            )

    def test_variables(self):
        """Variables use {{var}} syntax."""
        self.try_render("Hello, {{name}}!", {'name':'Ned'}, "Hello, Ned!")

    def test_undefined_variables(self):
        """Using undefined names is an error."""
        with self.assertRaises(Exception):
            self.try_render("Hi, {{name}}!")

    def test_pipes(self):
        """Variables can be filtered with pipes."""
        data = {
            'name': 'Ned',
            'upper': lambda x: x.upper(),
            'second': lambda x: x[1],
            }
        self.try_render("Hello, {{name|upper}}!", data, "Hello, NED!")
        # Pipes can be concatenated.
        self.try_render("Hello, {{name|upper|second}}!", data, "Hello, E!")

    def test_reusability(self):
        """A single Templite can be used more than once with different data."""
        globs = {
            'upper': lambda x: x.upper(),
            'punct': '!',
            }
        template = Templite("This is {{name|upper}}{{punct}}", globs)
        self.assertEqual(template.render({'name':'Ned'}), "This is NED!")
        self.assertEqual(template.render({'name':'Ben'}), "This is BEN!")

    def test_attribute(self):
        """Variables' attributes can be accessed with dots."""
        obj = AnyOldObject(a="Ay")
        self.try_render("{{obj.a}}", locals(), "Ay")
        obj2 = AnyOldObject(obj=obj, b="Bee")
        self.try_render("{{obj2.obj.a}} {{obj2.b}}", locals(), "Ay Bee")

    def test_member_function(self):
        """Variables' member functions can be used, as long as they are nullary."""
        class WithMemberFns(AnyOldObject):
            """A class to try out member function access."""
            def ditto(self):
                """Return twice the .txt attribute."""
                return self.txt + self.txt
        obj = WithMemberFns(txt="Once")
        self.try_render("{{obj.ditto}}", locals(), "OnceOnce")

    def test_item_access(self):
        """Variables' items can be used."""
        d = {'a':17, 'b':23}
        self.try_render("{{d.a}} < {{d.b}}", locals(), "17 < 23")

    def test_loops(self):
        """Loops work like in Django."""
        nums = [1,2,3,4]
        self.try_render(
            "Look: {% for n in nums %}{{n}}, {% endfor %}done.",
            locals(),
            "Look: 1, 2, 3, 4, done."
            )
        # Loop iterables can be filtered.
        def rev(l):
            """Return the reverse of `l`."""
            l = l[:]
            l.reverse()
            return l
        self.try_render(
            "Look: {% for n in nums|rev %}{{n}}, {% endfor %}done.",
            locals(),
            "Look: 4, 3, 2, 1, done."
            )

    def test_empty_loops(self):
        """A loop over an empty iterable produces no output."""
        self.try_render(
            "Empty: {% for n in nums %}{{n}}, {% endfor %}done.",
            {'nums':[]},
            "Empty: done."
            )

    def test_multiline_loops(self):
        """Newlines inside loop bodies are preserved."""
        self.try_render(
            "Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.",
            {'nums':[1,2,3]},
            "Look: \n\n1, \n\n2, \n\n3, \ndone."
            )

    def test_multiple_loops(self):
        """Two separate loops in one template both render."""
        self.try_render(
            "{% for n in nums %}{{n}}{% endfor %} and "
                                    "{% for n in nums %}{{n}}{% endfor %}",
            {'nums': [1,2,3]},
            "123 and 123"
            )

    def test_comments(self):
        # Single-line comments work:
        self.try_render(
            "Hello, {# Name goes here: #}{{name}}!",
            {'name':'Ned'}, "Hello, Ned!"
            )
        # and so do multi-line comments:
        self.try_render(
            "Hello, {# Name\ngoes\nhere: #}{{name}}!",
            {'name':'Ned'}, "Hello, Ned!"
            )

    def test_if(self):
        """{% if %} renders its body only for truthy values, and nests."""
        self.try_render(
            "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
            {'ned': 1, 'ben': 0},
            "Hi, NED!"
            )
        self.try_render(
            "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!",
            {'ned': 0, 'ben': 1},
            "Hi, BEN!"
            )
        self.try_render(
            "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
            {'ned': 0, 'ben': 0},
            "Hi, !"
            )
        self.try_render(
            "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
            {'ned': 1, 'ben': 0},
            "Hi, NED!"
            )
        self.try_render(
            "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!",
            {'ned': 1, 'ben': 1},
            "Hi, NEDBEN!"
            )

    def test_complex_if(self):
        """Dotted access works inside {% if %} conditions."""
        class Complex(AnyOldObject):
            """A class to try out complex data access."""
            def getit(self):
                """Return it."""
                return self.it
        obj = Complex(it={'x':"Hello", 'y': 0})
        self.try_render(
            "@"
            "{% if obj.getit.x %}X{% endif %}"
            "{% if obj.getit.y %}Y{% endif %}"
            "{% if obj.getit.y|str %}S{% endif %}"
            "!",
            { 'obj': obj, 'str': str },
            "@XS!"
            )

    def test_loop_if(self):
        """{% if %} and {% for %} can nest in either order."""
        self.try_render(
            "@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!",
            {'nums': [0,1,2]},
            "@0Z1Z2!"
            )
        self.try_render(
            "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
            {'nums': [0,1,2]},
            "X@012!"
            )
        self.try_render(
            "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!",
            {'nums': []},
            "X!"
            )

    def test_nested_loops(self):
        """Loops can nest; the inner loop runs fully per outer iteration."""
        self.try_render(
            "@"
            "{% for n in nums %}"
                "{% for a in abc %}{{a}}{{n}}{% endfor %}"
            "{% endfor %}"
            "!",
            {'nums': [0,1,2], 'abc': ['a', 'b', 'c']},
            "@a0b0c0a1b1c1a2b2c2!"
            )

    def test_whitespace_handling(self):
        """The -%} form strips whitespace after a tag; plain %} keeps it."""
        self.try_render(
            "@{% for n in nums %}\n"
            " {% for a in abc %}{{a}}{{n}}{% endfor %}\n"
            "{% endfor %}!\n",
            {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
            "@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n"
            )
        self.try_render(
            "@{% for n in nums -%}\n"
            " {% for a in abc -%}\n"
            " {# this disappears completely -#}\n"
            " {{a -}}\n"
            " {{n -}}\n"
            " {% endfor %}\n"
            "{% endfor %}!\n",
            {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']},
            "@a0b0c0\na1b1c1\na2b2c2\n!\n"
            )

    def test_non_ascii(self):
        """Non-ASCII text renders without mangling."""
        self.try_render(
            u"{{where}} ollǝɥ",
            { 'where': u'ǝɹǝɥʇ' },
            u"ǝɹǝɥʇ ollǝɥ"
            )

    def test_exception_during_evaluation(self):
        """Evaluation errors raise TempliteValueError with a useful message."""
        # TypeError: Couldn't evaluate {{ foo.bar.baz }}:
        msg = "Couldn't evaluate None.bar"
        with self.assertRaisesRegex(TempliteValueError, msg):
            self.try_render(
                "Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there"
                )

    def test_bad_names(self):
        """Invalid identifiers in expressions are syntax errors."""
        with self.assertSynErr("Not a valid name: 'var%&!@'"):
            self.try_render("Wat: {{ var%&!@ }}")
        with self.assertSynErr("Not a valid name: 'filter%&!@'"):
            self.try_render("Wat: {{ foo|filter%&!@ }}")
        with self.assertSynErr("Not a valid name: '@'"):
            self.try_render("Wat: {% for @ in x %}{% endfor %}")

    def test_bogus_tag_syntax(self):
        """Unknown tag names are syntax errors."""
        with self.assertSynErr("Don't understand tag: 'bogus'"):
            self.try_render("Huh: {% bogus %}!!{% endbogus %}??")

    def test_malformed_if(self):
        """{% if %} takes exactly one expression."""
        with self.assertSynErr("Don't understand if: '{% if %}'"):
            self.try_render("Buh? {% if %}hi!{% endif %}")
        with self.assertSynErr("Don't understand if: '{% if this or that %}'"):
            self.try_render("Buh? {% if this or that %}hi!{% endif %}")

    def test_malformed_for(self):
        """{% for %} must be exactly 'for NAME in EXPR'."""
        with self.assertSynErr("Don't understand for: '{% for %}'"):
            self.try_render("Weird: {% for %}loop{% endfor %}")
        with self.assertSynErr("Don't understand for: '{% for x from y %}'"):
            self.try_render("Weird: {% for x from y %}loop{% endfor %}")
        with self.assertSynErr("Don't understand for: '{% for x, y in z %}'"):
            self.try_render("Weird: {% for x, y in z %}loop{% endfor %}")

    def test_bad_nesting(self):
        """Unclosed, mismatched, and extra end tags are syntax errors."""
        with self.assertSynErr("Unmatched action tag: 'if'"):
            self.try_render("{% if x %}X")
        with self.assertSynErr("Mismatched end tag: 'for'"):
            self.try_render("{% if x %}X{% endfor %}")
        with self.assertSynErr("Too many ends: '{% endif %}'"):
            self.try_render("{% if x %}{% endif %}{% endif %}")

    def test_malformed_end(self):
        """End tags take no extra words."""
        with self.assertSynErr("Don't understand end: '{% end if %}'"):
            self.try_render("{% if x %}X{% end if %}")
        with self.assertSynErr("Don't understand end: '{% endif now %}'"):
            self.try_render("{% if x %}X{% endif now %}")
| apache-2.0 |
siliconsmiley/QGIS | python/plugins/fTools/tools/doSubsetSelect.py | 11 | 4818 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# fTools
# Copyright (C) 2008-2011 Carson Farmer
# EMAIL: carson.farmer (at) gmail.com
# WEB : http://www.ftools.ca/fTools.html
#
# A collection of data management and analysis tools for vector data
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import SIGNAL, QObject
from PyQt4.QtGui import QDialog, QDialogButtonBox, QMessageBox
from qgis.core import QGis, QgsFeature
import random
import ftools_utils
from ui_frmSubsetSelect import Ui_Dialog
class Dialog(QDialog, Ui_Dialog):
    """fTools 'Random selection within subsets' dialog.

    Lets the user pick a vector layer and a grouping field, then selects
    a random sample of features (a fixed count or a percentage) within
    each distinct value of that field.
    """
    def __init__(self, iface):
        """Build the dialog and populate the layer combo box."""
        QDialog.__init__(self, iface.mainWindow())
        self.iface = iface
        # Set up the user interface from Designer.
        self.setupUi(self)
        QObject.connect(self.inShape, SIGNAL("currentIndexChanged(QString)"), self.update)
        self.setWindowTitle(self.tr("Random selection within subsets"))
        self.buttonOk = self.buttonBox_2.button( QDialogButtonBox.Ok )
        # populate layer list
        self.progressBar.setValue(0)
        layers = ftools_utils.getLayerNames([QGis.Point, QGis.Line, QGis.Polygon])
        self.inShape.addItems(layers)

    def update(self, inputLayer):
        """Refresh the field combo and count limit when the layer changes."""
        self.inField.clear()
        changedLayer = ftools_utils.getVectorLayerByName(inputLayer)
        changedField = ftools_utils.getFieldList(changedLayer)
        for f in changedField:
            self.inField.addItem(unicode(f.name()))
        # Cap the absolute-count spinner at the layer's feature count.
        maxFeatures = changedLayer.dataProvider().featureCount()
        self.spnNumber.setMaximum( maxFeatures )

    def accept(self):
        """Validate the inputs and run the selection."""
        self.buttonOk.setEnabled( False )
        if self.inShape.currentText() == "":
            QMessageBox.information(self, self.tr("Random selection within subsets"), self.tr("Please specify input vector layer"))
        elif self.inField.currentText() == "":
            QMessageBox.information(self, self.tr("Random selection within subsets"), self.tr("Please specify an input field"))
        else:
            inVect = self.inShape.currentText()
            uidField = self.inField.currentText()
            if self.rdoNumber.isChecked():
                # Fixed number of features per subset.
                value = self.spnNumber.value()
                perc = False
            else:
                # Percentage of each subset's features.
                value = self.spnPercent.value()
                perc = True
            self.compute(inVect, uidField, value, perc, self.progressBar)
        self.progressBar.setValue(100)
        self.progressBar.setValue(0)
        self.buttonOk.setEnabled( True )

    def compute(self, inVect, inField, value, perc, progressBar):
        """Select a random sample of features within each subset.

        value is either an absolute count or a percentage depending on
        perc.  If every feature has a unique field value, all features
        are simply selected.
        """
        mlayer = ftools_utils.getMapLayerByName(inVect)
        mlayer.removeSelection()
        vlayer = ftools_utils.getVectorLayerByName(inVect)
        vprovider = vlayer.dataProvider()
        index = vprovider.fieldNameIndex(inField)
        unique = ftools_utils.getUniqueValues(vprovider, int(index))
        inFeat = QgsFeature()
        selran = []
        nFeat = vprovider.featureCount() * len(unique)
        nElement = 0
        self.progressBar.setValue(0)
        self.progressBar.setRange(0, nFeat)
        if not len(unique) == mlayer.featureCount():
            for i in unique:
                # One full provider scan per subset value (O(n*k)).
                fit = vprovider.getFeatures()
                FIDs= []
                while fit.nextFeature(inFeat):
                    atMap = inFeat.attributes()
                    if atMap[index] == i:
                        FID = inFeat.id()
                        FIDs.append(FID)
                    nElement += 1
                    self.progressBar.setValue(nElement)
                if perc: selVal = int(round((value / 100.0000) * len(FIDs), 0))
                else: selVal = value
                # Asking for more than exists selects the whole subset.
                if selVal >= len(FIDs): selFeat = FIDs
                else: selFeat = random.sample(FIDs, selVal)
                selran.extend(selFeat)
            mlayer.setSelectedFeatures(selran)
        else:
            # Every value unique: each subset has one feature; select all.
            mlayer.setSelectedFeatures(range(0, mlayer.featureCount()))
| gpl-2.0 |
ktnyt/chainer | tests/chainer_tests/datasets_tests/test_concatenated_dataset.py | 13 | 1854 | import numpy as np
import six
import unittest
from chainer.datasets import ConcatenatedDataset
from chainer import testing
@testing.parameterize(
    # basic usage
    {'datasets': (
        np.random.uniform(size=(5, 3, 48, 32)),
        np.random.uniform(size=(15, 3, 64, 48)),
    )},
    # more than two datasets
    {'datasets': (
        np.random.uniform(size=(5, 3, 48, 32)),
        np.random.uniform(size=(15, 3, 16, 48)),
        np.random.uniform(size=(20, 3, 5, 5)),
    )},
    # single dataset
    {'datasets': (
        np.random.uniform(size=(5, 3, 48, 32)),
    )},
    # no dataset
    {'datasets': ()},
    # some datasets are empty
    {'datasets': (
        np.random.uniform(size=(5, 3, 48, 32)),
        [],
        np.random.uniform(size=(20, 3, 5, 5)),
        [],
    )},
    # all datasets are empty
    {'datasets': ([], [], [])},
)
class TestConcatenatedDataset(unittest.TestCase):
    """Check ConcatenatedDataset against a flat list of the same samples."""

    def setUp(self):
        self.concatenated_dataset = ConcatenatedDataset(*self.datasets)
        # Reference: every sample of every dataset, in order.
        self.expected_dataset = [
            sample for dataset in self.datasets for sample in dataset]

    def test_concatenated_dataset(self):
        # Length and per-index access must match the flat concatenation.
        self.assertEqual(
            len(self.concatenated_dataset), len(self.expected_dataset))
        for i, expected in enumerate(self.expected_dataset):
            np.testing.assert_equal(self.concatenated_dataset[i], expected)

    def test_concatenated_dataset_slice(self):
        concatenated_slice = self.concatenated_dataset[1:8:2]
        # Bug fix: the original compared the concatenated slice to itself,
        # which is a tautology.  Compare against the reference list instead.
        expected_slice = self.expected_dataset[1:8:2]
        self.assertEqual(
            len(concatenated_slice), len(expected_slice))
        for concatenated, expected in six.moves.zip(
                concatenated_slice, expected_slice):
            np.testing.assert_equal(concatenated, expected)
testing.run_module(__name__, __file__)
| mit |
kivra/slackbot | tests/functional/test_functional.py | 13 | 5260 | #!/usr/bin/env python
"""This function tests would start a slackbot, and use slack web api to drive
the tests agains the bot.
"""
import os
import subprocess
import pytest
from os.path import join, abspath, dirname, basename
from tests.functional.driver import Driver
from tests.functional.settings import (
testbot_apitoken, testbot_username,
driver_apitoken, driver_username, test_channel, test_group
)
TRAVIS = 'TRAVIS' in os.environ
def stop_proxy():
    """Stop the slack proxy via the test control script (for reconnect tests)."""
    os.system('slackbot-test-ctl stopproxy')
def start_proxy():
    """Restart the slack proxy via the test control script."""
    os.system('slackbot-test-ctl startproxy')
def _start_bot_process():
    """Launch the bot under test as a subprocess and return its Popen.

    On Travis the bot is run through the test control wrapper; the API
    token and test flag are passed via environment variables.
    """
    args = [
        'python',
        'run.py',
    ]
    if TRAVIS:
        args = ['slackbot-test-ctl', 'run'] + args
    env = dict(os.environ)
    env['SLACKBOT_API_TOKEN'] = testbot_apitoken
    env['SLACKBOT_TEST'] = '1'
    return subprocess.Popen(args, env=env)
@pytest.yield_fixture(scope='module') # pylint: disable=E1101
def driver():
    """Module-scoped fixture: a Driver connected to a freshly started bot.

    Starts the driver client and the bot subprocess, waits until the bot
    is online, and terminates the bot process on teardown.
    """
    driver = Driver(driver_apitoken,
                    driver_username,
                    testbot_username,
                    test_channel,
                    test_group)
    driver.start()
    p = _start_bot_process()
    driver.wait_for_bot_online()
    yield driver
    p.terminate()
@pytest.fixture(autouse=True) # pylint: disable=E1101
def clear_events(driver):
    """Autouse fixture: drop any queued slack events before each test."""
    driver.clear_events()
def test_bot_get_online(driver): # pylint: disable=W0613
    # The driver fixture already waits for the bot; an empty body suffices.
    pass
def test_bot_respond_to_simple_message(driver):
    """Bot answers a direct message with the plugin's reply."""
    driver.send_direct_message('hello')
    driver.wait_for_bot_direct_message('hello sender!')
def test_bot_respond_to_simple_message_with_formatting(driver):
    """Reply may contain slack formatting (italics via underscores)."""
    driver.send_direct_message('hello_formatting')
    driver.wait_for_bot_direct_message('_hello_ sender!')
def test_bot_respond_to_simple_message_case_insensitive(driver):
    """Command matching ignores case."""
    driver.send_direct_message('hEllO')
    driver.wait_for_bot_direct_message('hello sender!')
def test_bot_respond_to_simple_message_multiple_plugins(driver):
    """One message can trigger several plugins; all replies arrive."""
    driver.send_direct_message('hello_formatting hello')
    driver.wait_for_bot_direct_messages({'hello sender!', '_hello_ sender!'})
def test_bot_direct_message_with_at_prefix(driver):
    """Direct messages work with an @botname prefix, colon optional."""
    driver.send_direct_message('hello', tobot=True)
    driver.wait_for_bot_direct_message('hello sender!')
    driver.send_direct_message('hello', tobot=True, colon=False)
    driver.wait_for_bot_direct_message('hello sender!')
def test_bot_default_reply(driver):
    """Unrecognized direct messages get the default help reply."""
    driver.send_direct_message('youdontunderstandthiscommand do you')
    driver.wait_for_bot_direct_message('.*You can ask me.*')
def test_bot_upload_file(driver):
    """The upload command posts a local file to slack."""
    png = join(abspath(dirname(__file__)), 'slack.png')
    driver.send_direct_message('upload %s' % png)
    driver.wait_for_bot_direct_message('uploading slack.png')
    driver.wait_for_file_uploaded('slack.png')
def test_bot_upload_file_from_link(driver):
    """The upload command also accepts an http(s) URL."""
    url = 'https://slack.com/favicon.ico'
    fname = basename(url)
    driver.send_direct_message('upload %s' % url)
    driver.wait_for_bot_direct_message('uploading %s' % fname)
def test_bot_reply_to_channel_message(driver):
    """Bot replies in-channel when addressed, colon optional."""
    driver.send_channel_message('hello')
    driver.wait_for_bot_channel_message('hello sender!')
    driver.send_channel_message('hello', colon=False)
    driver.wait_for_bot_channel_message('hello sender!')
def test_bot_listen_to_channel_message(driver):
    """listen_to plugins fire on channel traffic not addressed to the bot."""
    driver.send_channel_message('hello', tobot=False)
    driver.wait_for_bot_channel_message('hello channel!', tosender=False)
def test_bot_reply_to_group_message(driver):
    """Bot replies in private groups the same way as in channels."""
    driver.send_group_message('hello')
    driver.wait_for_bot_group_message('hello sender!')
    driver.send_group_message('hello', colon=False)
    driver.wait_for_bot_group_message('hello sender!')
def test_bot_ignores_non_related_message_response_tosender(driver):
    """Only the addressed-reply plugin responds when the bot is addressed."""
    driver.send_channel_message('hello', tobot=True)
    driver.ensure_only_specificmessage_from_bot('hello sender!', tosender=True)
def test_bot_ignores_non_related_message_response_tochannel(driver):
    """Only the listen_to plugin responds when the bot is not addressed."""
    driver.send_channel_message('hello', tobot=False)
    driver.ensure_only_specificmessage_from_bot('hello channel!', tosender=False)
def test_bot_ignores_unknown_message_noresponse_tochannel(driver):
    """Unknown channel chatter not addressed to the bot is ignored."""
    driver.send_channel_message('unknown message', tobot=False)
    driver.ensure_no_channel_reply_from_bot()
def test_bot_send_usage_unknown_message_response_tosender(driver):
    """Unknown command addressed to the bot triggers a usage reply."""
    driver.send_channel_message('unknown message', tobot=True)
    driver.ensure_only_specificmessage_from_bot('Bad command "unknown message".+', tosender=False)
def test_bot_reply_to_message_multiple_decorators(driver):
    """A handler with both respond_to and listen_to fires in all contexts."""
    driver.send_channel_message('hello_decorators')
    driver.wait_for_bot_channel_message('hello!', tosender=False)
    driver.send_channel_message('hello_decorators', tobot=False)
    driver.wait_for_bot_channel_message('hello!', tosender=False)
    driver.send_direct_message('hello_decorators')
    driver.wait_for_bot_direct_message('hello!')
@pytest.mark.skipif(not TRAVIS, reason="only run reconnect tests on travis builds") # pylint: disable=E1101
def test_bot_reconnect(driver):
    """Bot reconnects and still answers after the proxy is bounced."""
    driver.wait_for_bot_online()
    stop_proxy()
    driver.wait_for_bot_offline()
    start_proxy()
    driver.wait_for_bot_online()
    test_bot_respond_to_simple_message(driver)
| mit |
AOKP/kernel_sony_apq8064 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None
default_interval = 3
interval = default_interval

# Parse the optional positional arguments: [comm] [interval].
# With two args the first is a comm filter; with one, it is taken as
# the interval if numeric, otherwise as the comm filter.
if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

# Per-syscall-id event counts, filled by the sys_enter hook.
syscalls = autodict()
def trace_begin():
    """Perf hook called once at trace start: spawn the display thread."""
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    """Perf hook: count one sys_enter event per syscall id.

    Events from other comms are skipped when a comm filter is set.
    """
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # autodict hands back a fresh sub-dict for unseen ids; seed to 1.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Display loop, run on the worker thread started by trace_begin():
    # every `interval` seconds, clear the terminal, print the counts
    # gathered since the last refresh, then reset them for the next window.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                "----------"),
        # Highest counts first: sort on (count, id) and reverse.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # NOTE(review): presumably guards against ids that
                # syscall_name() cannot map on this kernel -- confirm.
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
kerr-huang/SL4A | python/src/Lib/test/test_filecmp.py | 77 | 5389 |
import os, filecmp, shutil, tempfile, shutil
import unittest
from test import test_support
class FileCompareTestCase(unittest.TestCase):
    """Exercise filecmp.cmp() on identical and differing scratch files."""

    def setUp(self):
        # Three scratch files: the first two identical, the third one
        # line longer than the others.
        base = test_support.TESTFN
        self.name = base
        self.name_same = base + '-same'
        self.name_diff = base + '-diff'
        data = 'Contents of file go here.\n'
        for path in (self.name, self.name_same, self.name_diff):
            # Use context managers so the files are closed even on error.
            with open(path, 'w') as output:
                output.write(data)
        with open(self.name_diff, 'a+') as output:
            output.write('An extra line.\n')
        # A directory path, used by test_different for file-vs-dir checks.
        self.dir = tempfile.gettempdir()

    def tearDown(self):
        for path in (self.name, self.name_same, self.name_diff):
            os.unlink(path)

    def test_matching(self):
        """Identical files must compare equal, shallow or deep."""
        # failUnless/failIf are deprecated aliases (removed in Python
        # 3.12); use the assertTrue/assertFalse spellings instead.
        self.assertTrue(filecmp.cmp(self.name, self.name_same),
                        "Comparing file to itself fails")
        self.assertTrue(filecmp.cmp(self.name, self.name_same, shallow=False),
                        "Comparing file to itself fails")
        self.assertTrue(filecmp.cmp(self.name, self.name, shallow=False),
                        "Comparing file to identical file fails")
        self.assertTrue(filecmp.cmp(self.name, self.name),
                        "Comparing file to identical file fails")

    def test_different(self):
        """Differing files, and file vs. directory, must compare unequal."""
        self.assertFalse(filecmp.cmp(self.name, self.name_diff),
                         "Mismatched files compare as equal")
        self.assertFalse(filecmp.cmp(self.name, self.dir),
                         "File and directory compare as equal")
class DirCompareTestCase(unittest.TestCase):
    """Exercise filecmp.cmpfiles() and filecmp.dircmp on scratch trees."""

    def setUp(self):
        tmpdir = tempfile.gettempdir()
        self.dir = os.path.join(tmpdir, 'dir')
        self.dir_same = os.path.join(tmpdir, 'dir-same')
        self.dir_diff = os.path.join(tmpdir, 'dir-diff')
        # True when this filesystem's name normalisation folds case.
        self.caseinsensitive = os.path.normcase('A') == os.path.normcase('a')
        data = 'Contents of file go here.\n'
        # Avoid shadowing the builtin `dir`; use `with` so files close.
        for directory in (self.dir, self.dir_same, self.dir_diff):
            shutil.rmtree(directory, True)  # ignore a leftover tree
            os.mkdir(directory)
            # Identity check on purpose: only dir_same gets the
            # differently-cased name, to verify case-insensitive matching.
            if self.caseinsensitive and directory is self.dir_same:
                fn = 'FiLe'
            else:
                fn = 'file'
            with open(os.path.join(directory, fn), 'w') as output:
                output.write(data)
        # dir_diff carries an extra file that the other trees lack.
        with open(os.path.join(self.dir_diff, 'file2'), 'w') as output:
            output.write('An extra file.\n')

    def tearDown(self):
        for directory in (self.dir, self.dir_same, self.dir_diff):
            shutil.rmtree(directory)

    def test_cmpfiles(self):
        """cmpfiles() must report (matches, mismatches, errors) correctly."""
        # failUnless/failIf are deprecated aliases (removed in Python
        # 3.12); use assertTrue/assertFalse instead.
        self.assertTrue(filecmp.cmpfiles(self.dir, self.dir, ['file']) ==
                            (['file'], [], []),
                        "Comparing directory to itself fails")
        self.assertTrue(filecmp.cmpfiles(self.dir, self.dir_same, ['file']) ==
                            (['file'], [], []),
                        "Comparing directory to same fails")
        # Repeat with shallow=False to force a byte-by-byte comparison.
        self.assertTrue(filecmp.cmpfiles(self.dir, self.dir, ['file'],
                                         shallow=False) ==
                            (['file'], [], []),
                        "Comparing directory to itself fails")
        self.assertTrue(filecmp.cmpfiles(self.dir, self.dir_same, ['file'],
                                         shallow=False),
                        "Comparing directory to same fails")
        # Add a file2 that only self.dir has, with different contents.
        with open(os.path.join(self.dir, 'file2'), 'w') as output:
            output.write('Different contents.\n')
        # NOTE(review): file2 is *missing* from dir_same, so cmpfiles puts
        # it in the error list, not the mismatch list; this negative check
        # is therefore weak -- consider pinning the exact result tuple.
        self.assertFalse(filecmp.cmpfiles(self.dir, self.dir_same,
                                          ['file', 'file2']) ==
                             (['file'], ['file2'], []),
                         "Comparing mismatched directories fails")

    def test_dircmp(self):
        """Check dircmp attributes for identical and differing trees."""
        # Two directories holding one identical file each.
        d = filecmp.dircmp(self.dir, self.dir_same)
        if self.caseinsensitive:
            self.assertEqual([d.left_list, d.right_list], [['file'], ['FiLe']])
        else:
            self.assertEqual([d.left_list, d.right_list], [['file'], ['file']])
        self.assertTrue(d.common == ['file'])
        self.assertTrue(d.left_only == d.right_only == [])
        self.assertTrue(d.same_files == ['file'])
        self.assertTrue(d.diff_files == [])
        # dir_diff has an extra file2 that self.dir lacks.
        d = filecmp.dircmp(self.dir, self.dir_diff)
        self.assertTrue(d.left_list == ['file'])
        self.assertTrue(d.right_list == ['file', 'file2'])
        self.assertTrue(d.common == ['file'])
        self.assertTrue(d.left_only == [])
        self.assertTrue(d.right_only == ['file2'])
        self.assertTrue(d.same_files == ['file'])
        self.assertTrue(d.diff_files == [])
        # Give self.dir its own, different, file2: now it is a diff.
        with open(os.path.join(self.dir, 'file2'), 'w') as output:
            output.write('Different contents.\n')
        d = filecmp.dircmp(self.dir, self.dir_diff)
        self.assertTrue(d.same_files == ['file'])
        self.assertTrue(d.diff_files == ['file2'])
def test_main():
    """Entry point used by regrtest: run both test-case classes."""
    cases = (FileCompareTestCase, DirCompareTestCase)
    test_support.run_unittest(*cases)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
agentr13/python-phonenumbers | python/phonenumbers/pb2/__init__.py | 5 | 2788 | """Translate python-phonenumbers PhoneNumber to/from protobuf PhoneNumber
Examples of use:
>>> import phonenumbers
>>> from phonenumbers.pb2 import phonenumber_pb2, PBToPy, PyToPB
>>> x_py = phonenumbers.PhoneNumber(country_code=44, national_number=7912345678)
>>> print x_py
Country Code: 44 National Number: 7912345678
>>> y_pb = phonenumber_pb2.PhoneNumber()
>>> y_pb.country_code = 44
>>> y_pb.national_number = 7912345678
>>> print str(y_pb).strip()
country_code: 44
national_number: 7912345678
>>> # Check italian_leading_zero default value when not set
>>> y_pb.italian_leading_zero
False
>>> y_py = PBToPy(y_pb)
>>> print y_py
Country Code: 44 National Number: 7912345678
>>> x_pb = PyToPB(x_py)
>>> print str(x_pb).strip()
country_code: 44
national_number: 7912345678
>>> x_py == y_py
True
>>> x_pb == y_pb
True
>>> # Explicitly set the field to its default
>>> y_pb.italian_leading_zero = y_pb.italian_leading_zero
>>> x_pb == y_pb
False
"""
from phonenumber_pb2 import PhoneNumber as PhoneNumberPB
from phonenumbers import PhoneNumber
def PBToPy(numpb):
    """Convert phonenumber_pb2.PhoneNumber to phonenumber.PhoneNumber.

    Fields that were never explicitly set on the protobuf become None on
    the Python object (HasField() is consulted rather than relying on
    protobuf default values).
    """
    def _field(name):
        # None when the protobuf field was never explicitly set.
        return getattr(numpb, name) if numpb.HasField(name) else None

    return PhoneNumber(_field("country_code"),
                       _field("national_number"),
                       _field("extension"),
                       _field("italian_leading_zero"),
                       _field("raw_input"),
                       _field("country_code_source"),
                       _field("preferred_domestic_carrier_code"))
def PyToPB(numobj):
    """Convert phonenumber.PhoneNumber to phonenumber_pb2.PhoneNumber.

    Only attributes that are not None on the Python object are assigned
    onto the protobuf, so its HasField() bookkeeping stays accurate.
    """
    numpb = PhoneNumberPB()
    for name in ("country_code", "national_number", "extension",
                 "italian_leading_zero", "raw_input",
                 "country_code_source", "preferred_domestic_carrier_code"):
        value = getattr(numobj, name)
        if value is not None:
            setattr(numpb, name, value)
    return numpb
# Public API: only the two converter functions.
__all__ = ['PBToPy', 'PyToPB']

if __name__ == '__main__': # pragma no cover
    # Run the examples embedded in the module docstring as doctests.
    import doctest
    doctest.testmod()
| apache-2.0 |
andgoldschmidt/iEBE | EBE-Node/iSS/for_paraview/lib/RectilinearGrid.py | 9 | 2857 | #!/usr/bin/env python
"""
RectilinearGrid
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001-05-31 17:48:54 $
Pearu Peterson
"""
import DataSet
import common
class RectilinearGrid(DataSet.DataSet):
    """
    Usage:
      RectilinearGrid(x = <sequence>, y = <sequence>, z = <sequence>)
    Attributes:
      x
      y
      z
      dimensions
    Public methods:
      get_size()
      get_cell_size()
      to_string(format = 'ascii')
      get_points()
      <DataSetAttr class>(...)
    """

    def __init__(self,x=None,y=None,z=None):
        # Each omitted axis defaults to the single coordinate [0]
        # (a degenerate, length-1 axis).
        self.x = self.get_seq(x,[0])
        self.y = self.get_seq(y,[0])
        self.z = self.get_seq(z,[0])
        # (nx, ny, nz): the lengths of the three coordinate axes.
        self.dimensions = (len(self.x),len(self.y),len(self.z))
        # NOTE(review): _check_dimensions() is inherited; it appears to
        # return a truthy value when dimensions are *invalid* -- confirm
        # against the DataSet base class.
        if self._check_dimensions():
            raise ValueError,'dimensions must be 3-tuple of ints >=1'

    def to_string(self, format='ascii'):
        # Serialize as a legacy-VTK "DATASET RECTILINEAR_GRID" section:
        # header, DIMENSIONS, then one coordinate block per axis.
        tx = self.get_datatype(self.x)
        ty = self.get_datatype(self.y)
        tz = self.get_datatype(self.z)
        ret = ['DATASET RECTILINEAR_GRID',
               'DIMENSIONS %s %s %s'%self.dimensions,
               'X_COORDINATES %s %s'%(len(self.x),tx),
               self.seq_to_string(self.x,format,tx),
               'Y_COORDINATES %s %s'%(len(self.y),ty),
               self.seq_to_string(self.y,format,ty),
               'Z_COORDINATES %s %s'%(len(self.z),tz),
               self.seq_to_string(self.z,format,tz)]
        return '\n'.join(ret)

    def get_points(self):
        # Cached: the full point list is built lazily on first request.
        if hasattr(self,'points'):
            return self.points
        # x varies fastest, z slowest -- matching the DIMENSIONS order.
        arr = [(x,y,z) for z in self.z for y in self.y for x in self.x]
        self.points = arr
        return arr
def rectilinear_grid_fromfile(f,self):
    """Parse a DATASET RECTILINEAR_GRID section from an open VTK file.

    Returns a (RectilinearGrid, next_line) pair, where next_line is the
    first line read after the grid data.
    """
    l = common._getline(f).split(' ')
    assert l[0].strip().lower() == 'dimensions'
    dims = map(eval,l[1:])
    assert len(dims)==3
    # Read the X_, Y_ and Z_COORDINATES sub-sections, in that order.
    for c in 'xyz':
        l = common._getline(f)
        k,n,datatype = [s.strip().lower() for s in l.split(' ')]
        if k!=c+'_coordinates':
            raise ValueError, 'expected %s_coordinates but got %s'%(c,`k`)
        n = eval(n)
        # The declared scalar type must be one of the legal VTK names.
        assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],`datatype`
        # Coordinate values may span several lines; keep reading until
        # the declared count has been collected.
        points = []
        while len(points) < n:
            points += map(eval,common._getline(f).split(' '))
        assert len(points)==n
        # Bind x_coords / y_coords / z_coords dynamically (Py2 exec).
        exec '%s_coords = points'%c
    # The per-axis counts must match the DIMENSIONS header.
    assert map(len,[x_coords,y_coords,z_coords]) == dims
    return RectilinearGrid(x_coords,y_coords,z_coords),common._getline(f)
if __name__ == "__main__":
    # Smoke test: build a grid with six x-coordinates and print it.
    print RectilinearGrid([1,2,2,4,4,5.4])
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.