repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
phil65/KodiDevKit | script.py | 1 | 3245 | # -*- coding: utf8 -*-
# Copyright (C) 2017 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
import os
import sys
import codecs
import logging
# File that collects the output of all skin checks.
RESULTS_FILE = "results.txt"

# Static configuration handed to the InfoProvider before running checks.
settings = {
    "kodi_path": "C:/Kodi",
    "portable_mode": True,
    "language_folders": ["resource.language.en_gb", "English"],
}
def check_tags(check_type):
    """Run the INFOS check of type *check_type* and log every finding.

    Each reported item is logged as its message followed by a
    "<parent_dir>/<file>: line <n>" location line.
    """
    for item in INFOS.get_check_listitems(check_type):
        logging.info(item["message"])
        # Keep only the last two path components for a compact location.
        short_path = "/".join(item["file"].split(os.sep)[-2:])
        logging.info("%s: line %s\n" % (short_path, item["line"]))
if __name__ == "__main__":
    from libs import utils
    from libs.infoprovider import InfoProvider
    from libs import chardet
    from libs.eol import eol
    INFOS = InfoProvider()
    INFOS.load_settings(settings)
    INFOS.load_data()
    # Log everything to RESULTS_FILE; mode="w" truncates it on each run.
    # Bug fix: the handler previously wrote to a hard-coded "result.txt"
    # while RESULTS_FILE ("results.txt") was truncated separately and then
    # left empty.
    filehandler = logging.FileHandler(RESULTS_FILE, mode="w")
    formatter = logging.Formatter('%(asctime)s - %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    filehandler.setFormatter(formatter)
    logger = logging.getLogger()
    # The root logger defaults to WARNING; without this, all the
    # logging.info() calls below would never reach the file handler.
    logger.setLevel(logging.INFO)
    logger.addHandler(filehandler)
    project_folder = sys.argv[1] if len(sys.argv) >= 2 else input("Enter Path to skin: ")
    INFOS.init_addon(project_folder)
    if len(sys.argv) < 3:
        repo = input('Enter Kodi version (%s): ' % " / ".join(item["name"] for item in INFOS.addon.RELEASES))
    else:
        repo = sys.argv[2]
    INFOS.check_xml_files()
    for path in INFOS.addon.get_xml_files():
        if utils.check_bom(path):
            logging.info("found BOM. File: " + path)
        try:
            # Strict decode first so any non-UTF-8 file is detected.
            with codecs.open(path, "rb", encoding='utf-8', errors="strict") as f:
                text = f.read()
        except Exception:
            logging.info("Error when trying to read %s as UTF-8" % path)
            with codecs.open(path, "rb", errors="ignore") as f:
                rawdata = f.read()
            encoding = chardet.detect(rawdata)
            logging.info("detected encoding: %s" % encoding["encoding"])
            with codecs.open(path, "rb", encoding=encoding["encoding"]) as f:
                text = f.read()
    result = eol.eol_info_from_path_patterns([project_folder],
                                             recursive=True,
                                             includes=[],
                                             excludes=['.svn', '.git'])
    for item in result:
        # item[1] is the detected line ending. Bug fix: the original test
        # `item[1] == '\n' or None` made the second operand always falsy,
        # so files with no detected EOL fell through to the Windows branch.
        if item[1] == '\n' or item[1] is None:
            continue
        elif item[1] == '\r':
            logging.info("MAC Line Endings detected in " + item[0])
        else:
            logging.info("Windows Line Endings detected in " + item[0])
    logging.info("ADDON DEPENDENCY CHECK")
    INFOS.check_dependencies()
    logging.info("INCLUDE CHECK")
    check_tags("include")
    logging.info("VARIABLE CHECK")
    check_tags("variable")
    logging.info("FONT CHECK")
    check_tags("font")
    logging.info("LABEL CHECK")
    check_tags("label")
    logging.info("ID CHECK")
    check_tags("id")
    logging.info("CHECK FOR COMMON MISTAKES")
    check_tags("general")
| gpl-3.0 |
hehongliang/tensorflow | tensorflow/contrib/seq2seq/python/ops/basic_decoder.py | 33 | 5371 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class of Decoders that may sample to generate the next input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.util import nest
__all__ = [
"BasicDecoderOutput",
"BasicDecoder",
]
class BasicDecoderOutput(
    collections.namedtuple("BasicDecoderOutput",
                           ("rnn_output", "sample_id"))):
  """Output of one `BasicDecoder` step: the cell output and the sampled id."""
class BasicDecoder(decoder.Decoder):
  """Basic sampling decoder.

  Wraps an `RNNCell`, a `Helper` (which supplies the sampling strategy and
  the next-step inputs) and an optional output projection layer behind the
  `decoder.Decoder` interface.
  """
  def __init__(self, cell, helper, initial_state, output_layer=None):
    """Initialize BasicDecoder.
    Args:
      cell: An `RNNCell` instance.
      helper: A `Helper` instance.
      initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
        The initial state of the RNNCell.
      output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
        `tf.layers.Dense`. Optional layer to apply to the RNN output prior
        to storing the result or sampling.
    Raises:
      TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.
    """
    # Validate collaborator types up front so failures surface at
    # construction time instead of during graph building.
    rnn_cell_impl.assert_like_rnncell("cell", cell)
    if not isinstance(helper, helper_py.Helper):
      raise TypeError("helper must be a Helper, received: %s" % type(helper))
    if (output_layer is not None
        and not isinstance(output_layer, layers_base.Layer)):
      raise TypeError(
          "output_layer must be a Layer, received: %s" % type(output_layer))
    self._cell = cell
    self._helper = helper
    self._initial_state = initial_state
    self._output_layer = output_layer
  @property
  def batch_size(self):
    # The helper owns the input tensors, so it determines the batch size.
    return self._helper.batch_size
  def _rnn_output_size(self):
    """Return the per-step output size: cell output, possibly projected."""
    size = self._cell.output_size
    if self._output_layer is None:
      return size
    else:
      # To use layer's compute_output_shape, we need to convert the
      # RNNCell's output_size entries into shapes with an unknown
      # batch size. We then pass this through the layer's
      # compute_output_shape and read off all but the first (batch)
      # dimensions to get the output size of the rnn with the layer
      # applied to the top.
      output_shape_with_unknown_batch = nest.map_structure(
          lambda s: tensor_shape.TensorShape([None]).concatenate(s),
          size)
      layer_output_shape = self._output_layer.compute_output_shape(
          output_shape_with_unknown_batch)
      return nest.map_structure(lambda s: s[1:], layer_output_shape)
  @property
  def output_size(self):
    # Return the cell output and the id
    return BasicDecoderOutput(
        rnn_output=self._rnn_output_size(),
        sample_id=self._helper.sample_ids_shape)
  @property
  def output_dtype(self):
    # Assume the dtype of the cell is the output_size structure
    # containing the input_state's first component's dtype.
    # Return that structure and the sample_ids_dtype from the helper.
    dtype = nest.flatten(self._initial_state)[0].dtype
    return BasicDecoderOutput(
        nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        self._helper.sample_ids_dtype)
  def initialize(self, name=None):
    """Initialize the decoder.
    Args:
      name: Name scope for any created operations.
    Returns:
      `(finished, first_inputs, initial_state)`.
    """
    # The helper supplies (finished, first_inputs); the decoder appends
    # its own initial RNN state to complete the triple.
    return self._helper.initialize() + (self._initial_state,)
  def step(self, time, inputs, state, name=None):
    """Perform a decoding step.
    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.
    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
    with ops.name_scope(name, "BasicDecoderStep", (time, inputs, state)):
      # One RNN step, then the optional projection, then let the helper
      # sample ids and compute the next inputs / finished flags.
      cell_outputs, cell_state = self._cell(inputs, state)
      if self._output_layer is not None:
        cell_outputs = self._output_layer(cell_outputs)
      sample_ids = self._helper.sample(
          time=time, outputs=cell_outputs, state=cell_state)
      (finished, next_inputs, next_state) = self._helper.next_inputs(
          time=time,
          outputs=cell_outputs,
          state=cell_state,
          sample_ids=sample_ids)
    outputs = BasicDecoderOutput(cell_outputs, sample_ids)
    return (outputs, next_state, next_inputs, finished)
| apache-2.0 |
TheWardoctor/Wardoctors-repo | script.module.exodus/lib/resources/lib/sources/en/tvbox.py | 5 | 4717 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,base64,hashlib,time
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
    """Exodus scraper for tvbox.ag (Python 2 code: uses `urlparse`).

    Matches movies/shows by comparing cleaned titles against the site's
    index pages, then extracts hoster links from the detail pages.
    NOTE(review): every method swallows all exceptions with a bare
    `except:` and returns None/[] — the add-on framework convention, but
    it hides scrape failures.
    """
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['tvbox.ag']
        self.base_link = 'https://tvbox.ag'
        self.search_link_tv = 'https://tvbox.ag/tvshows'
        self.search_link_movie = 'https://tvbox.ag/movies'
    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the site URL for *title*, or None if not found."""
        try:
            result = client.request(self.search_link_movie)
            # The movie index lists entries inside a 'masonry' div.
            m = client.parseDOM(result, 'div', attrs={'class': 'masonry'})[0]
            m = dom_parser.parse_dom(m, 'a', req='href')
            m = [(i.attrs['href'], i.content) for i in m]
            # Keep only links whose cleaned text matches the cleaned title.
            m = [(urlparse.urljoin(self.base_link,i[0]), i[1]) for i in m if
                 cleantitle.get(title) == cleantitle.get(i[1])]
            url = m[0][0]
            return url
        except:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return the site URL for *tvshowtitle*, or None if not found."""
        try:
            result = client.request(self.search_link_tv)
            m = client.parseDOM(result, 'div', attrs={'class': 'masonry'})[0]
            m = dom_parser.parse_dom(m, 'a', req='href')
            m = [(i.attrs['href'], i.content) for i in m]
            m = [(urlparse.urljoin(self.base_link, i[0]), i[1]) for i in m if
                 cleantitle.get(tvshowtitle) == cleantitle.get(i[1])]
            url = m[0][0]
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Return the episode page URL for the given show URL, or None."""
        try:
            if url == None: return
            url = urlparse.urljoin(self.base_link, url)
            # Retry up to three times; client.request returns None on failure.
            for i in range(3):
                result = client.request(url, timeout=10)
                if not result == None: break
            title = cleantitle.get(title)
            # Reformat the 'YYYY-MM-DD' air date as 'DD/MM/YYYY'.
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            premiered = '%s/%s/%s' % (premiered[2], premiered[1], premiered[0])
            # Isolate the table for the requested season, then the link whose
            # href ends in 'episode-<n>/'.
            result = re.findall(r'<h\d>Season\s+%s<\/h\d>(.*?<\/table>)' % season, result)[0]
            result = dom_parser.parse_dom(result, 'a', attrs={'href': re.compile('.*?episode-%s/' % episode)}, req='href')[0]
            url = result.attrs['href']
            url = url.encode('utf-8')
            return url
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Return a list of hoster-source dicts for the episode/movie URL."""
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            for i in range(3):
                result = client.request(url)
                if not result == None: break
            # Hoster links are embedded in onclick="report('<link>'...)" handlers.
            links = re.compile('onclick="report\(\'([^\']+)').findall(result)
            for link in links:
                try:
                    valid, hoster = source_utils.is_host_valid(link, hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(link, hoster)
                    for x in urls:
                        # if x['quality'] == 'SD':
                        #     try:
                        #         result = client.request(x['url'], timeout=5)
                        #         if 'HDTV' in result or '720' in result: x['quality'] = 'HD'
                        #         if '1080' in result: x['quality'] = '1080p'
                        #     except:
                        #         pass
                        sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        # URLs from this site need no further resolution.
        return url
| apache-2.0 |
ran5515/DeepDecision | tensorflow/contrib/timeseries/python/timeseries/estimators.py | 8 | 19232 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimators for time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries import model_utils
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import state_space_model
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import structural_ensemble
from tensorflow.contrib.timeseries.python.timeseries.state_space_models.filtering_postprocessor import StateInterpolatingAnomalyDetector
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.training import training as train
class _TimeSeriesRegressor(estimator_lib.Estimator):
  """An Estimator to fit and evaluate a time series model."""
  def __init__(self, model, state_manager=None, optimizer=None, model_dir=None,
               config=None):
    """Initialize the Estimator.
    Args:
      model: The time series model to wrap (inheriting from TimeSeriesModel).
      state_manager: The state manager to use, or (by default)
        PassthroughStateManager if none is needed.
      optimizer: The optimization algorithm to use when training, inheriting
        from tf.train.Optimizer. Defaults to Adam with step size 0.02.
      model_dir: See `Estimator`.
      config: See `Estimator`.
    """
    # Per-minibatch input statistics are handed to the model_fn below.
    # NOTE(review): presumably used for input normalization — confirm in
    # model_utils.make_model_fn.
    input_statistics_generator = math_utils.InputStatisticsFromMiniBatch(
        dtype=model.dtype, num_features=model.num_features)
    if state_manager is None:
      state_manager = state_management.PassthroughStateManager()
    if optimizer is None:
      optimizer = train.AdamOptimizer(0.02)
    self._model = model
    model_fn = model_utils.make_model_fn(
        model, state_manager, optimizer,
        input_statistics_generator=input_statistics_generator)
    super(_TimeSeriesRegressor, self).__init__(
        model_fn=model_fn,
        model_dir=model_dir,
        config=config)
  # TODO(allenl): A parsing input receiver function, which takes a serialized
  # tf.Example containing all features (times, values, any exogenous features)
  # and serialized model state (possibly also as a tf.Example).
  def build_raw_serving_input_receiver_fn(self,
                                          exogenous_features=None,
                                          default_batch_size=None,
                                          default_series_length=None):
    """Build an input_receiver_fn for export_savedmodel which accepts arrays.
    Args:
      exogenous_features: A dictionary mapping feature keys to exogenous
        features (either Numpy arrays or Tensors). Used to determine the shapes
        of placeholders for these features.
      default_batch_size: If specified, must be a scalar integer. Sets the batch
        size in the static shape information of all feature Tensors, which means
        only this batch size will be accepted by the exported model. If None
        (default), static shape information for batch sizes is omitted.
      default_series_length: If specified, must be a scalar integer. Sets the
        series length in the static shape information of all feature Tensors,
        which means only this series length will be accepted by the exported
        model. If None (default), static shape information for series length is
        omitted.
    Returns:
      An input_receiver_fn which may be passed to the Estimator's
      export_savedmodel.
    """
    if exogenous_features is None:
      exogenous_features = {}
    def _serving_input_receiver_fn():
      """A receiver function to be passed to export_savedmodel."""
      placeholders = {}
      # Times are always required; int64 [batch, series_length].
      placeholders[feature_keys.TrainEvalFeatures.TIMES] = (
          array_ops.placeholder(
              name=feature_keys.TrainEvalFeatures.TIMES,
              dtype=dtypes.int64,
              shape=[default_batch_size, default_series_length]))
      # Values are only necessary when filtering. For prediction the default
      # value will be ignored.
      placeholders[feature_keys.TrainEvalFeatures.VALUES] = (
          array_ops.placeholder_with_default(
              name=feature_keys.TrainEvalFeatures.VALUES,
              input=array_ops.zeros(
                  shape=[
                      default_batch_size
                      if default_batch_size else 0, default_series_length
                      if default_series_length else 0, self._model.num_features
                  ],
                  dtype=self._model.dtype),
              shape=(default_batch_size, default_series_length,
                     self._model.num_features)))
      # Exogenous feature placeholders copy the dtype and trailing shape of
      # the example value, with batch/length overridden by the defaults.
      for feature_key, feature_value in exogenous_features.items():
        value_tensor = ops.convert_to_tensor(feature_value)
        value_tensor.get_shape().with_rank_at_least(2)
        feature_shape = value_tensor.get_shape().as_list()
        feature_shape[0] = default_batch_size
        feature_shape[1] = default_series_length
        placeholders[feature_key] = array_ops.placeholder(
            dtype=value_tensor.dtype, name=feature_key, shape=feature_shape)
      # Models may not know the shape of their state without creating some
      # variables/ops. Avoid polluting the default graph by making a new one. We
      # use only static metadata from the returned Tensors.
      with ops.Graph().as_default():
        self._model.initialize_graph()
        model_start_state = self._model.get_start_state()
        for prefixed_state_name, state_tensor in model_utils.state_to_dictionary(
            model_start_state).items():
          state_shape_with_batch = tensor_shape.TensorShape(
              (default_batch_size,)).concatenate(state_tensor.get_shape())
          placeholders[prefixed_state_name] = array_ops.placeholder(
              name=prefixed_state_name,
              shape=state_shape_with_batch,
              dtype=state_tensor.dtype)
      return export_lib.ServingInputReceiver(placeholders, placeholders)
    return _serving_input_receiver_fn
class ARRegressor(_TimeSeriesRegressor):
  """An Estimator for an (optionally non-linear) autoregressive model.
  ARRegressor is a window-based model, inputting fixed windows of length
  `input_window_size` and outputting fixed windows of length
  `output_window_size`. These two parameters must add up to the window_size
  passed to the `Chunker` used to create an `input_fn` for training or
  evaluation. `RandomWindowInputFn` is suggested for both training and
  evaluation, although it may be seeded for deterministic evaluation.
  """
  def __init__(
      self, periodicities, input_window_size, output_window_size,
      num_features, num_time_buckets=10,
      loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS, hidden_layer_sizes=None,
      anomaly_prior_probability=None, anomaly_distribution=None,
      optimizer=None, model_dir=None, config=None):
    """Initialize the Estimator.
    Args:
      periodicities: periodicities of the input data, in the same units as the
        time feature. Note this can be a single value or a list of values for
        multiple periodicities.
      input_window_size: Number of past time steps of data to look at when doing
        the regression.
      output_window_size: Number of future time steps to predict. Note that
        setting it to > 1 empirically seems to give a better fit.
      num_features: The dimensionality of the time series (one for univariate,
        more than one for multivariate).
      num_time_buckets: Number of buckets into which to divide (time %
        periodicity) for generating time based features.
      loss: Loss function to use for training. Currently supported values are
        SQUARED_LOSS and NORMAL_LIKELIHOOD_LOSS. Note that for
        NORMAL_LIKELIHOOD_LOSS, we train the covariance term as well. For
        SQUARED_LOSS, the evaluation loss is reported based on un-scaled
        observations and predictions, while the training loss is computed on
        normalized data.
      hidden_layer_sizes: list of sizes of hidden layers.
      anomaly_prior_probability: If specified, constructs a mixture model under
        which anomalies (modeled with `anomaly_distribution`) have this prior
        probability. See `AnomalyMixtureARModel`.
      anomaly_distribution: May not be specified unless
        anomaly_prior_probability is specified and is not None. Controls the
        distribution of anomalies under the mixture model. Currently either
        `ar_model.AnomalyMixtureARModel.GAUSSIAN_ANOMALY` or
        `ar_model.AnomalyMixtureARModel.CAUCHY_ANOMALY`. See
        `AnomalyMixtureARModel`. Defaults to `GAUSSIAN_ANOMALY`.
      optimizer: The optimization algorithm to use when training, inheriting
        from tf.train.Optimizer. Defaults to Adagrad with step size 0.1.
      model_dir: See `Estimator`.
      config: See `Estimator`.
    Raises:
      ValueError: For invalid combinations of arguments.
    """
    if optimizer is None:
      optimizer = train.AdagradOptimizer(0.1)
    # An anomaly distribution without a prior probability is meaningless.
    if anomaly_prior_probability is None and anomaly_distribution is not None:
      raise ValueError("anomaly_prior_probability is required if "
                       "anomaly_distribution is specified.")
    if anomaly_prior_probability is None:
      # No anomaly handling requested: build the plain AR model.
      # NOTE(review): this default assignment is never used in this branch —
      # the plain ARModel below takes no anomaly arguments.
      if anomaly_distribution is None:
        anomaly_distribution = ar_model.AnomalyMixtureARModel.GAUSSIAN_ANOMALY
      model = ar_model.ARModel(
          periodicities=periodicities, num_features=num_features,
          num_time_buckets=num_time_buckets,
          input_window_size=input_window_size,
          output_window_size=output_window_size, loss=loss,
          hidden_layer_sizes=hidden_layer_sizes)
    else:
      # Anomaly-aware variant; only supports the likelihood loss.
      if loss != ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS:
        raise ValueError(
            "AnomalyMixtureARModel only supports "
            "ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS for its loss argument.")
      model = ar_model.AnomalyMixtureARModel(
          periodicities=periodicities,
          input_window_size=input_window_size,
          output_window_size=output_window_size,
          num_features=num_features,
          num_time_buckets=num_time_buckets,
          hidden_layer_sizes=hidden_layer_sizes,
          anomaly_prior_probability=anomaly_prior_probability,
          anomaly_distribution=anomaly_distribution)
    state_manager = state_management.FilteringOnlyStateManager()
    super(ARRegressor, self).__init__(
        model=model,
        state_manager=state_manager,
        optimizer=optimizer,
        model_dir=model_dir,
        config=config)
class StateSpaceRegressor(_TimeSeriesRegressor):
  """An Estimator for general state space models."""
  def __init__(self, model, state_manager=None, optimizer=None, model_dir=None,
               config=None):
    """See _TimeSeriesRegressor. Uses the ChainingStateManager by default."""
    # Reject non-state-space models up front with a clear error.
    if not isinstance(model, state_space_model.StateSpaceModel):
      raise ValueError(
          "StateSpaceRegressor only supports state space models (children of "
          "StateSpaceModel) in its `model` argument, got {}.".format(model))
    if state_manager is None:
      # State space models are sequential, so state is chained across
      # minibatches by default (unlike _TimeSeriesRegressor's passthrough).
      state_manager = state_management.ChainingStateManager()
    super(StateSpaceRegressor, self).__init__(
        model=model,
        state_manager=state_manager,
        optimizer=optimizer,
        model_dir=model_dir,
        config=config)
class StructuralEnsembleRegressor(StateSpaceRegressor):
  """An Estimator for structural time series models.
  "Structural" refers to the fact that this model explicitly accounts for
  structure in the data, such as periodicity and trends.
  `StructuralEnsembleRegressor` is a state space model. It contains components
  for modeling level, local linear trends, periodicity, and mean-reverting
  transients via a moving average component. Multivariate series are fit with
  full covariance matrices for observation and latent state transition noise,
  each feature of the multivariate series having its own latent components.
  Note that unlike `ARRegressor`, `StructuralEnsembleRegressor` is sequential,
  and so accepts variable window sizes with the same model.
  For training, `RandomWindowInputFn` is recommended as an `input_fn`. Model
  state is managed through `ChainingStateManager`: since state space models are
  inherently sequential, we save state from previous iterations to get
  approximate/eventual consistency while achieving good performance through
  batched computation.
  For evaluation, either pass a significant chunk of the series in a single
  window (e.g. set `window_size` to the whole series with
  `WholeDatasetInputFn`), or use enough random evaluation iterations to cover
  several passes through the whole dataset. Either method will ensure that stale
  saved state has been flushed.
  """
  def __init__(self,
               periodicities,
               num_features,
               cycle_num_latent_values=11,
               moving_average_order=4,
               autoregressive_order=0,
               exogenous_feature_columns=None,
               exogenous_update_condition=None,
               dtype=dtypes.float64,
               anomaly_prior_probability=None,
               optimizer=None,
               model_dir=None,
               config=None):
    """Initialize the Estimator.
    Args:
      periodicities: The expected periodicity of the data (for example 24 if
        feeding hourly data with a daily periodicity, or 60 * 24 if feeding
        minute-level data with daily periodicity). Either a scalar or a
        list. This parameter can be any real value, and does not control the
        size of the model. However, increasing this without increasing
        `num_values_per_cycle` will lead to smoother periodic behavior, as the
        same number of distinct values will be cycled through over a longer
        period of time.
      num_features: The dimensionality of the time series (one for univariate,
        more than one for multivariate).
      cycle_num_latent_values: Along with `moving_average_order` and
        `num_features`, controls the latent state size of the model. Square
        matrices of size `num_features * (moving_average_order +
        cycle_num_latent_values + 3)` are created and multiplied, so larger
        values may be slow. The trade-off is with resolution: cycling between
        a smaller number of latent values means that only smoother functions
        can be modeled.
      moving_average_order: Controls model size (along with
        `cycle_num_latent_values` and `autoregressive_order`) and the number
        of steps before transient deviations revert to the mean defined by the
        period and level/trend components.
      autoregressive_order: Each contribution from this component is a linear
        combination of this many previous contributions. Also helps to
        determine the model size. Learning autoregressive coefficients
        typically requires more steps and a smaller step size than other
        components.
      exogenous_feature_columns: A list of tf.contrib.layers.FeatureColumn
        objects (for example tf.contrib.layers.embedding_column) corresponding
        to exogenous features which provide extra information to the model but
        are not part of the series to be predicted. Passed to
        tf.contrib.layers.input_from_feature_columns.
      exogenous_update_condition: A function taking two Tensor arguments,
        `times` (shape [batch size]) and `features` (a dictionary mapping
        exogenous feature keys to Tensors with shapes [batch size, ...]), and
        returning a boolean Tensor with shape [batch size] indicating whether
        state should be updated using exogenous features for each part of the
        batch. Where it is False, no exogenous update is performed. If None
        (default), exogenous updates are always performed. Useful for avoiding
        "leaky" frequent exogenous updates when sparse updates are
        desired. Called only during graph construction. See the "known
        anomaly" example for example usage.
      dtype: The floating point data type to compute with. float32 may be
        faster, but can be problematic for larger models and longer time series.
      anomaly_prior_probability: If not None, the model attempts to
        automatically detect and ignore anomalies during training. This
        parameter then controls the prior probability of an anomaly. Values
        closer to 0 mean that points will be discarded less frequently. The
        default value (None) means that anomalies are not discarded, which may
        be slightly faster.
      optimizer: The optimization algorithm to use when training, inheriting
        from tf.train.Optimizer. Defaults to Adam with step size 0.02.
      model_dir: See `Estimator`.
      config: See `Estimator`.
    """
    if anomaly_prior_probability is not None:
      # Anomaly handling is implemented as a filtering postprocessor.
      filtering_postprocessor = StateInterpolatingAnomalyDetector(
          anomaly_prior_probability=anomaly_prior_probability)
    else:
      filtering_postprocessor = None
    state_space_model_configuration = (
        state_space_model.StateSpaceModelConfiguration(
            num_features=num_features,
            dtype=dtype,
            filtering_postprocessor=filtering_postprocessor,
            exogenous_feature_columns=exogenous_feature_columns,
            exogenous_update_condition=exogenous_update_condition))
    model = structural_ensemble.MultiResolutionStructuralEnsemble(
        cycle_num_latent_values=cycle_num_latent_values,
        moving_average_order=moving_average_order,
        autoregressive_order=autoregressive_order,
        periodicities=periodicities,
        configuration=state_space_model_configuration)
    # No state_manager is passed, so StateSpaceRegressor's default
    # ChainingStateManager is used.
    super(StructuralEnsembleRegressor, self).__init__(
        model=model,
        optimizer=optimizer,
        model_dir=model_dir,
        config=config)
| apache-2.0 |
baolocdo/python | node_modules/meanio/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape a path for use in a ninja build line.

    Already-escaped "$ " sequences are protected first so the subsequent
    space-escaping pass does not double-process them; ':' separates
    outputs from rules and must be escaped as well.
    """
    for raw, escaped in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(raw, escaped)
    return word
class Writer(object):
    """Writes a well-formed .ninja file to `output`.

    Lines longer than `width` characters are wrapped using ninja's '$'
    line-continuation, taking care never to break inside an escaped space.
    """
    def __init__(self, output, width=78):
        self.output = output
        self.width = width

    def newline(self):
        """Emit a blank line."""
        self.output.write('\n')

    def comment(self, text):
        """Emit *text* as word-wrapped '# '-prefixed comment lines."""
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        """Emit a 'key = value' binding; list values are space-joined.

        A value of None emits nothing, so optional bindings can be passed
        straight through.
        """
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def pool(self, name, depth):
        """Emit a pool declaration with its depth binding."""
        self._line('pool %s' % name)
        self.variable('depth', depth, indent=1)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, pool=None, restat=False, rspfile=None,
             rspfile_content=None, deps=None):
        """Emit a rule declaration followed by its optional bindings."""
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if pool:
            self.variable('pool', pool, indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)
        if deps:
            self.variable('deps', deps, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        """Emit a build statement; returns the list of outputs."""
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))
        if implicit:
            # '|' introduces implicit dependencies.
            implicit = map(escape_path, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            # '||' introduces order-only dependencies.
            order_only = map(escape_path, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)
        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))
        if variables:
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)
            for key, val in iterator:
                self.variable(key, val, indent=1)
        return outputs

    def include(self, path):
        self._line('include %s' % path)

    def subninja(self, path):
        self._line('subninja %s' % path)

    def default(self, paths):
        self._line('default %s' % ' '.join(self._as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        # Bug fix: use >= 0 so a '$' in column 0 is counted too. With
        # `> 0`, a leading '$' was missed, which could flip the escaped-
        # space parity check in _line and wrap inside an escape sequence.
        while dollar_index >= 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = ' ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.
            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space (odd number of '$' before it).
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break
            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break
            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]
            # Subsequent lines are continuations, so indent them.
            leading_space = ' ' * (indent+2)
        self.output.write(leading_space + text + '\n')

    def _as_list(self, value):
        """Return *value* as a list: None -> [], list -> itself, else [value]."""
        # Renamed from `input`, which shadowed the builtin.
        if value is None:
            return []
        if isinstance(value, list):
            return value
        return [value]
def escape(string):
    """Escape a string such that it can be embedded into a Ninja file without
    further interpretation.

    Newlines cannot be represented in ninja values at all, hence the assert.
    """
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # We only have one special metacharacter: '$'.
    # '$$' is ninja's escape sequence for a literal '$'.
    return string.replace('$', '$$')
| mit |
codenote/chromium-test | chrome/test/functional/chromeos_longterm_test.py | 79 | 4331 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import pyauto_functional
import pyauto
import pyauto_utils
import timer_queue
class ChromeOSLongTerm(pyauto.PyUITest):
  """Set of long running tests for ChromeOS.

  This class is comprised of several tests that perform long term tests.
  """

  def _ActivateTabWithURL(self, url):
    """Activates the window that has the given tab url.

    Args:
      url: The url of the tab to find.

    Returns:
      An array of the index values of the tab and window. Returns None if the
      tab cannot be found.
    """
    info = self.GetBrowserInfo()
    windows = info['windows']
    for window_index, window in enumerate(windows):
      tabs = window['tabs']
      for tab_index, tab in enumerate(tabs):
        # Normalize trailing slashes so 'http://a.com/' matches 'http://a.com'.
        tab['url'] = tab['url'].strip('/')
        if tab['url'] == url:
          self.ActivateTab(tab_index, window_index)
          return [tab_index, window_index]
    return None

  def _SetupLongTermWindow(self, long_term_pages):
    """Appends a list of tabs to the current active window.

    Args:
      long_term_pages: The list of urls to open.
    """
    for url in long_term_pages:
      self.AppendTab(pyauto.GURL(url))

  def _RefreshLongTermWindow(self, long_term_pages):
    """Refreshes all of the tabs from the given list.

    Args:
      long_term_pages: The list of urls to refresh.
    """
    for page in long_term_pages:
      long_index = self._ActivateTabWithURL(page)
      if not long_index:
        # BUG FIX: the original call passed no argument for the '%s'
        # placeholder, so the literal '%s' was logged instead of the url.
        logging.info('Unable to find page with url: %s.', page)
      else:
        self.ActivateTab(long_index[0], long_index[1])
        self.ReloadActiveTab(long_index[1])

  def _ConfigureNewWindow(self, pages, incognito=False):
    """Sets up a window with multiple tabs running.

    This method acts as a state machine. If a window containing a tab with
    the url of the first item of pages exists, it closes that window.
    Otherwise a new window with the urls in pages is opened.

    Args:
      pages: The list of urls to load.
      incognito: Whether a newly created window should be incognito.
    """
    page_index = self._ActivateTabWithURL(pages[0])
    if not page_index:
      # The pages do not exist; load them in a fresh window.
      if incognito:
        self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
      else:
        self.OpenNewBrowserWindow(True)
      # The new window is always the last one; AppendTab does not change the
      # window count, so it is safe to compute this once.
      last_window = self.GetBrowserWindowCount() - 1
      for url in pages:
        self.AppendTab(pyauto.GURL(url), last_window)
      # Cycle through the pages to make sure they render.
      win = self.GetBrowserInfo()['windows'][last_window]
      for tab in win['tabs']:
        self.ActivateTab(tab['index'], last_window)
        # Give the plugin time to activate
        time.sleep(1.5)
    else:
      self.CloseBrowserWindow(page_index[1])

  def testLongTerm(self):
    """Main entry point for the long term tests.

    This method will spin in a while loop forever until it encounters a
    keyboard interrupt. Other worker methods will be managed by the
    TimerQueue.
    """
    long_term_pages = ['http://news.google.com', 'http://www.engadget.com',
                       'http://www.washingtonpost.com']

    flash_pages = [
        'http://www.craftymind.com/factory/guimark2/FlashChartingTest.swf',
        'http://www.craftymind.com/factory/guimark2/FlashGamingTest.swf',
        'http://www.craftymind.com/factory/guimark2/FlashTextTest.swf']

    incognito_pages = ['http://www.msn.com', 'http://www.ebay.com',
                       'http://www.bu.edu', 'http://www.youtube.com']

    self._SetupLongTermWindow(long_term_pages)

    timers = timer_queue.TimerQueue()
    timers.AddTimer(self._ConfigureNewWindow, 90, args=(flash_pages,))
    timers.AddTimer(self._RefreshLongTermWindow, 30, args=(long_term_pages,))
    timers.AddTimer(self._ConfigureNewWindow, 15, args=(incognito_pages, True))
    timers.start()

    try:
      while True:
        # Fail visibly (instead of hanging) if the worker queue dies.
        if not timers.is_alive():
          logging.error('Timer queue died, shutting down.')
          return
        time.sleep(1)
    except KeyboardInterrupt:
      # Kill the timers
      timers.Stop()
# Run the pyauto functional test harness when executed as a script.
if __name__ == '__main__':
  pyauto_functional.Main()
| bsd-3-clause |
x2Ident/x2Ident | mitmproxy/mitmproxy/models/http.py | 2 | 8213 | from __future__ import absolute_import, print_function, division
import cgi
import warnings
from mitmproxy.models.flow import Flow
from netlib import version
from netlib.http import Headers
from netlib.http import Request
from netlib.http import Response
from netlib.http import status_codes
from netlib.tcp import Address
class MessageMixin(object):
    """Shared deprecated helpers for HTTPRequest and HTTPResponse."""

    def get_decoded_content(self):
        """
        Returns the decoded content based on the current Content-Encoding
        header.

        Doesn't change the message itself or its headers.

        .. deprecated:: use :py:attr:`content` directly instead.
        """
        warnings.warn(".get_decoded_content() is deprecated, please use .content directly instead.", DeprecationWarning)
        return self.content
class HTTPRequest(MessageMixin, Request):
    """
    A mitmproxy HTTP request.

    This is a very thin wrapper on top of :py:class:`netlib.http.Request` and
    may be removed in the future.
    """

    def __init__(
        self,
        first_line_format,
        method,
        scheme,
        host,
        port,
        path,
        http_version,
        headers,
        content,
        timestamp_start=None,
        timestamp_end=None,
        is_replay=False,
        stickycookie=False,
        stickyauth=False,
    ):
        Request.__init__(
            self,
            first_line_format,
            method,
            scheme,
            host,
            port,
            path,
            http_version,
            headers,
            content,
            timestamp_start,
            timestamp_end,
        )

        # Have this request's cookies been modified by sticky cookies or auth?
        self.stickycookie = stickycookie
        self.stickyauth = stickyauth

        # Is this request replayed?
        self.is_replay = is_replay

    def get_state(self):
        """Serialize: the netlib request state plus the mitmproxy flags."""
        state = super(HTTPRequest, self).get_state()
        state.update(
            stickycookie=self.stickycookie,
            stickyauth=self.stickyauth,
            is_replay=self.is_replay,
        )
        return state

    def set_state(self, state):
        """Restore state from :py:meth:`get_state`; pops the mitmproxy keys
        before delegating the remainder to netlib."""
        self.stickycookie = state.pop("stickycookie")
        self.stickyauth = state.pop("stickyauth")
        self.is_replay = state.pop("is_replay")
        super(HTTPRequest, self).set_state(state)

    @classmethod
    def wrap(cls, request):
        """
        Wraps an existing :py:class:`netlib.http.Request`.
        """
        # FIX: the first parameter of a classmethod is the class; it was
        # misleadingly named `self`. Constructing through `cls` also lets
        # subclasses reuse this factory.
        return cls(
            first_line_format=request.data.first_line_format,
            method=request.data.method,
            scheme=request.data.scheme,
            host=request.data.host,
            port=request.data.port,
            path=request.data.path,
            http_version=request.data.http_version,
            headers=request.data.headers,
            content=request.data.content,
            timestamp_start=request.data.timestamp_start,
            timestamp_end=request.data.timestamp_end,
        )

    def __hash__(self):
        # Identity-based hash: requests are mutable, so value hashing would
        # be unsafe.
        return id(self)
class HTTPResponse(MessageMixin, Response):
    """
    A mitmproxy HTTP response.

    This is a very thin wrapper on top of :py:class:`netlib.http.Response` and
    may be removed in the future.
    """

    def __init__(
        self,
        http_version,
        status_code,
        reason,
        headers,
        content,
        timestamp_start=None,
        timestamp_end=None,
        is_replay=False
    ):
        Response.__init__(
            self,
            http_version,
            status_code,
            reason,
            headers,
            content,
            timestamp_start=timestamp_start,
            timestamp_end=timestamp_end,
        )

        # Is this response replayed? (the original comment said "request")
        self.is_replay = is_replay
        # NOTE(review): presumably toggled elsewhere to stream the body to
        # the client instead of buffering it — confirm against proxy code.
        self.stream = False

    @classmethod
    def wrap(cls, response):
        """
        Wraps an existing :py:class:`netlib.http.Response`.
        """
        # FIX: classmethod's first parameter is the class, not an instance;
        # it was previously named `self`. Building via `cls` keeps
        # subclasses working.
        return cls(
            http_version=response.data.http_version,
            status_code=response.data.status_code,
            reason=response.data.reason,
            headers=response.data.headers,
            content=response.data.content,
            timestamp_start=response.data.timestamp_start,
            timestamp_end=response.data.timestamp_end,
        )
class HTTPFlow(Flow):
    """
    A HTTPFlow is a collection of objects representing a single HTTP
    transaction.

    Attributes:
        request: :py:class:`HTTPRequest` object
        response: :py:class:`HTTPResponse` object
        error: :py:class:`Error` object
        server_conn: :py:class:`ServerConnection` object
        client_conn: :py:class:`ClientConnection` object
        intercepted: Is this flow currently being intercepted?
        live: Does this flow have a live client connection?

    Note that it's possible for a Flow to have both a response and an error
    object. This might happen, for instance, when a response was received
    from the server, but there was an error sending it back to the client.
    """

    def __init__(self, client_conn, server_conn, live=None):
        super(HTTPFlow, self).__init__("http", client_conn, server_conn, live)

        self.request = None
        """@type: HTTPRequest"""
        self.response = None
        """@type: HTTPResponse"""

    # Registers request/response with the state (de)serialization machinery
    # inherited from Flow.
    _stateobject_attributes = Flow._stateobject_attributes.copy()
    _stateobject_attributes.update(
        request=HTTPRequest,
        response=HTTPResponse
    )

    def __repr__(self):
        # Build a template with {flow.attr} placeholders, then format once at
        # the end so all attribute access happens against this flow.
        s = "<HTTPFlow"
        for a in ("request", "response", "error", "client_conn", "server_conn"):
            if getattr(self, a, False):
                s += "\r\n %s = {flow.%s}" % (a, a)
        s += ">"
        return s.format(flow=self)

    def copy(self):
        """Copy the flow, including deep-ish copies of request and response."""
        f = super(HTTPFlow, self).copy()
        if self.request:
            f.request = self.request.copy()
        if self.response:
            f.response = self.response.copy()
        return f

    def replace(self, pattern, repl, *args, **kwargs):
        """
        Replaces a regular expression pattern with repl in both request and
        response of the flow. Encoded content will be decoded before
        replacement, and re-encoded afterwards.

        Returns the number of replacements made.
        """
        c = self.request.replace(pattern, repl, *args, **kwargs)
        if self.response:
            c += self.response.replace(pattern, repl, *args, **kwargs)
        return c
def make_error_response(status_code, message, headers=None):
    """Build a minimal HTML error response for *status_code*.

    NOTE(review): cgi.escape() does not escape quotes by default (and is
    deprecated in Python 3 in favour of html.escape); acceptable here only
    because *message* is placed in element text, not an attribute — confirm.
    """
    # Reason phrase ("Not Found", ...) for the status line and <title>.
    response = status_codes.RESPONSES.get(status_code, "Unknown")
    body = """
<html>
<head>
<title>%d %s</title>
</head>
<body>%s</body>
</html>
""".strip() % (status_code, response, cgi.escape(message))
    body = body.encode("utf8", "replace")

    if not headers:
        # NOTE(review): presumably Headers() maps underscores in kwargs to
        # dashes (Content_Length -> Content-Length) — verify in netlib.
        headers = Headers(
            Server=version.MITMPROXY,
            Connection="close",
            Content_Length=str(len(body)),
            Content_Type="text/html"
        )

    return HTTPResponse(
        b"HTTP/1.1",
        status_code,
        response,
        headers,
        body,
    )
def make_connect_request(address):
    """Build a bare CONNECT request for *address* (host, port)."""
    address = Address.wrap(address)
    return HTTPRequest(
        "authority", b"CONNECT", None, address.host, address.port, None, b"HTTP/1.1",
        Headers(), b""
    )


def make_connect_response(http_version):
    """Build the 200 reply sent to the client after a CONNECT succeeds."""
    # Do not send any response headers as it breaks proxying non-80 ports on
    # Android emulators using the -http-proxy option.
    return HTTPResponse(
        http_version,
        200,
        b"Connection established",
        Headers(),
        b"",
    )

# Canned interim response for requests carrying "Expect: 100-continue".
expect_continue_response = HTTPResponse(b"HTTP/1.1", 100, b"Continue", Headers(), b"")
| gpl-3.0 |
javrasya/luigi | luigi/contrib/hdfs/error.py | 86 | 1168 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The implementations of the hdfs clients. The hadoop cli client and the
snakebite client.
"""
class HDFSCliError(Exception):
    """Raised when a hadoop CLI command exits with a non-zero return code.

    The failing command, its exit code and both output streams are kept as
    attributes and also formatted into the exception message.
    """

    def __init__(self, command, returncode, stdout, stderr):
        # Keep the command too (previously it was only baked into the
        # message, which made programmatic inspection impossible).
        self.command = command
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
        msg = ("Command %r failed [exit code %d]\n"
               "---stdout---\n"
               "%s\n"
               "---stderr---\n"
               "%s"
               "------------") % (command, returncode, stdout, stderr)
        super(HDFSCliError, self).__init__(msg)
| apache-2.0 |
mrquim/repository.mrquim | repo/plugin.video.castaway/resources/lib/sources/p2p_sport/livefootballol_ch.py | 4 | 1950 | from __future__ import unicode_literals
from resources.lib.modules import client,convert,control
from resources.lib.modules.log_utils import log
import re, urllib,sys,os
# Filesystem locations of the installed add-on and its bundled artwork.
AddonPath = control.addonPath
IconPath = AddonPath + "/resources/media/"

def icon_path(filename):
    # Absolute path of an image shipped under resources/media/.
    return os.path.join(IconPath, filename)
class info():
    """Static descriptor for this source — presumably read by the plugin
    framework when registering sources (confirm against the source loader)."""

    def __init__(self):
        self.mode = 'livefootballol_ch'              # routing key for this source
        self.name = 'livefootballol.com (channels)'  # display name
        self.icon = 'livefootballol.png'             # file under resources/media/
        self.paginated = False     # results are not paged
        self.categorized = False   # no category tree
        self.multilink = False     # one link per entry
class main():
    """Scraper for the livefootballol.com AceStream and SopCast channel lists."""

    # Single raw-string pattern shared by both channel tables (the original
    # duplicated it as a non-raw literal, relying on '\s' not being a string
    # escape).
    _CHANNEL_RE = re.compile(
        r'<strong>(.+?)</strong></a></td>\s*<td>(.+?)</td>\s*<td>(.+?)</td>\s*<td>(.+?)</td>')

    def __init__(self, url='http://www.livefootballol.com/acestream-channel-list-new.html'):
        self.base = 'http://www.livefootballol.com/'
        self.url = url

    def channels(self):
        """Return (url, title, icon) tuples from both channel pages, sorted
        by title."""
        events = []
        for page in (self.url, 'http://www.livefootballol.com/sopcast-channel-list.html'):
            html = client.request(page, referer=self.base)
            html = convert.unescape(html.decode('utf-8'))
            events = self.__prepare_channels(self._CHANNEL_RE.findall(html), ev=events)
        events.sort(key=lambda x: x[1])
        return events

    def __prepare_channels(self, channels, ev=None):
        """Format raw regex tuples into (url, title, icon) entries, appending
        to *ev* when given."""
        # BUG FIX: the original used a mutable default argument (ev=[]),
        # which accumulated entries across successive channels() calls.
        new = [] if ev is None else ev
        for channel in channels:
            log(channel)
            url = channel[1]
            title = channel[0].replace('AceStream', '').encode('utf-8', 'xmlcharrefreplace')
            lang = channel[2].encode('utf-8', 'xmlcharrefreplace')
            bitrate = channel[3]
            title = '%s [%s] - %s kbps' % (title.decode('utf-8'), lang, bitrate)
            new.append((url, title.encode('utf-8'), icon_path(info().icon)))
        return new

    def resolve(self, url):
        import liveresolver
        return liveresolver.resolve(url, cache_timeout=0)
loveyoupeng/rt | modules/web/src/main/native/Tools/Scripts/webkitpy/tool/commands/analyzechangelog.py | 122 | 9725 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import re
import time
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.config.contributionareas import ContributionAreas
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import Executive
from webkitpy.tool.multicommandtool import Command
from webkitpy.tool import steps
class AnalyzeChangeLog(Command):
    name = "analyze-changelog"
    help_text = "Experimental command for analyzing change logs."
    long_help = "This command parses changelogs in a specified directory and summarizes the result as JSON files."

    def __init__(self):
        options = [
            steps.Options.changelog_count,
        ]
        Command.__init__(self, options=options)

    @staticmethod
    def _enumerate_changelogs(filesystem, dirname, changelog_count):
        """Return up to changelog_count ChangeLog paths in dirname, newest
        first (plain 'ChangeLog' before dated 'ChangeLog-YYYY-MM-DD')."""
        changelogs = [filesystem.join(dirname, filename) for filename in filesystem.listdir(dirname) if re.match('^ChangeLog(-(\d{4}-\d{2}-\d{2}))?$', filename)]
        # Make sure ChangeLog shows up before ChangeLog-2011-01-01
        changelogs = sorted(changelogs, key=lambda filename: filename + 'X', reverse=True)
        return changelogs[:changelog_count]

    @staticmethod
    def _generate_jsons(filesystem, jsons, output_dir):
        """Write each {filename: object} pair in *jsons* as pretty-printed
        JSON under output_dir."""
        for filename in jsons:
            print ' Generating', filename
            filesystem.write_text_file(filesystem.join(output_dir, filename), json.dumps(jsons[filename], indent=2))

    def execute(self, options, args, tool):
        """Entry point: validate args, analyze the changelogs, emit JSON and
        copy the summary.html viewer next to it."""
        filesystem = self._tool.filesystem
        if len(args) < 1 or not filesystem.exists(args[0]):
            print "Need the directory name to look for changelog as the first argument"
            return
        changelog_dir = filesystem.abspath(args[0])

        if len(args) < 2 or not filesystem.exists(args[1]):
            print "Need the output directory name as the second argument"
            return
        output_dir = args[1]

        startTime = time.time()

        print 'Enumerating ChangeLog files...'
        changelogs = AnalyzeChangeLog._enumerate_changelogs(filesystem, changelog_dir, options.changelog_count)

        analyzer = ChangeLogAnalyzer(tool, changelogs)
        analyzer.analyze()

        print 'Generating json files...'
        json_files = {
            'summary.json': analyzer.summary(),
            'contributors.json': analyzer.contributors_statistics(),
            'areas.json': analyzer.areas_statistics(),
        }
        AnalyzeChangeLog._generate_jsons(filesystem, json_files, output_dir)
        commands_dir = filesystem.dirname(filesystem.path_to_module(self.__module__))
        print commands_dir
        filesystem.copyfile(filesystem.join(commands_dir, 'data/summary.html'), filesystem.join(output_dir, 'summary.html'))

        tick = time.time() - startTime
        print 'Finished in %02dm:%02ds' % (int(tick / 60), int(tick % 60))
class ChangeLogAnalyzer(object):
    """Parses a list of ChangeLog files and accumulates per-contributor,
    per-area and overall review statistics."""

    def __init__(self, host, changelog_paths):
        self._changelog_paths = changelog_paths
        self._filesystem = host.filesystem
        self._contribution_areas = ContributionAreas(host.filesystem)
        self._scm = host.scm()
        self._parsed_revisions = {}

        self._contributors_statistics = {}
        self._areas_statistics = dict([(area, {'reviewed': 0, 'unreviewed': 0, 'contributors': {}}) for area in self._contribution_areas.names()])
        self._summary = {'reviewed': 0, 'unreviewed': 0}

        # Width of the longest checkout-relative path; used to right-align
        # the progress output.
        self._longest_filename = max([len(path) - len(self._scm.checkout_root) for path in changelog_paths])
        self._filename = ''
        self._length_of_previous_output = 0

    def contributors_statistics(self):
        return self._contributors_statistics

    def areas_statistics(self):
        return self._areas_statistics

    def summary(self):
        return self._summary

    def _print_status(self, status):
        # Rewrite the current terminal line in place: blank out the previous
        # status, then print the new one (trailing comma = no newline, py2).
        if self._length_of_previous_output:
            print "\r" + " " * self._length_of_previous_output,
        new_output = ('%' + str(self._longest_filename) + 's: %s') % (self._filename, status)
        print "\r" + new_output,
        self._length_of_previous_output = len(new_output)

    def _set_filename(self, filename):
        # Start a fresh status line for each file.
        if self._filename:
            print
        self._filename = filename

    def analyze(self):
        """Parse every ChangeLog and fill in the statistics dictionaries."""
        for path in self._changelog_paths:
            self._set_filename(self._filesystem.relpath(path, self._scm.checkout_root))
            with self._filesystem.open_text_file_for_reading(path) as changelog:
                self._print_status('Parsing entries...')
                number_of_parsed_entries = self._analyze_entries(ChangeLog.parse_entries_from_file(changelog), path)
            self._print_status('Done (%d entries)' % number_of_parsed_entries)
        print
        self._summary['contributors'] = len(self._contributors_statistics)
        self._summary['contributors_with_reviews'] = sum([1 for contributor in self._contributors_statistics.values() if contributor['reviews']['total']])
        self._summary['contributors_without_reviews'] = self._summary['contributors'] - self._summary['contributors_with_reviews']

    def _collect_statistics_for_contributor_area(self, area, contributor, contribution_type, reviewed):
        area_contributors = self._areas_statistics[area]['contributors']
        if contributor not in area_contributors:
            area_contributors[contributor] = {'reviews': 0, 'reviewed': 0, 'unreviewed': 0}
        # Patches are bucketed by review status; reviews are counted as-is.
        if contribution_type == 'patches':
            contribution_type = 'reviewed' if reviewed else 'unreviewed'
        area_contributors[contributor][contribution_type] += 1

    def _collect_statistics_for_contributor(self, contributor, contribution_type, areas, touched_files, reviewed):
        if contributor not in self._contributors_statistics:
            self._contributors_statistics[contributor] = {
                'reviews': {'total': 0, 'areas': {}, 'files': {}},
                'patches': {'reviewed': 0, 'unreviewed': 0, 'areas': {}, 'files': {}}}
        statistics = self._contributors_statistics[contributor][contribution_type]

        if contribution_type == 'reviews':
            statistics['total'] += 1
        elif reviewed:
            statistics['reviewed'] += 1
        else:
            statistics['unreviewed'] += 1

        for area in areas:
            self._increment_dictionary_value(statistics['areas'], area)
            self._collect_statistics_for_contributor_area(area, contributor, contribution_type, reviewed)
        for touchedfile in touched_files:
            self._increment_dictionary_value(statistics['files'], touchedfile)

    def _increment_dictionary_value(self, dictionary, key):
        dictionary[key] = dictionary.get(key, 0) + 1

    def _analyze_entries(self, entries, changelog_path):
        """Feed each parsed entry into the statistics; returns the number of
        entries processed."""
        dirname = self._filesystem.dirname(changelog_path)
        i = 0
        for i, entry in enumerate(entries):
            self._print_status('(%s) entries' % i)
            assert(entry.authors())

            touchedfiles_for_entry = [self._filesystem.relpath(self._filesystem.join(dirname, name), self._scm.checkout_root) for name in entry.touched_files()]
            areas_for_entry = self._contribution_areas.areas_for_touched_files(touchedfiles_for_entry)
            authors_for_entry = entry.authors()
            reviewers_for_entry = entry.reviewers()

            for reviewer in reviewers_for_entry:
                self._collect_statistics_for_contributor(reviewer.full_name, 'reviews', areas_for_entry, touchedfiles_for_entry, reviewed=True)

            for author in authors_for_entry:
                self._collect_statistics_for_contributor(author['name'], 'patches', areas_for_entry, touchedfiles_for_entry,
                    reviewed=bool(reviewers_for_entry))

            for area in areas_for_entry:
                self._areas_statistics[area]['reviewed' if reviewers_for_entry else 'unreviewed'] += 1

            self._summary['reviewed' if reviewers_for_entry else 'unreviewed'] += 1

        self._print_status('(%s) entries' % i)
        return i
| gpl-2.0 |
prakritish/ansible | lib/ansible/modules/system/seport.py | 69 | 9176 | #!/usr/bin/python
# (c) 2014, Dan Keder <dan.keder@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: seport
short_description: Manages SELinux network port type definitions
description:
    - Manages SELinux network port type definitions.
version_added: "2.0"
options:
  ports:
    description:
      - Ports or port ranges, separated by a comma
    required: true
    default: null
  proto:
    description:
      - Protocol for the specified port.
    required: true
    default: null
    choices: [ 'tcp', 'udp' ]
  setype:
    description:
      - SELinux type for the specified port.
    required: true
    default: null
  state:
    description:
      - Desired state of the SELinux port type definition.
    required: true
    default: present
    choices: [ 'present', 'absent' ]
  reload:
    description:
      - Reload SELinux policy after commit.
    required: false
    default: yes
notes:
   - The changes are persistent across reboots
   - Not tested on any debian based system
requirements: [ 'libselinux-python', 'policycoreutils-python' ]
author: Dan Keder
'''

EXAMPLES = '''
# Allow Apache to listen on tcp port 8888
- seport:
    ports: 8888
    proto: tcp
    setype: http_port_t
    state: present

# Allow sshd to listen on tcp port 8991
- seport:
    ports: 8991
    proto: tcp
    setype: ssh_port_t
    state: present

# Allow memcached to listen on tcp ports 10000-10100 and 10112
- seport:
    ports: 10000-10100,10112
    proto: tcp
    setype: memcache_port_t
    state: present
'''
# Availability flags for the SELinux python bindings. Checked in main() so
# the module can fail with a helpful message instead of an ImportError.
try:
    import selinux
    HAVE_SELINUX=True
except ImportError:
    HAVE_SELINUX=False

try:
    import seobject
    HAVE_SEOBJECT=True
except ImportError:
    HAVE_SEOBJECT=False

from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
def semanage_port_get_ports(seport, setype, proto):
    """ Get the list of ports that have the specified type definition.

    :param seport: Instance of seobject.portRecords

    :type setype: str
    :param setype: SELinux type.

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :rtype: list
    :return: List of ports that have the specified SELinux type.
    """
    # dict.get with a default replaces the membership-test-then-index dance.
    records = seport.get_all_by_type()
    return records.get((setype, proto), [])
def semanage_port_get_type(seport, port, proto):
    """ Get the SELinux type of the specified port.

    :param seport: Instance of seobject.portRecords

    :type port: str
    :param port: Port or port range (example: "8080", "8080-9090")

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :rtype: tuple
    :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
    """
    # A single port "8080" is represented as the degenerate range 8080-8080.
    low, _, high = port.partition('-')
    if not high:
        high = low
    key = (int(low), int(high), proto)
    return seport.get_all().get(key)
def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
    """ Add SELinux port type definition to the policy.

    :type module: AnsibleModule
    :param module: Ansible module

    :type ports: list
    :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :type setype: str
    :param setype: SELinux type

    :type do_reload: bool
    :param do_reload: Whether to reload SELinux policy after commit

    :type serange: str
    :param serange: SELinux MLS/MCS range (defaults to 's0')

    :type sestore: str
    :param sestore: SELinux store

    :rtype: bool
    :return: True if the policy was changed, otherwise False
    """
    # Initialized before the try so the return is well-defined even when
    # portRecords() itself raises (fail_json exits, but be defensive).
    change = False
    try:
        seport = seobject.portRecords(sestore)
        seport.set_reload(do_reload)
        ports_by_type = semanage_port_get_ports(seport, setype, proto)
        for port in ports:
            if port not in ports_by_type:
                change = True
                # add() for ports with no existing mapping, modify() to
                # retype a port already mapped to some other type.
                port_type = semanage_port_get_type(seport, port, proto)
                if not module.check_mode:
                    if port_type is None:
                        seport.add(port, proto, serange, setype)
                    else:
                        seport.modify(port, proto, serange, setype)
    # The original repeated five byte-identical except blocks; a single
    # tuple-except is equivalent and removes the duplication.
    except (IOError, KeyError, OSError, RuntimeError, ValueError):
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
    return change
def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
    """ Delete SELinux port type definition from the policy.

    :type module: AnsibleModule
    :param module: Ansible module

    :type ports: list
    :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :type setype: str
    :param setype: SELinux type.

    :type do_reload: bool
    :param do_reload: Whether to reload SELinux policy after commit

    :type sestore: str
    :param sestore: SELinux store

    :rtype: bool
    :return: True if the policy was changed, otherwise False
    """
    # See semanage_port_add: defined before the try for a well-defined return.
    change = False
    try:
        seport = seobject.portRecords(sestore)
        seport.set_reload(do_reload)
        ports_by_type = semanage_port_get_ports(seport, setype, proto)
        for port in ports:
            if port in ports_by_type:
                change = True
                if not module.check_mode:
                    seport.delete(port, proto)
    # Collapsed from five byte-identical except blocks in the original.
    except (IOError, KeyError, OSError, RuntimeError, ValueError):
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))
    return change
def main():
    """Module entry point: parse parameters and apply the requested state."""
    module = AnsibleModule(
        argument_spec={
            'ports': {
                'required': True,
            },
            'proto': {
                'required': True,
                'choices': ['tcp', 'udp'],
            },
            'setype': {
                'required': True,
            },
            'state': {
                'required': True,
                'choices': ['present', 'absent'],
            },
            'reload': {
                'required': False,
                'type': 'bool',
                'default': 'yes',
            },
        },
        supports_check_mode=True
    )

    # Fail early when the SELinux python bindings are unavailable or SELinux
    # is disabled; nothing below can work in that case.
    if not HAVE_SELINUX:
        module.fail_json(msg="This module requires libselinux-python")

    if not HAVE_SEOBJECT:
        module.fail_json(msg="This module requires policycoreutils-python")

    if not selinux.is_selinux_enabled():
        module.fail_json(msg="SELinux is disabled on this host.")

    # 'ports' may be a single value or a comma-separated list; normalize it.
    ports = [x.strip() for x in str(module.params['ports']).split(',')]
    proto = module.params['proto']
    setype = module.params['setype']
    state = module.params['state']
    do_reload = module.params['reload']

    result = {
        'ports': ports,
        'proto': proto,
        'setype': setype,
        'state': state,
    }

    if state == 'present':
        result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
    elif state == 'absent':
        result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
    else:
        # Unreachable given argument_spec choices, kept as a safety net.
        module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
mozillazg/Unidecode | unidecode/x08c.py | 251 | 4630 | data = (
'Yu ', # 0x00
'Shui ', # 0x01
'Shen ', # 0x02
'Diao ', # 0x03
'Chan ', # 0x04
'Liang ', # 0x05
'Zhun ', # 0x06
'Sui ', # 0x07
'Tan ', # 0x08
'Shen ', # 0x09
'Yi ', # 0x0a
'Mou ', # 0x0b
'Chen ', # 0x0c
'Die ', # 0x0d
'Huang ', # 0x0e
'Jian ', # 0x0f
'Xie ', # 0x10
'Nue ', # 0x11
'Ye ', # 0x12
'Wei ', # 0x13
'E ', # 0x14
'Yu ', # 0x15
'Xuan ', # 0x16
'Chan ', # 0x17
'Zi ', # 0x18
'An ', # 0x19
'Yan ', # 0x1a
'Di ', # 0x1b
'Mi ', # 0x1c
'Pian ', # 0x1d
'Xu ', # 0x1e
'Mo ', # 0x1f
'Dang ', # 0x20
'Su ', # 0x21
'Xie ', # 0x22
'Yao ', # 0x23
'Bang ', # 0x24
'Shi ', # 0x25
'Qian ', # 0x26
'Mi ', # 0x27
'Jin ', # 0x28
'Man ', # 0x29
'Zhe ', # 0x2a
'Jian ', # 0x2b
'Miu ', # 0x2c
'Tan ', # 0x2d
'Zen ', # 0x2e
'Qiao ', # 0x2f
'Lan ', # 0x30
'Pu ', # 0x31
'Jue ', # 0x32
'Yan ', # 0x33
'Qian ', # 0x34
'Zhan ', # 0x35
'Chen ', # 0x36
'Gu ', # 0x37
'Qian ', # 0x38
'Hong ', # 0x39
'Xia ', # 0x3a
'Jue ', # 0x3b
'Hong ', # 0x3c
'Han ', # 0x3d
'Hong ', # 0x3e
'Xi ', # 0x3f
'Xi ', # 0x40
'Huo ', # 0x41
'Liao ', # 0x42
'Han ', # 0x43
'Du ', # 0x44
'Long ', # 0x45
'Dou ', # 0x46
'Jiang ', # 0x47
'Qi ', # 0x48
'Shi ', # 0x49
'Li ', # 0x4a
'Deng ', # 0x4b
'Wan ', # 0x4c
'Bi ', # 0x4d
'Shu ', # 0x4e
'Xian ', # 0x4f
'Feng ', # 0x50
'Zhi ', # 0x51
'Zhi ', # 0x52
'Yan ', # 0x53
'Yan ', # 0x54
'Shi ', # 0x55
'Chu ', # 0x56
'Hui ', # 0x57
'Tun ', # 0x58
'Yi ', # 0x59
'Tun ', # 0x5a
'Yi ', # 0x5b
'Jian ', # 0x5c
'Ba ', # 0x5d
'Hou ', # 0x5e
'E ', # 0x5f
'Cu ', # 0x60
'Xiang ', # 0x61
'Huan ', # 0x62
'Jian ', # 0x63
'Ken ', # 0x64
'Gai ', # 0x65
'Qu ', # 0x66
'Fu ', # 0x67
'Xi ', # 0x68
'Bin ', # 0x69
'Hao ', # 0x6a
'Yu ', # 0x6b
'Zhu ', # 0x6c
'Jia ', # 0x6d
'[?] ', # 0x6e
'Xi ', # 0x6f
'Bo ', # 0x70
'Wen ', # 0x71
'Huan ', # 0x72
'Bin ', # 0x73
'Di ', # 0x74
'Zong ', # 0x75
'Fen ', # 0x76
'Yi ', # 0x77
'Zhi ', # 0x78
'Bao ', # 0x79
'Chai ', # 0x7a
'Han ', # 0x7b
'Pi ', # 0x7c
'Na ', # 0x7d
'Pi ', # 0x7e
'Gou ', # 0x7f
'Na ', # 0x80
'You ', # 0x81
'Diao ', # 0x82
'Mo ', # 0x83
'Si ', # 0x84
'Xiu ', # 0x85
'Huan ', # 0x86
'Kun ', # 0x87
'He ', # 0x88
'He ', # 0x89
'Mo ', # 0x8a
'Han ', # 0x8b
'Mao ', # 0x8c
'Li ', # 0x8d
'Ni ', # 0x8e
'Bi ', # 0x8f
'Yu ', # 0x90
'Jia ', # 0x91
'Tuan ', # 0x92
'Mao ', # 0x93
'Pi ', # 0x94
'Xi ', # 0x95
'E ', # 0x96
'Ju ', # 0x97
'Mo ', # 0x98
'Chu ', # 0x99
'Tan ', # 0x9a
'Huan ', # 0x9b
'Jue ', # 0x9c
'Bei ', # 0x9d
'Zhen ', # 0x9e
'Yuan ', # 0x9f
'Fu ', # 0xa0
'Cai ', # 0xa1
'Gong ', # 0xa2
'Te ', # 0xa3
'Yi ', # 0xa4
'Hang ', # 0xa5
'Wan ', # 0xa6
'Pin ', # 0xa7
'Huo ', # 0xa8
'Fan ', # 0xa9
'Tan ', # 0xaa
'Guan ', # 0xab
'Ze ', # 0xac
'Zhi ', # 0xad
'Er ', # 0xae
'Zhu ', # 0xaf
'Shi ', # 0xb0
'Bi ', # 0xb1
'Zi ', # 0xb2
'Er ', # 0xb3
'Gui ', # 0xb4
'Pian ', # 0xb5
'Bian ', # 0xb6
'Mai ', # 0xb7
'Dai ', # 0xb8
'Sheng ', # 0xb9
'Kuang ', # 0xba
'Fei ', # 0xbb
'Tie ', # 0xbc
'Yi ', # 0xbd
'Chi ', # 0xbe
'Mao ', # 0xbf
'He ', # 0xc0
'Bi ', # 0xc1
'Lu ', # 0xc2
'Ren ', # 0xc3
'Hui ', # 0xc4
'Gai ', # 0xc5
'Pian ', # 0xc6
'Zi ', # 0xc7
'Jia ', # 0xc8
'Xu ', # 0xc9
'Zei ', # 0xca
'Jiao ', # 0xcb
'Gai ', # 0xcc
'Zang ', # 0xcd
'Jian ', # 0xce
'Ying ', # 0xcf
'Xun ', # 0xd0
'Zhen ', # 0xd1
'She ', # 0xd2
'Bin ', # 0xd3
'Bin ', # 0xd4
'Qiu ', # 0xd5
'She ', # 0xd6
'Chuan ', # 0xd7
'Zang ', # 0xd8
'Zhou ', # 0xd9
'Lai ', # 0xda
'Zan ', # 0xdb
'Si ', # 0xdc
'Chen ', # 0xdd
'Shang ', # 0xde
'Tian ', # 0xdf
'Pei ', # 0xe0
'Geng ', # 0xe1
'Xian ', # 0xe2
'Mai ', # 0xe3
'Jian ', # 0xe4
'Sui ', # 0xe5
'Fu ', # 0xe6
'Tan ', # 0xe7
'Cong ', # 0xe8
'Cong ', # 0xe9
'Zhi ', # 0xea
'Ji ', # 0xeb
'Zhang ', # 0xec
'Du ', # 0xed
'Jin ', # 0xee
'Xiong ', # 0xef
'Shun ', # 0xf0
'Yun ', # 0xf1
'Bao ', # 0xf2
'Zai ', # 0xf3
'Lai ', # 0xf4
'Feng ', # 0xf5
'Cang ', # 0xf6
'Ji ', # 0xf7
'Sheng ', # 0xf8
'Ai ', # 0xf9
'Zhuan ', # 0xfa
'Fu ', # 0xfb
'Gou ', # 0xfc
'Sai ', # 0xfd
'Ze ', # 0xfe
'Liao ', # 0xff
)
| gpl-2.0 |
davidcusatis/ursula | library/neutron_router.py | 6 | 6967 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansibleworks.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
from neutronclient.neutron import client
from keystoneclient.v2_0 import client as ksclient
except ImportError:
print("failed=True msg='neutronclient and keystone client are required'")
DOCUMENTATION = '''
---
module: neutron_router
short_description: Create or Remove router from openstack
description:
- Create or Delete routers from OpenStack
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35358/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
default: None
tenant_name:
description:
- Name of the tenant for which the router has to be created, if none router would be created for the login tenant.
required: false
default: None
admin_state_up:
description:
- desired admin state of the created router .
required: false
default: true
requirements: ["neutronclient", "keystoneclient"]
'''
EXAMPLES = '''
# Creates a router for tenant admin
neutron_router: state=present login_username=admin login_password=admin login_tenant_name=admin name=router1"
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
    """Authenticate against Keystone and cache the client.

    :param module: AnsibleModule instance, used to report failures.
    :param kwargs: module parameters (login_username, login_password,
        login_tenant_name, auth_url, cacert).
    :returns: an authenticated keystoneclient Client; it is also stored in
        the module-global ``_os_keystone`` for later tenant lookups.
    """
    try:
        kclient = ksclient.Client(username=kwargs.get('login_username'),
                                  password=kwargs.get('login_password'),
                                  tenant_name=kwargs.get('login_tenant_name'),
                                  auth_url=kwargs.get('auth_url'),
                                  cacert=kwargs.get('cacert'))
    except Exception as e:
        # str(e) works on Python 2 and 3; Exception.message was removed in Py3
        module.fail_json(msg="Error authenticating to the keystone: %s " % str(e))
    global _os_keystone
    _os_keystone = kclient
    return kclient
def _get_endpoint(module, ksclient):
    """Return the public network (Neutron) endpoint from the service catalog.

    :param module: AnsibleModule instance, used to report failures.
    :param ksclient: an authenticated keystone client.
    """
    try:
        endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
    except Exception as e:
        # message fixed: it said "glance" (copy/paste from an image module)
        # although this lookup is for the network service; also use str(e)
        # instead of the Python-2-only e.message.
        module.fail_json(msg="Error getting endpoint for neutron: %s" % str(e))
    return endpoint
def _get_neutron_client(module, kwargs):
    """Build a Neutron 2.0 client from an authenticated Keystone session.

    :param module: AnsibleModule instance, used to report failures.
    :param kwargs: module parameters, forwarded to keystone authentication.
    """
    _ksclient = _get_ksclient(module, kwargs)
    token = _ksclient.auth_token
    endpoint = _get_endpoint(module, _ksclient)
    # use a fresh name instead of rebinding the `kwargs` parameter
    client_kwargs = {
        'token': token,
        'endpoint_url': endpoint
    }
    try:
        neutron = client.Client('2.0', **client_kwargs)
    except Exception as e:
        # str(e) instead of the Python-2-only e.message
        module.fail_json(msg="Error in connecting to neutron: %s " % str(e))
    return neutron
def _set_tenant_id(module):
    """Resolve the target tenant's id into the module-global ``_os_tenant_id``.

    Uses the ``tenant_name`` parameter when given, otherwise falls back to
    the login tenant.  Fails the module if no matching tenant exists.
    """
    global _os_tenant_id
    # prefer the explicitly requested tenant, else the login tenant
    tenant_name = (module.params['tenant_name'] or
                   module.params['login_tenant_name'])
    for tenant in _os_keystone.tenants.list():
        if tenant.name == tenant_name:
            _os_tenant_id = tenant.id
            break
    if not _os_tenant_id:
        # typo fixed: was "paramters"
        module.fail_json(msg="The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
    """Return the id of the router named in the module params, or None.

    Filters by both router name and the resolved tenant id.
    """
    kwargs = {
        'name': module.params['name'],
        'tenant_id': _os_tenant_id,
    }
    try:
        routers = neutron.list_routers(**kwargs)
    except Exception as e:
        # str(e) instead of the Python-2-only e.message
        module.fail_json(msg="Error in getting the router list: %s " % str(e))
    if not routers['routers']:
        return None
    return routers['routers'][0]['id']
def _create_router(module, neutron):
    """Create the router described by the module params; return its id."""
    router = {
        'name': module.params['name'],
        'tenant_id': _os_tenant_id,
        'admin_state_up': module.params['admin_state_up'],
    }
    try:
        # the API expects the payload wrapped in a {'router': ...} envelope
        new_router = neutron.create_router(dict(router=router))
    except Exception as e:
        # str(e) instead of the Python-2-only e.message
        module.fail_json(msg="Error in creating router: %s" % str(e))
    return new_router['router']['id']
def _delete_router(module, neutron, router_id):
    """Delete the router with ``router_id``; fail the module on any error."""
    try:
        neutron.delete_router(router_id)
    except Exception as e:
        # was a bare ``except:`` calling fail_json positionally; fail_json
        # takes keyword arguments (msg=...), and a bare except also traps
        # SystemExit/KeyboardInterrupt.
        module.fail_json(msg="Error in deleting the router: %s" % str(e))
    return True
def main():
    """Entry point: create or delete a Neutron router per the module params."""
    module = AnsibleModule(
        argument_spec = dict(
            login_username = dict(default='admin'),
            login_password = dict(required=True),
            # fixed: was required='True' (a truthy *string*, not the boolean
            # the argument-spec convention uses elsewhere in this spec)
            login_tenant_name = dict(required=True),
            auth_url = dict(default='http://127.0.0.1:35358/v2.0/'),
            cacert = dict(default=None),
            region_name = dict(default=None),
            name = dict(required=True),
            tenant_name = dict(default=None),
            state = dict(default='present', choices=['absent', 'present']),
            admin_state_up = dict(type='bool', default=True),
        ),
    )
    neutron = _get_neutron_client(module, module.params)
    _set_tenant_id(module)
    if module.params['state'] == 'present':
        # idempotent create: only create when no router with that name exists
        router_id = _get_router_id(module, neutron)
        if not router_id:
            router_id = _create_router(module, neutron)
            module.exit_json(changed=True, result="Created", id=router_id)
        else:
            module.exit_json(changed=False, result="success", id=router_id)
    else:
        # idempotent delete: absent state with no router is a no-op
        router_id = _get_router_id(module, neutron)
        if not router_id:
            module.exit_json(changed=False, result="success")
        else:
            _delete_router(module, neutron, router_id)
            module.exit_json(changed=True, result="deleted")
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| mit |
ErinCall/sync-engine | inbox/actions/base.py | 3 | 7381 | """ Code for propagating Inbox datastore changes to account backends.
Syncback actions don't update anything in the local datastore; the Inbox
datastore is updated asynchronously (see namespace.py) and bookkeeping about
the account backend state is updated when the changes show up in the mail sync
engine.
Dealing with write actions separately from read syncing allows us more
flexibility in responsiveness/latency on data propagation, and also makes us
unable to royally mess up a sync and e.g. accidentally delete a bunch of
messages on the account backend because our local datastore is messed up.
This read/write separation also allows us to easily disable syncback for
testing.
The main problem the separation presents is the fact that the read syncing
needs to deal with the fact that the local datastore may have new changes to
it that are not yet reflected in the account backend. In practice, this is
not really a problem because of the limited ways mail messages can change.
(For more details, see individual account backend submodules.)
ACTIONS MUST BE IDEMPOTENT! We are going to have task workers guarantee
at-least-once semantics.
"""
from inbox.actions.backends import module_registry
from inbox.models import Account, Message
from inbox.sendmail.base import generate_attachments
from inbox.sendmail.message import create_email
from nylas.logging import get_logger
log = get_logger()
def mark_unread(account_id, message_id, db_session, args):
    """Sync a message's unread flag to the account backend."""
    flag = args['unread']
    account = db_session.query(Account).get(account_id)
    backend = module_registry[account.provider]
    backend.set_remote_unread(account, message_id, db_session, flag)
def mark_starred(account_id, message_id, db_session, args):
    """Sync a message's starred flag to the account backend."""
    flag = args['starred']
    account = db_session.query(Account).get(account_id)
    backend = module_registry[account.provider]
    backend.set_remote_starred(account, message_id, db_session, flag)
def move(account_id, message_id, db_session, args):
    """Move a message to ``args['destination']`` on the remote backend."""
    target = args['destination']
    account = db_session.query(Account).get(account_id)
    backend = module_registry[account.provider]
    backend.remote_move(account, message_id, db_session, target)
def change_labels(account_id, message_id, db_session, args):
    """Apply Gmail label additions/removals to a message on the backend."""
    added = args['added_labels']
    removed = args['removed_labels']
    account = db_session.query(Account).get(account_id)
    # labels are a Gmail-only concept
    assert account.provider == 'gmail'
    backend = module_registry[account.provider]
    backend.remote_change_labels(account, message_id, db_session,
                                 removed, added)
def create_folder(account_id, category_id, db_session):
    """Create the folder for `category_id` on the account's remote backend."""
    account = db_session.query(Account).get(account_id)
    remote_create = module_registry[account.provider].remote_create_folder
    remote_create(account, category_id, db_session)
def create_label(account_id, category_id, db_session):
    """Create the label for `category_id` on the remote backend (Gmail only)."""
    account = db_session.query(Account).get(account_id)
    assert account.provider == 'gmail'
    remote_create = module_registry[account.provider].remote_create_label
    remote_create(account, category_id, db_session)
def delete_label(account_id, category_id, db_session):
    """Delete the label for `category_id` on the remote backend (Gmail only)."""
    account = db_session.query(Account).get(account_id)
    assert account.provider == 'gmail'
    remote_delete = module_registry[account.provider].remote_delete_label
    remote_delete(account, category_id, db_session)
def update_folder(account_id, category_id, db_session, args):
    """Rename a remote folder; ``args['old_name']`` identifies the old one."""
    old_name = args['old_name']
    account = db_session.query(Account).get(account_id)
    remote_update = module_registry[account.provider].remote_update_folder
    remote_update(account, category_id, db_session, old_name)
def delete_folder(account_id, category_id, db_session):
    """Delete the folder for `category_id` on the remote backend."""
    account = db_session.query(Account).get(account_id)
    remote_delete = module_registry[account.provider].remote_delete_folder
    remote_delete(account, category_id, db_session)
def update_label(account_id, category_id, db_session, args):
    """Rename a remote label (Gmail only); ``args['old_name']`` is the old name."""
    old_name = args['old_name']
    account = db_session.query(Account).get(account_id)
    assert account.provider == 'gmail'
    remote_update = module_registry[account.provider].remote_update_label
    remote_update(account, category_id, db_session, old_name)
def _create_email(account, message):
    """Build a MIME message object from a database Message row."""
    attachments = generate_attachments(
        [part.block for part in message.attachments])
    from_name, from_email = message.from_addr[0]
    return create_email(from_name=from_name,
                        from_email=from_email,
                        reply_to=message.reply_to,
                        inbox_uid=message.inbox_uid,
                        to_addr=message.to_addr,
                        cc_addr=message.cc_addr,
                        bcc_addr=message.bcc_addr,
                        subject=message.subject,
                        html=message.body,
                        in_reply_to=message.in_reply_to,
                        references=message.references,
                        attachments=attachments)
def save_draft(account_id, message_id, db_session, args):
    """Sync a new or updated draft back to the remote backend.

    Skips the sync (with a log entry) when the message no longer exists,
    is not a draft, or ``args['version']`` is stale.
    """
    account = db_session.query(Account).get(account_id)
    message = db_session.query(Message).get(message_id)
    version = args.get('version')
    if message is None:
        log.info('tried to save nonexistent message as draft',
                 message_id=message_id, account_id=account_id)
        return
    if not message.is_draft:
        log.warning('tried to save non-draft message as draft',
                    message_id=message_id,
                    account_id=account_id)
        return
    if version != message.version:
        log.warning('tried to save outdated version of draft')
        return
    mime_message = _create_email(account, message)
    backend = module_registry[account.provider]
    backend.remote_save_draft(account, mime_message, db_session,
                              message.created_at)
def delete_draft(account_id, draft_id, db_session, args):
    """Delete a draft from the remote backend.

    ``args`` must carry an `inbox_uid` or a `message_id_header` entry;
    that value is what identifies the draft on the backend.
    """
    inbox_uid = args.get('inbox_uid')
    message_id_header = args.get('message_id_header')
    assert inbox_uid or message_id_header, 'Need at least one header value'
    account = db_session.query(Account).get(account_id)
    backend = module_registry[account.provider]
    backend.remote_delete_draft(account, inbox_uid, message_id_header,
                                db_session)
def save_sent_email(account_id, message_id, db_session):
    """Create a copy of a sent message on the remote backend.

    Generic providers expect us to place the copy in the sent folder.
    """
    account = db_session.query(Account).get(account_id)
    message = db_session.query(Message).get(message_id)
    if message is None:
        log.info('tried to create nonexistent message',
                 message_id=message_id, account_id=account_id)
        return
    mime_message = _create_email(account, message)
    backend = module_registry[account.provider]
    backend.remote_save_sent(account, mime_message, message.created_at)
| agpl-3.0 |
Abjad/abjad | tests/test_NumberedPitchClass.py | 1 | 1452 | import typing
import pytest
import abjad
# Parametrization table: (constructor input, expected pitch-class number or
# exception type).  First every half-step from -24.0 to 24.0, which must wrap
# into the 0-11.5 range modulo 12.
values: typing.List[typing.Tuple] = []
values.extend((x / 2, (x / 2) % 12) for x in range(-48, 49))
# Then assorted string / pair / pitch-object inputs; "fake" must raise.
values.extend(
    [
        ("bf,", 10),
        ("c'", 0),
        ("cs'", 1),
        ("gff''", 5),
        ("", 0),
        ("dss,,", 4),
        ("fake", ValueError),
        (("bf", 2), 10),
        (("c", 4), 0),
        (("cs", 4), 1),
        (("dss", 1), 4),
        (("gff", 5), 5),
        (abjad.NamedPitch("bs'"), 0),
        (abjad.NamedPitch("c"), 0),
        (abjad.NamedPitch("cf,"), 11),
        (abjad.NamedPitch(), 0),
        (abjad.NamedPitchClass("cs'"), 1),
        (abjad.NamedPitchClass("c"), 0),
        (abjad.NamedPitchClass("cf,"), 11),
        (None, 0),
        (abjad.NumberedPitch("bs'"), 0),
        (abjad.NumberedPitch("c"), 0),
        (abjad.NumberedPitch("cf,"), 11),
        (abjad.NumberedPitch(), 0),
        (abjad.NumberedPitchClass("bs'"), 0),
        (abjad.NumberedPitchClass("c"), 0),
        (abjad.NumberedPitchClass("cf,"), 11),
    ]
)
@pytest.mark.parametrize("input_, expected_semitones", values)
def test_init(input_, expected_semitones):
    """Each input either raises the given exception type or initializes a
    NumberedPitchClass whose float value is the expected number."""
    expects_error = isinstance(expected_semitones, type) and issubclass(
        expected_semitones, Exception
    )
    if expects_error:
        with pytest.raises(expected_semitones):
            abjad.NumberedPitchClass(input_)
    else:
        pitch_class = abjad.NumberedPitchClass(input_)
        assert float(pitch_class) == expected_semitones
| gpl-3.0 |
seecr/meresco-solr | meresco/solr/fields2solrdoc.py | 1 | 2921 | ## begin license ##
#
# "Meresco Solr" is a set of components and tools
# to integrate Solr into "Meresco."
#
# Copyright (C) 2011-2013 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2012 SURF http://www.surf.nl
# Copyright (C) 2012-2013 Stichting Kennisnet http://www.kennisnet.nl
#
# This file is part of "Meresco Solr"
#
# "Meresco Solr" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Solr" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Solr"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from meresco.core import Observable
from xml.sax.saxutils import escape as escapeXml
from itertools import chain
class Fields2SolrDoc(Observable):
    """Collects (name, value) fields during a transaction and, on commit,
    renders them into a Solr ``<doc>`` part for the current record.

    Either pass `singularValueFields` (an iterable of field names) or
    `isSingularValueField` (a predicate); for such fields only the first
    value is kept.
    """
    def __init__(self, transactionName, partname="solr", singularValueFields=None, isSingularValueField=None):
        Observable.__init__(self)
        self._transactionName = transactionName
        self._partname = partname
        if singularValueFields and isSingularValueField:
            raise ValueError("Use either 'singularValueFields' or 'isSingularValueField'")
        self._isSingularValueField = isSingularValueField
        if singularValueFields:
            singularValueFields = set(singularValueFields)
            self._isSingularValueField = lambda name: name in singularValueFields
    def begin(self, name):
        # Only join transactions with the configured name.
        if name != self._transactionName:
            return
        tx = self.ctx.tx
        tx.join(self)
    def addField(self, name, value):
        tx = self.ctx.tx
        valueList = tx.objectScope(self).setdefault(name, [])
        # idiom fix: was "if not self._isSingularValueField is None:" --
        # same meaning, but "is not None" is the conventional spelling
        if self._isSingularValueField is not None:
            # keep only the first value of a field declared single-valued
            if len(valueList) == 1 and self._isSingularValueField(name):
                return
        valueList.append(value)
    def commit(self, id):
        tx = self.ctx.tx
        fields = tx.objectScope(self)
        if not fields:
            return
        recordIdentifier = tx.locals["id"]
        # '__id__' is always emitted first, carrying the record identifier
        specialFields = [
            ('__id__', recordIdentifier),
        ]
        def fieldStatement(key, value):
            return '<field name="%s">%s</field>' % (escapeXml(key), escapeXml(value))
        allFields = ((k, v) for k, vs in fields.items() for v in vs)
        xml = "<doc xmlns=''>%s</doc>" % ''.join(fieldStatement(*args) for args in chain(iter(specialFields), allFields))
        yield self.all.add(identifier=recordIdentifier, partname=self._partname, data=xml)
| gpl-2.0 |
bayusantoso/final-assignment-web-ontology | IMPLEMENTATION/Application/SourceCode/GOApps/flask/Lib/site-packages/setuptools/command/setopt.py | 458 | 5080 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os
from setuptools import Command
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
    """Get the filename of the distutils, local, global, or per-user config file.

    `kind` must be one of "local", "global", or "user"; anything else
    raises ValueError.
    """
    if kind == 'local':
        return 'setup.cfg'
    if kind == 'global':
        # distutils.cfg lives next to the distutils package itself
        return os.path.join(
            os.path.dirname(distutils.__file__), 'distutils.cfg'
        )
    if kind == 'user':
        # a dotfile (~/.pydistutils.cfg) on POSIX, plain name elsewhere;
        # idiom fix: conditional expression instead of `and/or` trick
        dot = '.' if os.name == 'posix' else ''
        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
    raise ValueError(
        "config_file() type must be 'local', 'global', or 'user'", kind
    )
def edit_config(filename, settings, dry_run=False):
    """Edit a configuration file to include `settings`

    `settings` maps section names to either ``None`` (delete the entire
    section) or a dict of options, where an option value of ``None``
    deletes that single setting.  With ``dry_run`` the file is parsed and
    mutated in memory but never written back.
    """
    from setuptools.compat import ConfigParser
    log.debug("Reading configuration from %s", filename)
    parser = ConfigParser.RawConfigParser()
    parser.read([filename])
    for section_name, option_map in settings.items():
        if option_map is None:
            log.info("Deleting section [%s] from %s", section_name, filename)
            parser.remove_section(section_name)
            continue
        if not parser.has_section(section_name):
            log.debug("Adding new section [%s] to %s", section_name, filename)
            parser.add_section(section_name)
        for key, new_value in option_map.items():
            if new_value is None:
                log.debug(
                    "Deleting %s.%s from %s",
                    section_name, key, filename
                )
                parser.remove_option(section_name, key)
                # dropping the last option removes the whole section
                if not parser.options(section_name):
                    log.info("Deleting empty [%s] section from %s",
                             section_name, filename)
                    parser.remove_section(section_name)
            else:
                log.debug(
                    "Setting %s.%s to %r in %s",
                    section_name, key, new_value, filename
                )
                parser.set(section_name, key, new_value)
    log.info("Writing %s", filename)
    if not dry_run:
        with open(filename, 'w') as stream:
            parser.write(stream)
class option_base(Command):
    """Abstract base class for commands that mess with config files"""
    # distutils-style option declarations shared by all config-editing
    # commands: choose which config file to operate on.
    user_options = [
        ('global-config', 'g',
         "save options to the site-wide distutils.cfg file"),
        ('user-config', 'u',
         "save options to the current user's pydistutils.cfg file"),
        ('filename=', 'f',
         "configuration file to use (default=setup.cfg)"),
    ]
    boolean_options = [
        'global-config', 'user-config',
    ]
    def initialize_options(self):
        """Reset all option attributes (distutils Command protocol)."""
        self.global_config = None
        self.user_config = None
        self.filename = None
    def finalize_options(self):
        """Resolve the three file options into a single self.filename."""
        # collect every file the user selected; exactly one is allowed
        filenames = []
        if self.global_config:
            filenames.append(config_file('global'))
        if self.user_config:
            filenames.append(config_file('user'))
        if self.filename is not None:
            filenames.append(self.filename)
        if not filenames:
            # default when nothing was selected: the local setup.cfg
            filenames.append(config_file('local'))
        if len(filenames) > 1:
            raise DistutilsOptionError(
                "Must specify only one configuration file option",
                filenames
            )
        # tuple-unpacking assignment asserts there is exactly one element
        self.filename, = filenames
class setopt(option_base):
    """Save command-line options to a file"""
    description = "set an option in setup.cfg or another config file"
    # command-specific options, plus the file-selection options inherited
    # from option_base
    user_options = [
        ('command=', 'c', 'command to set an option for'),
        ('option=', 'o', 'option to set'),
        ('set-value=', 's', 'value of the option'),
        ('remove', 'r', 'remove (unset) the value'),
    ] + option_base.user_options
    boolean_options = option_base.boolean_options + ['remove']
    def initialize_options(self):
        """Reset this command's options on top of the base class's."""
        option_base.initialize_options(self)
        self.command = None
        self.option = None
        self.set_value = None
        self.remove = None
    def finalize_options(self):
        """Validate that a command/option pair and an action were given."""
        option_base.finalize_options(self)
        if self.command is None or self.option is None:
            raise DistutilsOptionError("Must specify --command *and* --option")
        if self.set_value is None and not self.remove:
            raise DistutilsOptionError("Must specify --set-value or --remove")
    def run(self):
        """Apply the single requested change via edit_config.

        A removed option is represented by set_value being None; dashes in
        the option name are stored as underscores, per config convention.
        """
        edit_config(
            self.filename, {
                self.command: {self.option.replace('-', '_'): self.set_value}
            },
            self.dry_run
        )
| gpl-3.0 |
TheMutley/openpilot | pyextra/jinja2/lexer.py | 119 | 28238 | # -*- coding: utf-8 -*-
"""
jinja2.lexer
~~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache
from jinja2._compat import iteritems, implements_iterator, text_type, intern
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')
def _make_name_re():
    """Build the regex used to match identifiers in templates.

    Probes whether this interpreter accepts a non-ASCII identifier by
    compiling one; if that raises SyntaxError (Python 2 without unicode
    identifiers) an ASCII-only pattern is used, otherwise a wide pattern
    built from the XID character tables in jinja2._stringdefs.
    """
    try:
        compile('föö', '<unknown>', 'eval')
    except SyntaxError:
        # ASCII-only identifier rule
        return re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
    import jinja2
    from jinja2 import _stringdefs
    name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
                                         _stringdefs.xid_continue))
    # Save some memory here
    # (_stringdefs holds very large character-class strings; drop every
    # reference to it once the pattern is compiled)
    sys.modules.pop('jinja2._stringdefs')
    del _stringdefs
    del jinja2._stringdefs
    return name_re
# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
name_re = _make_name_re()
del _make_name_re
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')
# internal the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')
# bind operators to token types
operators = {
'+': TOKEN_ADD,
'-': TOKEN_SUB,
'/': TOKEN_DIV,
'//': TOKEN_FLOORDIV,
'*': TOKEN_MUL,
'%': TOKEN_MOD,
'**': TOKEN_POW,
'~': TOKEN_TILDE,
'[': TOKEN_LBRACKET,
']': TOKEN_RBRACKET,
'(': TOKEN_LPAREN,
')': TOKEN_RPAREN,
'{': TOKEN_LBRACE,
'}': TOKEN_RBRACE,
'==': TOKEN_EQ,
'!=': TOKEN_NE,
'>': TOKEN_GT,
'>=': TOKEN_GTEQ,
'<': TOKEN_LT,
'<=': TOKEN_LTEQ,
'=': TOKEN_ASSIGN,
'.': TOKEN_DOT,
':': TOKEN_COLON,
'|': TOKEN_PIPE,
',': TOKEN_COMMA,
';': TOKEN_SEMICOLON
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
sorted(operators, key=lambda x: -len(x))))
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
TOKEN_COMMENT_END, TOKEN_WHITESPACE,
TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
    """Return a human readable description for a token type string."""
    if token_type in reverse_operators:
        return reverse_operators[token_type]
    descriptions = {
        TOKEN_COMMENT_BEGIN: 'begin of comment',
        TOKEN_COMMENT_END: 'end of comment',
        TOKEN_COMMENT: 'comment',
        TOKEN_LINECOMMENT: 'comment',
        TOKEN_BLOCK_BEGIN: 'begin of statement block',
        TOKEN_BLOCK_END: 'end of statement block',
        TOKEN_VARIABLE_BEGIN: 'begin of print statement',
        TOKEN_VARIABLE_END: 'end of print statement',
        TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
        TOKEN_LINESTATEMENT_END: 'end of line statement',
        TOKEN_DATA: 'template data / text',
        TOKEN_EOF: 'end of template',
    }
    # unknown types describe themselves
    return descriptions.get(token_type, token_type)
def describe_token(token):
    """Returns a description of the token."""
    if token.type != 'name':
        return _describe_token_type(token.type)
    # name tokens are best described by their actual value
    return token.value
def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    if ':' not in expr:
        return _describe_token_type(expr)
    # an expression of the form "type:value"
    token_type, token_value = expr.split(':', 1)
    if token_type == 'name':
        return token_value
    return _describe_token_type(token_type)
def count_newlines(value):
    """Count the number of newline characters in the string. This is
    useful for extensions that filter a stream.
    """
    # one match per \r\n, \r or \n sequence
    return sum(1 for _ in newline_re.finditer(value))
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    e = re.escape
    # each entry: (delimiter length, rule name, escaped regex source);
    # the length is only used for sorting below and then dropped
    rules = [
        (len(environment.comment_start_string), 'comment',
         e(environment.comment_start_string)),
        (len(environment.block_start_string), 'block',
         e(environment.block_start_string)),
        (len(environment.variable_start_string), 'variable',
         e(environment.variable_start_string))
    ]
    if environment.line_statement_prefix is not None:
        # line statements only match at the start of a line (any indent)
        rules.append((len(environment.line_statement_prefix), 'linestatement',
                      r'^[ \t\v]*' + e(environment.line_statement_prefix)))
    if environment.line_comment_prefix is not None:
        # line comments match at line start or after non-whitespace
        rules.append((len(environment.line_comment_prefix), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*' +
                      e(environment.line_comment_prefix)))
    # longest delimiters first so e.g. '<%=' wins over '<%'; the length
    # component is stripped from the returned (name, pattern) pairs
    return [x[1:] for x in sorted(rules, reverse=True)]
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """
    def __init__(self, message, cls=TemplateSyntaxError):
        # message: error text; cls: exception type raised on __call__
        self.message = message
        self.error_class = cls
    def __call__(self, lineno, filename):
        # invoked by the lexer at the position where the failure occurred
        raise self.error_class(self.message, lineno, filename)
class Token(tuple):
    """Token class."""
    # tuple subclass: immutable (lineno, type, value) triple with no
    # per-instance __dict__ thanks to __slots__
    __slots__ = ()
    # read-only accessors mapping the three tuple positions to names
    lineno, type, value = (property(itemgetter(x)) for x in range(3))
    def __new__(cls, lineno, type, value):
        # the type string is interned so token types can be compared with
        # `is` against the interned TOKEN_* constants defined above
        return tuple.__new__(cls, (lineno, intern(str(type)), value))
    def __str__(self):
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == 'name':
            return self.value
        return self.type
    def test(self, expr):
        """Test a token against a token expression.  This can either be a
        token type or ``'token_type:token_value'``.  This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ':' in expr:
            return expr.split(':', 1) == [self.type, self.value]
        return False
    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False
    def __repr__(self):
        return 'Token(%r, %r, %r)' % (
            self.lineno,
            self.type,
            self.value
        )
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams.  Iterate over the stream
    until the eof token is reached.
    """
    def __init__(self, stream):
        self.stream = stream
    def __iter__(self):
        return self
    def __next__(self):
        current = self.stream.current
        # the eof token terminates iteration and closes the stream
        if current.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return current
@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\\s.  The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead.  The current active token is stored as :attr:`current`.
    """
    def __init__(self, generator, name, filename):
        # generator yields tokens; name/filename identify the template
        # in errors raised from expect()
        self._iter = iter(generator)
        self._pushed = deque()  # tokens pushed back via push(), FIFO
        self.name = name
        self.filename = filename
        self.closed = False
        self.current = Token(1, TOKEN_INITIAL, '')
        next(self)  # advance once so `current` holds the first real token
    def __iter__(self):
        return TokenStreamIterator(self)
    def __bool__(self):
        # truthy while there is anything left to consume
        return bool(self._pushed) or self.current.type is not TOKEN_EOF
    __nonzero__ = __bool__ # py2
    eos = property(lambda x: not x, doc="Are we at the end of the stream?")
    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)
    def look(self):
        """Look at the next token."""
        # advance, remember the lookahead, push it back, then restore the
        # previous current token -- net effect: one-token lookahead with
        # no observable change to the stream position
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result
    def skip(self, n=1):
        """Got n tokens ahead."""
        for x in range(n):
            next(self)
    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)
    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None
    def __next__(self):
        """Go one token ahead and return the old one"""
        rv = self.current
        if self._pushed:
            # pushed-back tokens take precedence over the generator
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()
        return rv
    def close(self):
        """Close the stream."""
        # replace `current` with an eof token and drop the generator
        self.current = Token(self.current.lineno, TOKEN_EOF, '')
        self._iter = None
        self.closed = True
    def expect(self, expr):
        """Expect a given token type and return it.  This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError('unexpected end of template, '
                                          'expected %r.' % expr,
                                          self.current.lineno,
                                          self.name, self.filename)
            raise TemplateSyntaxError("expected token %r, got %r" %
                                      (expr, describe_token(self.current)),
                                      self.current.lineno,
                                      self.name, self.filename)
        try:
            # finally runs after the return value is captured, so this
            # both returns the matched token and advances the stream
            return self.current
        finally:
            next(self)
def get_lexer(environment):
    """Return a lexer which is probably cached."""
    # every environment setting that influences lexing takes part in the
    # cache key, in this fixed order
    settings = ('block_start_string',
                'block_end_string',
                'variable_start_string',
                'variable_end_string',
                'comment_start_string',
                'comment_end_string',
                'line_statement_prefix',
                'line_comment_prefix',
                'trim_blocks',
                'lstrip_blocks',
                'newline_sequence',
                'keep_trailing_newline')
    key = tuple(getattr(environment, name) for name in settings)
    cached = _lexer_cache.get(key)
    if cached is not None:
        return cached
    lexer = Lexer(environment)
    _lexer_cache[key] = lexer
    return lexer
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment):
        """Compile all lexing rule tables from the environment's
        delimiter settings.  The resulting ``self.rules`` maps a lexer
        state name to a list of ``(compiled_regex, token(s), new_state)``
        triples consumed by :meth:`tokeniter`.
        """
        # shortcuts
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape

        # lexing rules for tags
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None)
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and '\\n?' or ''

        # strip leading spaces if lstrip_blocks is enabled
        prefix_re = {}
        if environment.lstrip_blocks:
            # use '{%+' to manually disable lstrip_blocks behavior
            no_lstrip_re = e('+')
            # detect overlap between block and variable or comment strings
            block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
            # make sure we don't mistake a block for a variable or a comment
            m = block_diff.match(environment.comment_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
            m = block_diff.match(environment.variable_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''

            # detect overlap between comment and variable strings
            comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
            m = comment_diff.match(environment.variable_start_string)
            no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''

            lstrip_re = r'^[ \t]*'
            block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
                    lstrip_re,
                    e(environment.block_start_string),
                    no_lstrip_re,
                    e(environment.block_start_string),
                    )
            comment_prefix_re = r'%s%s%s|%s\+?' % (
                    lstrip_re,
                    e(environment.comment_start_string),
                    no_variable_re,
                    e(environment.comment_start_string),
                    )
            prefix_re['block'] = block_prefix_re
            prefix_re['comment'] = comment_prefix_re
        else:
            # without lstrip_blocks only the plain delimiter starts a block
            block_prefix_re = '%s' % e(environment.block_start_string)

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # global lexing rules
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
                        e(environment.block_start_string),
                        block_prefix_re,
                        e(environment.block_end_string),
                        e(environment.block_end_string)
                    )] + [
                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
                        for n, r in root_tag_rules
                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                # data
                (c('.+'), TOKEN_DATA, None)
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (c(r'(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), TOKEN_BLOCK_END, '#pop'),
            ] + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (c(r'\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), TOKEN_VARIABLE_END, '#pop')
            ] + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    block_prefix_re,
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
            ] + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
                 TOKEN_LINECOMMENT_END), '#pop')
            ]
        }

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Tokenize *source* and wrap the resulting generator in a
        :class:`TokenStream` (values converted by :meth:`wrap`).
        """
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == 'linestatement_begin':
                token = 'block_begin'
            elif token == 'linestatement_end':
                token = 'block_end'
            # we are not interested in those tokens in the parser
            elif token in ('raw_begin', 'raw_end'):
                continue
            elif token == 'data':
                value = self._normalize_newlines(value)
            elif token == 'keyword':
                token = value
            elif token == 'name':
                value = str(value)
            elif token == 'string':
                # try to unescape string; the slice strips the surrounding
                # quote characters before escape processing
                try:
                    value = self._normalize_newlines(value[1:-1]) \
                        .encode('ascii', 'backslashreplace') \
                        .decode('unicode-escape')
                except Exception as e:
                    msg = str(e).split(':')[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
            elif token == 'integer':
                value = int(value)
            elif token == 'float':
                value = float(value)
            elif token == 'operator':
                token = operators[value]
            yield Token(lineno, token, value)

    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator.  Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()
        if self.keep_trailing_newline and source:
            # splitlines() drops a final newline; restore it by appending
            # an empty line when the source ended with one
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']

        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'

        statetokens = self.rules[stack[-1]]
        source_length = len(source)

        balancing_stack = []

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # strings as token just are yielded as it.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
| mit |
yongshengwang/hue | build/env/lib/python2.7/site-packages/MySQL_python-1.2.5-py2.7-linux-x86_64.egg/MySQLdb/cursors.py | 76 | 18276 | """MySQLdb Cursors
This module implements Cursors of various types for MySQLdb. By
default, MySQLdb uses the Cursor class.
"""
import re
import sys
try:
from types import ListType, TupleType, UnicodeType
except ImportError:
# Python 3
ListType = list
TupleType = tuple
UnicodeType = str
restr = r"""
\s
values
\s*
(
\(
[^()']*
(?:
(?:
(?:\(
# ( - editor hightlighting helper
.*
\))
|
'
[^\\']*
(?:\\.[^\\']*)*
'
)
[^()']*
)*
\)
)
"""
insert_values = re.compile(restr, re.S | re.I | re.X)
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
class BaseCursor(object):

    """A base for Cursor classes. Useful attributes:

    description
        A tuple of DB API 7-tuples describing the columns in
        the last executed query; see PEP-249 for details.

    description_flags
        Tuple of column flags for last query, one entry per column
        in the result set. Values correspond to those in
        MySQLdb.constants.FLAG. See MySQL documentation (C API)
        for more information. Non-standard extension.

    arraysize
        default number of rows fetchmany() will fetch
    """

    from _mysql_exceptions import MySQLError, Warning, Error, InterfaceError, \
         DatabaseError, DataError, OperationalError, IntegrityError, \
         InternalError, ProgrammingError, NotSupportedError

    # when True, the warning check is deferred until the result set has
    # been fully fetched (set by CursorUseResultMixIn)
    _defer_warnings = False

    def __init__(self, connection):
        from weakref import proxy
        # weak proxy: the cursor must not keep the connection alive
        self.connection = proxy(connection)
        self.description = None
        self.description_flags = None
        self.rowcount = -1
        self.arraysize = 1
        self._executed = None
        self.lastrowid = None
        self.messages = []
        self.errorhandler = connection.errorhandler
        self._result = None
        self._warnings = 0
        self._info = None
        self.rownumber = None

    def __del__(self):
        self.close()
        self.errorhandler = None
        self._result = None

    def close(self):
        """Close the cursor. No further queries will be possible."""
        if not self.connection: return
        # drain remaining result sets so the connection stays usable
        while self.nextset(): pass
        self.connection = None

    def _check_executed(self):
        # guard: fetch methods require a prior successful execute()
        if not self._executed:
            self.errorhandler(self, ProgrammingError, "execute() first")

    def _warning_check(self):
        from warnings import warn
        if self._warnings:
            warnings = self._get_db().show_warnings()
            if warnings:
                # This is done in two loops in case
                # Warnings are set to raise exceptions.
                for w in warnings:
                    self.messages.append((self.Warning, w))
                for w in warnings:
                    warn(w[-1], self.Warning, 3)
            elif self._info:
                self.messages.append((self.Warning, self._info))
                warn(self._info, self.Warning, 3)

    def nextset(self):
        """Advance to the next result set.

        Returns None if there are no more result sets.
        """
        if self._executed:
            self.fetchall()
        del self.messages[:]

        db = self._get_db()
        nr = db.next_result()
        if nr == -1:
            return None
        self._do_get_result()
        self._post_get_result()
        self._warning_check()
        return 1

    def _post_get_result(self): pass

    def _do_get_result(self):
        # refresh all per-statement metadata from the connection
        db = self._get_db()
        self._result = self._get_result()
        self.rowcount = db.affected_rows()
        self.rownumber = 0
        self.description = self._result and self._result.describe() or None
        self.description_flags = self._result and self._result.field_flags() or None
        self.lastrowid = db.insert_id()
        self._warnings = db.warning_count()
        self._info = db.info()

    def setinputsizes(self, *args):
        """Does nothing, required by DB API."""

    def setoutputsizes(self, *args):
        """Does nothing, required by DB API."""

    def _get_db(self):
        if not self.connection:
            self.errorhandler(self, ProgrammingError, "cursor closed")
        return self.connection

    def execute(self, query, args=None):
        """Execute a query.

        query -- string, query to execute on server
        args -- optional sequence or mapping, parameters to use with query.

        Note: If args is a sequence, then %s must be used as the
        parameter placeholder in the query. If a mapping is used,
        %(key)s must be used as the placeholder.

        Returns long integer rows affected, if any
        """
        del self.messages[:]
        db = self._get_db()
        if isinstance(query, unicode):
            query = query.encode(db.unicode_literal.charset)
        if args is not None:
            # parameters are escaped via db.literal() and spliced into the
            # statement with %-formatting
            if isinstance(args, dict):
                query = query % dict((key, db.literal(item))
                                     for key, item in args.iteritems())
            else:
                query = query % tuple([db.literal(item) for item in args])
        try:
            r = None
            r = self._query(query)
        except TypeError, m:
            if m.args[0] in ("not enough arguments for format string",
                             "not all arguments converted"):
                self.messages.append((ProgrammingError, m.args[0]))
                self.errorhandler(self, ProgrammingError, m.args[0])
            else:
                self.messages.append((TypeError, m))
                self.errorhandler(self, TypeError, m)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            exc, value, tb = sys.exc_info()
            del tb
            self.messages.append((exc, value))
            self.errorhandler(self, exc, value)
        self._executed = query
        if not self._defer_warnings: self._warning_check()
        return r

    def executemany(self, query, args):
        """Execute a multi-row query.

        query -- string, query to execute on server

        args

            Sequence of sequences or mappings, parameters to use with
            query.

        Returns long integer rows affected, if any.

        This method improves performance on multiple-row INSERT and
        REPLACE. Otherwise it is equivalent to looping over args with
        execute().
        """
        del self.messages[:]
        db = self._get_db()
        if not args: return
        if isinstance(query, unicode):
            query = query.encode(db.unicode_literal.charset)
        m = insert_values.search(query)
        if not m:
            # no single VALUES clause found: fall back to one execute()
            # per parameter set
            r = 0
            for a in args:
                r = r + self.execute(query, a)
            return r
        p = m.start(1)
        e = m.end(1)
        qv = m.group(1)
        try:
            q = []
            for a in args:
                if isinstance(a, dict):
                    q.append(qv % dict((key, db.literal(item))
                                       for key, item in a.iteritems()))
                else:
                    q.append(qv % tuple([db.literal(item) for item in a]))
        except TypeError, msg:
            if msg.args[0] in ("not enough arguments for format string",
                               "not all arguments converted"):
                self.errorhandler(self, ProgrammingError, msg.args[0])
            else:
                self.errorhandler(self, TypeError, msg)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            exc, value, tb = sys.exc_info()
            del tb
            self.errorhandler(self, exc, value)
        # splice every rendered VALUES tuple into one multi-row statement
        r = self._query('\n'.join([query[:p], ',\n'.join(q), query[e:]]))
        if not self._defer_warnings: self._warning_check()
        return r

    def callproc(self, procname, args=()):
        """Execute stored procedure procname with args

        procname -- string, name of procedure to execute on server

        args -- Sequence of parameters to use with procedure

        Returns the original args.

        Compatibility warning: PEP-249 specifies that any modified
        parameters must be returned. This is currently impossible
        as they are only available by storing them in a server
        variable and then retrieved by a query. Since stored
        procedures return zero or more result sets, there is no
        reliable way to get at OUT or INOUT parameters via callproc.
        The server variables are named @_procname_n, where procname
        is the parameter above and n is the position of the parameter
        (from zero). Once all result sets generated by the procedure
        have been fetched, you can issue a SELECT @_procname_0, ...
        query using .execute() to get any OUT or INOUT values.

        Compatibility warning: The act of calling a stored procedure
        itself creates an empty result set. This appears after any
        result sets generated by the procedure. This is non-standard
        behavior with respect to the DB-API. Be sure to use nextset()
        to advance through all result sets; otherwise you may get
        disconnected.
        """
        db = self._get_db()
        # pass each argument through a server-side user variable
        for index, arg in enumerate(args):
            q = "SET @_%s_%d=%s" % (procname, index,
                                    db.literal(arg))
            if isinstance(q, unicode):
                q = q.encode(db.unicode_literal.charset)
            self._query(q)
            self.nextset()

        q = "CALL %s(%s)" % (procname,
                             ','.join(['@_%s_%d' % (procname, i)
                                       for i in range(len(args))]))
        if type(q) is UnicodeType:
            q = q.encode(db.unicode_literal.charset)
        self._query(q)
        self._executed = q
        if not self._defer_warnings: self._warning_check()
        return args

    def _do_query(self, q):
        db = self._get_db()
        self._last_executed = q
        db.query(q)
        self._do_get_result()
        return self.rowcount

    def _query(self, q): return self._do_query(q)

    def _fetch_row(self, size=1):
        if not self._result:
            return ()
        # _fetch_type (set by the row mix-ins) selects the row format
        return self._result.fetch_row(size, self._fetch_type)

    def __iter__(self):
        return iter(self.fetchone, None)

    # re-export the DB API exception classes on the cursor class, a
    # PEP-249 optional extension
    Warning = Warning
    Error = Error
    InterfaceError = InterfaceError
    DatabaseError = DatabaseError
    DataError = DataError
    OperationalError = OperationalError
    IntegrityError = IntegrityError
    InternalError = InternalError
    ProgrammingError = ProgrammingError
    NotSupportedError = NotSupportedError
class CursorStoreResultMixIn(object):

    """This is a MixIn class which causes the entire result set to be
    stored on the client side, i.e. it uses mysql_store_result(). If the
    result set can be very large, consider adding a LIMIT clause to your
    query, or using CursorUseResultMixIn instead."""

    def _get_result(self): return self._get_db().store_result()

    def _query(self, q):
        rowcount = self._do_query(q)
        self._post_get_result()
        return rowcount

    def _post_get_result(self):
        # materialize the whole result set client-side, then drop the
        # underlying result handle
        self._rows = self._fetch_row(0)
        self._result = None

    def fetchone(self):
        """Fetches a single row from the cursor. None indicates that
        no more rows are available."""
        self._check_executed()
        if self.rownumber >= len(self._rows): return None
        result = self._rows[self.rownumber]
        self.rownumber = self.rownumber+1
        return result

    def fetchmany(self, size=None):
        """Fetch up to size rows from the cursor. Result set may be smaller
        than size. If size is not defined, cursor.arraysize is used."""
        self._check_executed()
        end = self.rownumber + (size or self.arraysize)
        result = self._rows[self.rownumber:end]
        # clamp so rownumber never runs past the stored rows
        self.rownumber = min(end, len(self._rows))
        return result

    def fetchall(self):
        """Fetchs all available rows from the cursor."""
        self._check_executed()
        if self.rownumber:
            result = self._rows[self.rownumber:]
        else:
            result = self._rows
        self.rownumber = len(self._rows)
        return result

    def scroll(self, value, mode='relative'):
        """Scroll the cursor in the result set to a new position according
        to mode.

        If mode is 'relative' (default), value is taken as offset to
        the current position in the result set, if set to 'absolute',
        value states an absolute target position."""
        self._check_executed()
        if mode == 'relative':
            r = self.rownumber + value
        elif mode == 'absolute':
            r = value
        else:
            self.errorhandler(self, ProgrammingError,
                              "unknown scroll mode %s" % repr(mode))
        if r < 0 or r >= len(self._rows):
            self.errorhandler(self, IndexError, "out of range")
        self.rownumber = r

    def __iter__(self):
        self._check_executed()
        result = self.rownumber and self._rows[self.rownumber:] or self._rows
        return iter(result)
class CursorUseResultMixIn(object):

    """This is a MixIn class which causes the result set to be stored
    in the server and sent row-by-row to client side, i.e. it uses
    mysql_use_result(). You MUST retrieve the entire result set and
    close() the cursor before additional queries can be peformed on
    the connection."""

    # warnings are only available once the whole result set was fetched
    _defer_warnings = True

    def _get_result(self): return self._get_db().use_result()

    def fetchone(self):
        """Fetches a single row from the cursor."""
        self._check_executed()
        r = self._fetch_row(1)
        if not r:
            # end of result set reached: surface deferred warnings now
            self._warning_check()
            return None
        self.rownumber = self.rownumber + 1
        return r[0]

    def fetchmany(self, size=None):
        """Fetch up to size rows from the cursor. Result set may be smaller
        than size. If size is not defined, cursor.arraysize is used."""
        self._check_executed()
        r = self._fetch_row(size or self.arraysize)
        self.rownumber = self.rownumber + len(r)
        if not r:
            self._warning_check()
        return r

    def fetchall(self):
        """Fetchs all available rows from the cursor."""
        self._check_executed()
        r = self._fetch_row(0)
        self.rownumber = self.rownumber + len(r)
        self._warning_check()
        return r

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol
        row = self.fetchone()
        if row is None:
            raise StopIteration
        return row
class CursorTupleRowsMixIn(object):

    """This is a MixIn class that causes all rows to be returned as tuples,
    which is the standard form required by DB API."""

    # row format selector passed to _fetch_row()
    _fetch_type = 0
class CursorDictRowsMixIn(object):

    """This is a MixIn class that causes all rows to be returned as
    dictionaries. This is a non-standard feature."""

    # row format selector passed to _fetch_row(); 1 selects dictionary rows
    _fetch_type = 1

    def fetchoneDict(self):
        """Fetch a single row as a dictionary. Deprecated:
        Use fetchone() instead. Will be removed in 1.3."""
        from warnings import warn
        warn("fetchoneDict() is non-standard and will be removed in 1.3",
             DeprecationWarning, 2)
        return self.fetchone()

    def fetchmanyDict(self, size=None):
        """Fetch several rows as a list of dictionaries. Deprecated:
        Use fetchmany() instead. Will be removed in 1.3."""
        from warnings import warn
        warn("fetchmanyDict() is non-standard and will be removed in 1.3",
             DeprecationWarning, 2)
        return self.fetchmany(size)

    def fetchallDict(self):
        """Fetch all available rows as a list of dictionaries. Deprecated:
        Use fetchall() instead. Will be removed in 1.3."""
        from warnings import warn
        warn("fetchallDict() is non-standard and will be removed in 1.3",
             DeprecationWarning, 2)
        return self.fetchall()
class CursorOldDictRowsMixIn(CursorDictRowsMixIn):

    """This is a MixIn class that returns rows as dictionaries with
    the same key convention as the old Mysqldb (MySQLmodule). Don't
    use this."""

    # legacy dictionary key convention; kept for backwards compatibility
    _fetch_type = 2
# tuple rows + client-side (stored) result set
class Cursor(CursorStoreResultMixIn, CursorTupleRowsMixIn,
             BaseCursor):

    """This is the standard Cursor class that returns rows as tuples
    and stores the result set in the client."""
# dictionary rows + client-side (stored) result set
class DictCursor(CursorStoreResultMixIn, CursorDictRowsMixIn,
                 BaseCursor):

    """This is a Cursor class that returns rows as dictionaries and
    stores the result set in the client."""
# tuple rows + server-side (use_result) result set
class SSCursor(CursorUseResultMixIn, CursorTupleRowsMixIn,
               BaseCursor):

    """This is a Cursor class that returns rows as tuples and stores
    the result set in the server."""
# dictionary rows + server-side (use_result) result set
class SSDictCursor(CursorUseResultMixIn, CursorDictRowsMixIn,
                   BaseCursor):

    """This is a Cursor class that returns rows as dictionaries and
    stores the result set in the server."""
| apache-2.0 |
jhayworth/config | .emacs.d/elpy/rpc-venv/local/lib/python2.7/encodings/iso8859_8.py | 593 | 11292 | """ Python Character Mapping Codec iso8859_8 generated from 'MAPPINGS/ISO8859/8859-8.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless iso8859-8 codec backed by the module's charmap tables."""

    def encode(self,input,errors='strict'):
        # returns the (encoded_bytes, length_consumed) pair of the
        # codec protocol
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # charmap encoding is stateless, so incremental == one-shot
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # charmap decoding is stateless, so incremental == one-shot
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # inherits encode() from Codec; no extra state needed
    pass
class StreamReader(Codec,codecs.StreamReader):
    # inherits decode() from Codec; no extra state needed
    pass
### encodings module API
def getregentry():
    """Return the :class:`codecs.CodecInfo` entry registered for 'iso8859-8'."""
    return codecs.CodecInfo(
        name='iso8859-8',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\ufffe'
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xd7' # 0xAA -> MULTIPLICATION SIGN
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xf7' # 0xBA -> DIVISION SIGN
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u2017' # 0xDF -> DOUBLE LOW LINE
u'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
u'\u05d1' # 0xE1 -> HEBREW LETTER BET
u'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
u'\u05d3' # 0xE3 -> HEBREW LETTER DALET
u'\u05d4' # 0xE4 -> HEBREW LETTER HE
u'\u05d5' # 0xE5 -> HEBREW LETTER VAV
u'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0xE7 -> HEBREW LETTER HET
u'\u05d8' # 0xE8 -> HEBREW LETTER TET
u'\u05d9' # 0xE9 -> HEBREW LETTER YOD
u'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
u'\u05db' # 0xEB -> HEBREW LETTER KAF
u'\u05dc' # 0xEC -> HEBREW LETTER LAMED
u'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
u'\u05de' # 0xEE -> HEBREW LETTER MEM
u'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0xF0 -> HEBREW LETTER NUN
u'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
u'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0xF4 -> HEBREW LETTER PE
u'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
u'\u05e7' # 0xF7 -> HEBREW LETTER QOF
u'\u05e8' # 0xF8 -> HEBREW LETTER RESH
u'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
u'\u05ea' # 0xFA -> HEBREW LETTER TAV
u'\ufffe'
u'\ufffe'
u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
u'\ufffe'
)
### Encoding table
# reverse (unicode -> byte) map derived from the decoding table above
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
mmazanec22/too-windy | env/lib/python3.5/site-packages/requests/compat.py | 134 | 1627 | # -*- coding: utf-8 -*-
"""
requests.compat
~~~~~~~~~~~~~~~
This module handles import compatibility issues between Python 2 and
Python 3.
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
integer_types = (int, long)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
integer_types = (int,)
| gpl-3.0 |
skycucumber/xuemc | python/venv/lib/python2.7/site-packages/jinja2/bccache.py | 256 | 12289 | # -*- coding: utf-8 -*-
"""
jinja2.bccache
~~~~~~~~~~~~~~
This module implements the bytecode cache system Jinja is optionally
using. This is useful if you have very complex template situations and
the compiliation of all those templates slow down your application too
much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from os import path, listdir
import os
import stat
import sys
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type
# marshal works better on 3.x, one hack less required
if not PY2:
    # Python 3: marshal accepts arbitrary file-like objects natively.
    marshal_dump = marshal.dump
    marshal_load = marshal.load
else:
    def marshal_dump(code, f):
        # Python 2 marshal only writes to real ``file`` objects; for other
        # file-likes (e.g. BytesIO) serialize to a string and write that.
        if isinstance(f, file):
            marshal.dump(code, f)
        else:
            f.write(marshal.dumps(code))
    def marshal_load(f):
        # Mirror of marshal_dump: read the whole stream and deserialize
        # when ``f`` is not a real ``file`` object.
        if isinstance(f, file):
            return marshal.load(f)
        return marshal.loads(f.read())
bc_version = 2
# magic version used to only change with new jinja versions. With 2.6
# we change this to also take Python version changes into account. The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
pickle.dumps(bc_version, 2) + \
pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
class Bucket(object):
    """Container for the cached bytecode of a single template.

    A bucket is created and initialized by the bytecode cache and handed
    to the loading functions.  The cache assigns each bucket a checksum
    which is used to reject stale cache entries automatically, so the
    individual cache backends do not have to implement any invalidation
    logic themselves.
    """
    def __init__(self, environment, key, checksum):
        self.environment = environment
        self.key = key
        self.checksum = checksum
        self.reset()
    def reset(self):
        """Forget any loaded bytecode."""
        self.code = None
    def load_bytecode(self, f):
        """Read bytecode from the file-like object *f*, validating it."""
        # Reject anything that does not start with our magic header.
        if f.read(len(bc_magic)) != bc_magic:
            self.reset()
            return
        # A checksum mismatch means the template source has changed.
        if pickle.load(f) != self.checksum:
            self.reset()
            return
        self.code = marshal_load(f)
    def write_bytecode(self, f):
        """Serialize the held bytecode into the file-like object *f*."""
        if self.code is None:
            raise TypeError('can\'t write empty bucket')
        f.write(bc_magic)
        pickle.dump(self.checksum, f, 2)
        marshal_dump(self.code, f)
    def bytecode_from_string(self, string):
        """Load bytecode from a byte string."""
        self.load_bytecode(BytesIO(string))
    def bytecode_to_string(self):
        """Serialize the bytecode and return it as a byte string."""
        buf = BytesIO()
        self.write_bytecode(buf)
        return buf.getvalue()
class BytecodeCache(object):
    """Abstract base class for bytecode cache backends.

    Subclasses have to override :meth:`load_bytecode` and
    :meth:`dump_bytecode`; both of them are passed a
    :class:`~jinja2.bccache.Bucket`.

    A very small filesystem-backed implementation could look like this::

        from os import path

        class MyCache(BytecodeCache):

            def __init__(self, directory):
                self.directory = directory

            def load_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                if path.exists(filename):
                    with open(filename, 'rb') as f:
                        bucket.load_bytecode(f)

            def dump_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                with open(filename, 'wb') as f:
                    bucket.write_bytecode(f)

    A more advanced filesystem based bytecode cache ships with Jinja2
    itself (:class:`FileSystemBytecodeCache`).
    """
    def load_bytecode(self, bucket):
        """Load bytecode into *bucket*.  Backends that cannot find code
        for the bucket in their cache must simply do nothing.
        """
        raise NotImplementedError()
    def dump_bytecode(self, bucket):
        """Write the bytecode held by *bucket* back into the cache.  If
        that is not possible the backend must raise an exception instead
        of failing silently.
        """
        raise NotImplementedError()
    def clear(self):
        """Remove everything from the cache.  Jinja2 itself never calls
        this, but applications may use it to wipe the bytecode cache of
        a particular environment.
        """
    def get_cache_key(self, name, filename=None):
        """Returns the unique hash key for this template name."""
        digest = sha1(name.encode('utf-8'))
        if filename is not None:
            filename = '|' + filename
            if isinstance(filename, text_type):
                filename = filename.encode('utf-8')
            digest.update(filename)
        return digest.hexdigest()
    def get_source_checksum(self, source):
        """Returns a checksum for the source."""
        return sha1(source.encode('utf-8')).hexdigest()
    def get_bucket(self, environment, name, filename, source):
        """Return a cache bucket for the given template.  All arguments
        are mandatory but *filename* may be `None`.
        """
        bucket = Bucket(environment,
                        self.get_cache_key(name, filename),
                        self.get_source_checksum(source))
        self.load_bytecode(bucket)
        return bucket
    def set_bucket(self, bucket):
        """Put the bucket into the cache."""
        self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
    """Bytecode cache backend that stores bytecode in files on disk.

    It accepts two arguments: the directory the cache files live in and a
    filename pattern where ``%s`` is substituted with the cache key.  The
    pattern allows multiple independent caches to share one directory;
    the default pattern is ``'__jinja2_%s.cache'``.

    Without an explicit directory a default is chosen: the user's temp
    directory on Windows, and a freshly created per-user subdirectory of
    the system temp directory on UNIX systems.

    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')

    This backend supports clearing the cache via the clear method.
    """
    def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
        self.directory = (self._get_default_cache_dir()
                          if directory is None else directory)
        self.pattern = pattern
    def _get_default_cache_dir(self):
        tmpdir = tempfile.gettempdir()
        # Windows already gives every user an isolated temp directory,
        # so it can be used directly.
        if os.name == 'nt':
            return tmpdir
        if not hasattr(os, 'getuid'):
            raise RuntimeError('Cannot determine safe temp directory. You '
                               'need to explicitly provide one.')
        actual_dir = os.path.join(tmpdir, '_jinja2-cache-%d' % os.getuid())
        try:
            os.mkdir(actual_dir, stat.S_IRWXU)  # 0o700: private to this user
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        # Guard against a pre-existing entry that is not a private,
        # user-owned directory (e.g. a symlink planted by another user).
        st = os.lstat(actual_dir)
        if st.st_uid != os.getuid() \
           or not stat.S_ISDIR(st.st_mode) \
           or stat.S_IMODE(st.st_mode) != stat.S_IRWXU:
            raise RuntimeError('Temporary directory \'%s\' has an incorrect '
                               'owner, permissions, or type.' % actual_dir)
        return actual_dir
    def _get_cache_filename(self, bucket):
        # Full path of the cache file for the given bucket.
        return path.join(self.directory, self.pattern % bucket.key)
    def load_bytecode(self, bucket):
        f = open_if_exists(self._get_cache_filename(bucket), 'rb')
        if f is None:
            # A missing cache file simply means a cache miss.
            return
        try:
            bucket.load_bytecode(f)
        finally:
            f.close()
    def dump_bytecode(self, bucket):
        with open(self._get_cache_filename(bucket), 'wb') as f:
            bucket.write_bytecode(f)
    def clear(self):
        # "remove" is imported lazily here because Google App Engine does
        # not support write access on the file system and the function
        # does not exist normally.
        from os import remove
        for filename in fnmatch.filter(listdir(self.directory),
                                       self.pattern % '*'):
            try:
                remove(path.join(self.directory, filename))
            except OSError:
                pass
class MemcachedBytecodeCache(BytecodeCache):
    """Bytecode cache backend storing entries in a memcache-style cache.

    No particular memcache library is enforced; any client object with
    the following minimal interface works:

    .. class:: MinimalClientInterface

        .. method:: set(key, value[, timeout])

            Stores the bytecode (a string) under *key*.  When *timeout*
            is given it is the lifetime of the entry in seconds,
            otherwise a default (or no) timeout should be assumed.

        .. method:: get(key)

            Returns the stored value, or `None` if the key is missing.

    Compatible libraries include werkzeug.contrib.cache,
    python-memcached and cmemcache.  (The django cache interface is not
    compatible because it only stores unicode, not binary data; its
    underlying client — ``django.core.cache.cache._client`` — can be
    passed instead.)

    *prefix* is prepended to every cache key and *timeout* is the
    lifetime of cached bytecode; a high (or no) timeout is recommended.
    Clearing used items is not supported: :meth:`clear` is a no-op.

    .. versionadded:: 2.7
       Added support for ignoring memcache errors through the
       `ignore_memcache_errors` parameter.
    """
    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
                 ignore_memcache_errors=True):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors
    def load_bytecode(self, bucket):
        code = None
        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            # Treat a failing memcache as a cache miss unless the user
            # asked for errors to propagate.
            if not self.ignore_memcache_errors:
                raise
        if code is not None:
            bucket.bytecode_from_string(code)
    def dump_bytecode(self, bucket):
        key = self.prefix + bucket.key
        value = bucket.bytecode_to_string()
        try:
            if self.timeout is not None:
                self.client.set(key, value, self.timeout)
            else:
                self.client.set(key, value)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
| gpl-2.0 |
pechatny/basic-flask-app | src/app/flask/lib/python2.7/site-packages/setuptools/command/rotate.py | 285 | 2062 | import distutils, os
from setuptools import Command
from setuptools.compat import basestring
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
class rotate(Command):
    """Delete older distributions"""
    description = "delete older distributions, keeping N newest files"
    user_options = [
        ('match=', 'm', "patterns to match (required)"),
        ('dist-dir=', 'd', "directory where the distributions are"),
        ('keep=', 'k', "number of matching distributions to keep"),
    ]
    boolean_options = []
    def initialize_options(self):
        # All options start unset; finalize_options validates/converts them.
        self.match = None
        self.dist_dir = None
        self.keep = None
    def finalize_options(self):
        """Validate options: ``match`` and ``keep`` are required; ``keep``
        must be an integer; a comma-separated ``match`` string is split
        into a list of path patterns."""
        if self.match is None:
            raise DistutilsOptionError(
                "Must specify one or more (comma-separated) match patterns "
                "(e.g. '.zip' or '.egg')"
            )
        if self.keep is None:
            raise DistutilsOptionError("Must specify number of files to keep")
        try:
            self.keep = int(self.keep)
        except ValueError:
            raise DistutilsOptionError("--keep must be an integer")
        if isinstance(self.match, basestring):
            self.match = [
                convert_path(p.strip()) for p in self.match.split(',')
            ]
        # Default dist_dir to whatever the bdist command uses.
        self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
    def run(self):
        """For each pattern, delete all but the ``keep`` newest matching
        distributions in ``dist_dir`` (newest determined by mtime)."""
        self.run_command("egg_info")
        from glob import glob
        for pattern in self.match:
            pattern = self.distribution.get_name()+'*'+pattern
            files = glob(os.path.join(self.dist_dir,pattern))
            files = [(os.path.getmtime(f),f) for f in files]
            files.sort()
            files.reverse()
            # files is now newest-first; everything past index keep-1 goes.
            log.info("%d file(s) matching %s", len(files), pattern)
            files = files[self.keep:]
            for (t,f) in files:
                log.info("Deleting %s", f)
                if not self.dry_run:
                    os.unlink(f)
| mit |
lasote/conan | conans/client/cmd/new.py | 1 | 9249 | import re
from conans.errors import ConanException
from conans.model.ref import ConanFileReference
from conans.client.cmd.new_ci import ci_get_files
conanfile = """from conans import ConanFile, CMake, tools
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False]}}
default_options = "shared=False"
generators = "cmake"
def source(self):
self.run("git clone https://github.com/memsharded/hello.git")
self.run("cd hello && git checkout static_shared")
# This small hack might be useful to guarantee proper /MT /MD linkage in MSVC
# if the packaged project doesn't have variables to set it properly
tools.replace_in_file("hello/CMakeLists.txt", "PROJECT(MyHello)", '''PROJECT(MyHello)
include(${{CMAKE_BINARY_DIR}}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self)
cmake.configure(source_dir="%s/hello" % self.source_folder)
cmake.build()
# Explicit way:
# self.run('cmake %s/hello %s' % (self.source_folder, cmake.command_line))
# self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="hello")
self.copy("*hello.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello"]
"""
conanfile_bare = """from conans import ConanFile
from conans import tools
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
settings = "os", "compiler", "build_type", "arch"
description = "<Description of {package_name} here>"
url = "None"
license = "None"
def package(self):
self.copy("*")
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
"""
conanfile_sources = """from conans import ConanFile, CMake
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False]}}
default_options = "shared=False"
generators = "cmake"
exports_sources = "src/*"
def build(self):
cmake = CMake(self)
cmake.configure(source_dir="%s/src" % self.source_folder)
cmake.build()
# Explicit way:
# self.run('cmake %s/src %s' % (self.source_folder, cmake.command_line))
# self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="src")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.dylib*", dst="lib", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello"]
"""
conanfile_header = """from conans import ConanFile, tools
import os
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
# No settings/options are necessary, this is header only
def source(self):
'''retrieval of the source code here. Remember you can also put the code in the folder and
use exports instead of retrieving it with this source() method
'''
#self.run("git clone ...") or
#tools.download("url", "file.zip")
#tools.unzip("file.zip" )
def package(self):
self.copy("*.h", "include")
"""
test_conanfile = """from conans import ConanFile, CMake
import os
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
# Current dir is "test_package/build/<build_id>" and CMakeLists.txt is in "test_package"
cmake.configure(source_dir=self.conanfile_directory, build_dir="./")
cmake.build()
def imports(self):
self.copy("*.dll", dst="bin", src="bin")
self.copy("*.dylib*", dst="bin", src="lib")
self.copy('*.so*', dst='bin', src='lib')
def test(self):
os.chdir("bin")
self.run(".%sexample" % os.sep)
"""
test_cmake = """project(PackageTest CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_executable(example example.cpp)
target_link_libraries(example ${CONAN_LIBS})
# CTest is a testing tool that can be used to test your project.
# enable_testing()
# add_test(NAME example
# WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/bin
# COMMAND example)
"""
test_main = """#include <iostream>
#include "hello.h"
int main() {
hello();
}
"""
hello_h = """#pragma once
#ifdef WIN32
#define HELLO_EXPORT __declspec(dllexport)
#else
#define HELLO_EXPORT
#endif
HELLO_EXPORT void hello();
"""
hello_cpp = """#include <iostream>
#include "hello.h"
void hello(){
#ifdef NDEBUG
std::cout << "Hello World Release!" <<std::endl;
#else
std::cout << "Hello World Debug!" <<std::endl;
#endif
}
"""
cmake = """project(MyHello CXX)
cmake_minimum_required(VERSION 2.8)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_library(hello hello.cpp)
"""
gitignore_template = """
*.pyc
test_package/build
"""
def cmd_new(ref, header=False, pure_c=False, test=False, exports_sources=False, bare=False,
            visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None, osx_clang_versions=None,
            shared=None, upload_url=None, gitignore=None, gitlab_gcc_versions=None, gitlab_clang_versions=None):
    """Build the file contents for a new conan package recipe.

    :param ref: full package reference, e.g. ``MyLib/1.2.3@user/testing``
        (the ``user/channel`` part is optional and defaults to
        ``user/channel``).
    :param header: generate a header-only recipe (incompatible with
        ``exports_sources`` and ``pure_c``).
    :param pure_c: drop the ``libcxx`` setting for pure C packages.
    :param test: also generate a ``test_package/`` skeleton.
    :param exports_sources: generate a recipe with in-repo sources
        (incompatible with ``header``).
    :param bare: generate a minimal packaging-only recipe.
    :param gitignore: also generate a ``.gitignore`` file.
    :returns: dict mapping relative file paths to file contents.
    :raises ConanException: on a malformed reference or incompatible flags.

    The remaining keyword arguments are forwarded to :func:`ci_get_files`
    to generate CI configuration files.
    """
    try:
        tokens = ref.split("@")
        name, version = tokens[0].split("/")
        if len(tokens) == 2:
            user, channel = tokens[1].split("/")
        else:
            user, channel = "user", "channel"
        # Strip every non-alphanumeric character so the package name can
        # be used as a Python class name in the generated conanfile.
        # NOTE: raw string — '[\W_]+' without r'' is an invalid escape
        # sequence (DeprecationWarning since 3.6, SyntaxError in 3.12).
        pattern = re.compile(r'[\W_]+')
        package_name = pattern.sub('', name).capitalize()
    except ValueError:
        raise ConanException("Bad parameter, please use full package name,"
                             "e.g: MyLib/1.2.3@user/testing")
    # Validate it is a valid reference
    ConanFileReference(name, version, user, channel)
    if header and exports_sources:
        raise ConanException("'header' and 'sources' are incompatible options")
    if pure_c and (header or exports_sources):
        raise ConanException("'pure_c' is incompatible with 'header' and 'sources'")
    if bare and (header or exports_sources):
        raise ConanException("'bare' is incompatible with 'header' and 'sources'")
    if header:
        files = {"conanfile.py": conanfile_header.format(name=name, version=version,
                                                         package_name=package_name)}
    elif exports_sources:
        files = {"conanfile.py": conanfile_sources.format(name=name, version=version,
                                                          package_name=package_name),
                 "src/hello.cpp": hello_cpp,
                 "src/hello.h": hello_h,
                 "src/CMakeLists.txt": cmake}
    elif bare:
        files = {"conanfile.py": conanfile_bare.format(name=name, version=version,
                                                       package_name=package_name)}
    else:
        files = {"conanfile.py": conanfile.format(name=name, version=version,
                                                  package_name=package_name)}
        if pure_c:
            # Pure C projects must not link against the C++ standard library.
            config = "\n    def configure(self):\n        del self.settings.compiler.libcxx"
            files["conanfile.py"] = files["conanfile.py"] + config
    if test:
        files["test_package/conanfile.py"] = test_conanfile.format(name=name, version=version,
                                                                   user=user, channel=channel,
                                                                   package_name=package_name)
        files["test_package/CMakeLists.txt"] = test_cmake
        files["test_package/example.cpp"] = test_main
    if gitignore:
        files[".gitignore"] = gitignore_template
    files.update(ci_get_files(name, version, user, channel, visual_versions,
                              linux_gcc_versions, linux_clang_versions,
                              osx_clang_versions, shared, upload_url,
                              gitlab_gcc_versions, gitlab_clang_versions))
    return files
| mit |
kerr-huang/SL4A | python/src/Lib/test/test_md5.py | 194 | 1790 | # Testing md5 module
import warnings
warnings.filterwarnings("ignore", "the md5 module is deprecated.*",
DeprecationWarning)
import unittest
from md5 import md5
from test import test_support
def hexstr(s):
    """Return the lowercase hexadecimal encoding of byte string *s*.

    Mirrors ``binascii.hexlify`` but is kept as a local helper so the
    test exercises its own conversion path.
    """
    # ''.join avoids the quadratic cost of repeated string concatenation;
    # '%02x' produces the same lowercase digits the original looked up in
    # string.hexdigits[0:16].
    return ''.join('%02x' % ord(c) for c in s)
class MD5_Test(unittest.TestCase):
    """Known-answer and consistency tests for the deprecated md5 module."""
    def md5test(self, s, expected):
        # Verify both digest() (hex-encoded via hexstr) and hexdigest()
        # produce the expected hex string.
        self.assertEqual(hexstr(md5(s).digest()), expected)
        self.assertEqual(md5(s).hexdigest(), expected)
    def test_basics(self):
        # RFC 1321, appendix A.5 test vectors.
        eq = self.md5test
        eq('', 'd41d8cd98f00b204e9800998ecf8427e')
        eq('a', '0cc175b9c0f1b6a831c399e269772661')
        eq('abc', '900150983cd24fb0d6963f7d28e17f72')
        eq('message digest', 'f96b697d7cb7938d525a2f31aaf161d0')
        eq('abcdefghijklmnopqrstuvwxyz', 'c3fcd3d76192e4007dfb496cca67e13b')
        eq('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
           'd174ab98d277d9f5a5611c2c9f419d9f')
        eq('12345678901234567890123456789012345678901234567890123456789012345678901234567890',
           '57edf4a22be3c955ac49da2e2107b67a')
    def test_hexdigest(self):
        # hexdigest is new with Python 2.0
        m = md5('testing the hexdigest method')
        h = m.hexdigest()
        self.assertEqual(hexstr(m.digest()), h)
    def test_large_update(self):
        # Feeding the data in pieces must give the same digest as feeding
        # it all at once.
        aas = 'a' * 64
        bees = 'b' * 64
        cees = 'c' * 64
        m1 = md5()
        m1.update(aas)
        m1.update(bees)
        m1.update(cees)
        m2 = md5()
        m2.update(aas + bees + cees)
        self.assertEqual(m1.digest(), m2.digest())
def test_main():
    """Entry point used by Python's regrtest driver."""
    test_support.run_unittest(MD5_Test)
if __name__ == '__main__':
test_main()
| apache-2.0 |
grschafer/BejeweledBot | train/agent.py | 1 | 2406 | from pybrain.rl.agents.logging import LoggingAgent
from pybrain.rl.agents.learning import LearningAgent
from scipy import where
from random import choice
class BejeweledAgent(LearningAgent):
    """Learning agent that activates the value module once per observed
    state (one per color), acts on the single best estimate, and credits
    the eventual reward to the state that produced the chosen action."""
    def getAction(self):
        """Return the highest-value action over all state observations.

        For each state in ``self.lastobs`` the module is activated and the
        resulting best action and its value are collected; the action with
        the overall maximum value is chosen (ties broken at random).  The
        originating state is remembered in ``self.bestState`` so that
        :meth:`giveReward` can credit the reward to it.  When learning is
        enabled, the explorer may substitute a random action afterwards.
        """
        LoggingAgent.getAction(self)
        # For each color/state, get the best action, then pick the
        # highest-value action among those.
        actions = []
        values = []
        # TODO: why are same values printed many times in a row here?
        for state in self.lastobs:
            actions.append(self.module.activate(state))
            values.append(self.module.lastMaxActionValue)
        # NOTE(review): this relies on ``max(values)`` being a numpy scalar
        # so the ``==`` comparison broadcasts over the list — confirm that
        # ``lastMaxActionValue`` is always a numpy type.
        actionIdx = where(values == max(values))[0]
        ch = choice(actionIdx)
        self.lastaction = actions[ch]
        self.bestState = self.lastobs[ch]
        # Add a chance to pick a random other action.
        if self.learning:
            self.lastaction = self.learner.explore(self.lastobs, self.lastaction)
        return self.lastaction
    def giveReward(self, r):
        """Step 3: store observation, action and reward in the history
        dataset.

        The reward is credited to ``self.bestState`` (the observation whose
        estimate was chosen in :meth:`getAction`), not to every observation.
        """
        # ``lastobs``/``lastaction`` are array-likes; comparing them with
        # ``!= None`` broadcasts element-wise and makes ``assert`` raise
        # "truth value of an array is ambiguous" — use identity tests.
        assert self.lastobs is not None
        assert self.lastaction is not None
        assert self.lastreward is None
        self.lastreward = r
        # Store state, action and reward in the dataset if logging is
        # enabled.
        if self.logging:
            # TODO: assigning reward to only the best estimate for now;
            # learning may be slower because the other observations get
            # no credit (false positives for every obs otherwise).
            self.history.addSample(self.bestState, self.lastaction, self.lastreward)
| mit |
FRC-Team-3140/north-american-happiness | lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/big5prober.py | 206 | 1726 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    """Charset prober for the Big5 (traditional Chinese) encoding.

    Wires the Big5 coding state machine together with the Big5 character
    distribution analyser; all probing logic lives in the shared
    multi-byte prober base class.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        # Encoding name reported to chardet callers.
        return "Big5"
| mit |
nanditav/15712-TensorFlow | tensorflow/python/kernel_tests/dynamic_stitch_op_test.py | 21 | 5673 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.dynamic_stitch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class DynamicStitchTest(tf.test.TestCase):
  """Tests for tf.dynamic_stitch: scalar, 1-D, 2-D and higher-rank
  indices/data, gradient flow, and shape-validation errors."""
  def testScalar(self):
    # Scalar indices/data, in both forward and reversed index order.
    with self.test_session():
      indices = [tf.constant(0), tf.constant(1)]
      data = [tf.constant(40), tf.constant(60)]
      for step in -1, 1:
        stitched_t = tf.dynamic_stitch(indices[::step], data)
        stitched_val = stitched_t.eval()
        self.assertAllEqual([40, 60][::step], stitched_val)
        # Dimension 0 is determined by the max index in indices, so we
        # can only infer that the output is a vector of some unknown
        # length.
        self.assertEqual([None], stitched_t.get_shape().as_list())
  def testSimpleOneDimensional(self):
    with self.test_session():
      indices = [tf.constant([0, 4, 7]),
                 tf.constant([1, 6, 2, 3, 5])]
      data = [tf.constant([0, 40, 70]),
              tf.constant([10, 60, 20, 30, 50])]
      stitched_t = tf.dynamic_stitch(indices, data)
      stitched_val = stitched_t.eval()
      self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
      # Dimension 0 is determined by the max index in indices, so we
      # can only infer that the output is a vector of some unknown
      # length.
      self.assertEqual([None], stitched_t.get_shape().as_list())
  def testOneListOneDimensional(self):
    # A single indices/data pair covering every output slot.
    with self.test_session():
      indices = [tf.constant([1, 6, 2, 3, 5, 0, 4, 7])]
      data = [tf.constant([10, 60, 20, 30, 50, 0, 40, 70])]
      stitched_t = tf.dynamic_stitch(indices, data)
      stitched_val = stitched_t.eval()
      self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
      # Dimension 0 is determined by the max index in indices, so we
      # can only infer that the output is a vector of some unknown
      # length.
      self.assertEqual([None], stitched_t.get_shape().as_list())
  def testSimpleTwoDimensional(self):
    with self.test_session():
      indices = [tf.constant([0, 4, 7]),
                 tf.constant([1, 6]),
                 tf.constant([2, 3, 5])]
      data = [tf.constant([[0, 1], [40, 41], [70, 71]]),
              tf.constant([[10, 11], [60, 61]]),
              tf.constant([[20, 21], [30, 31], [50, 51]])]
      stitched_t = tf.dynamic_stitch(indices, data)
      stitched_val = stitched_t.eval()
      self.assertAllEqual(
          [[0, 1], [10, 11], [20, 21], [30, 31],
           [40, 41], [50, 51], [60, 61], [70, 71]], stitched_val)
      # Dimension 0 is determined by the max index in indices, so we
      # can only infer that the output is a matrix with 2 columns and
      # some unknown number of rows.
      self.assertEqual([None, 2], stitched_t.get_shape().as_list())
  def testHigherRank(self):
    # Mixed-rank indices (scalar, vector, matrix) with matching data.
    with self.test_session() as sess:
      indices = [tf.constant(6), tf.constant([4, 1]),
                 tf.constant([[5, 2], [0, 3]])]
      data = [tf.constant([61, 62]), tf.constant([[41, 42], [11, 12]]),
              tf.constant([[[51, 52], [21, 22]], [[1, 2], [31, 32]]])]
      stitched_t = tf.dynamic_stitch(indices, data)
      stitched_val = stitched_t.eval()
      correct = 10 * np.arange(7)[:, None] + [1, 2]
      self.assertAllEqual(correct, stitched_val)
      self.assertEqual([None, 2], stitched_t.get_shape().as_list())
      # Test gradients
      stitched_grad = 7 * stitched_val
      grads = tf.gradients(stitched_t, indices + data, stitched_grad)
      self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
      for datum, grad in zip(data, sess.run(grads[3:])):
        self.assertAllEqual(7 * datum.eval(), grad)
  def testErrorIndicesMultiDimensional(self):
    # indices rank must be consistent with the corresponding data rank.
    indices = [tf.constant([0, 4, 7]),
               tf.constant([[1, 6, 2, 3, 5]])]
    data = [tf.constant([[0, 40, 70]]),
            tf.constant([10, 60, 20, 30, 50])]
    with self.assertRaises(ValueError):
      tf.dynamic_stitch(indices, data)
  def testErrorDataNumDimsMismatch(self):
    indices = [tf.constant([0, 4, 7]),
               tf.constant([1, 6, 2, 3, 5])]
    data = [tf.constant([0, 40, 70]),
            tf.constant([[10, 60, 20, 30, 50]])]
    with self.assertRaises(ValueError):
      tf.dynamic_stitch(indices, data)
  def testErrorDataDimSizeMismatch(self):
    # Trailing dimensions of all data tensors must agree.
    indices = [tf.constant([0, 4, 5]),
               tf.constant([1, 6, 2, 3])]
    data = [tf.constant([[0], [40], [70]]),
            tf.constant([[10, 11], [60, 61], [20, 21], [30, 31]])]
    with self.assertRaises(ValueError):
      tf.dynamic_stitch(indices, data)
  def testErrorDataAndIndicesSizeMismatch(self):
    # Each data tensor must supply one row per index.
    indices = [tf.constant([0, 4, 7]),
               tf.constant([1, 6, 2, 3, 5])]
    data = [tf.constant([0, 40, 70]),
            tf.constant([10, 60, 20, 30])]
    with self.assertRaises(ValueError):
      tf.dynamic_stitch(indices, data)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
elssar/calibre | src/calibre/ebooks/metadata/sources/openlibrary.py | 14 | 1264 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.metadata.sources.base import Source
class OpenLibrary(Source):
    """Metadata source that fetches book covers from Open Library by ISBN."""

    name = 'Open Library'
    description = _('Downloads covers from The Open Library')
    capabilities = frozenset(['cover'])

    # Cover URL template. ``?default=false`` makes the server answer
    # 404 instead of serving a placeholder image when no cover exists.
    OPENLIBRARY = 'http://covers.openlibrary.org/b/isbn/%s-L.jpg?default=false'

    def download_cover(self, log, result_queue, abort,
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        """Fetch the large cover image for the book's ISBN and put the
        raw bytes on *result_queue* as ``(self, data)``.

        Open Library covers are keyed by ISBN only, so nothing happens
        when no ISBN identifier is available.
        """
        try:
            isbn = identifiers['isbn']
        except KeyError:
            return
        url = self.OPENLIBRARY % isbn
        br = self.browser
        try:
            cover_bytes = br.open_novisit(url, timeout=timeout).read()
        except Exception as e:
            # A 404 is the server's "no cover" answer (see ?default=false
            # above); anything else is a genuine failure.
            getcode = getattr(e, 'getcode', None)
            if callable(getcode) and getcode() == 404:
                log.error('No cover for ISBN: %r found'%isbn)
            else:
                log.exception('Failed to download cover for ISBN:', isbn)
        else:
            result_queue.put((self, cover_bytes))
| gpl-3.0 |
vivekanand1101/fas | plugins/fas-plugin-show/fas_show/help.py | 11 | 2308 | # -*- coding: utf-8 -*-
#
# Copyright © 2008 Ricky Zhou
# Copyright © 2008 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Author(s): Ricky Zhou <ricky@fedoraproject.org>
# Mike McGrath <mmcgrath@redhat.com>
# Yaakov Nemoy <ynemoy@redhat.com>
#
import turbogears
from turbogears import controllers, expose
class Help(controllers.Controller):
    """Serves short HTML help snippets for the show-registration form fields."""

    # help-id -> [title, HTML body]; the 'none' entry doubles as the
    # fallback text for unknown ids.
    help = dict(none=[_('Error'), _('<p>We could not find that help item</p>')],
        show_name=[_('Show Name'), _('''<p>A short name to identify the show, perhaps a code name. Include a date or unique number.</p>''')],
        show_display_name=[_('Show Display Name'), _('''<p>A longer user readable name to describe the show. Preferably the canonical name provided by the event organizers</p>''')],
        show_owner=[_('Show Owner'), _('''<p>The user name of the owner of the event</p>''')],
        group=[_('Show Group'),_('''<p>The name of the group of the participants in the event</p>''')],
        description=[_('Description'), _('''<p>Be descriptive</p>''')])

    def __init__(self):
        '''Create a JsonRequest Controller.'''

    @expose(template="fas.templates.help")
    def get_help(self, helpid='none'):
        """Return the help entry for *helpid*, or an error dict when unknown."""
        entry = self.help.get(helpid)
        if entry is None:
            # NOTE(review): this error path returns different keys
            # ('title'/'helpItem') than the success path ('help') —
            # confirm the template renders both shapes.
            return dict(title=_('Error'), helpItem=[_('Error'), _('<p>We could not find that help item</p>')])
        return dict(help=entry)
| gpl-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pytz/lazy.py | 514 | 5263 | from threading import RLock
try:
from UserDict import DictMixin
except ImportError:
from collections import Mapping as DictMixin
# With lazy loading, we might end up with multiple threads triggering
# it at the same time. We need a lock.
_fill_lock = RLock()
class LazyDict(DictMixin):
    """Dictionary populated on first use.

    Subclasses implement ``_fill()`` to assign the real mapping to
    ``self.data`` (keyed by upper-cased strings).  Every read accessor
    triggers that fill exactly once; the module-wide ``_fill_lock``
    makes concurrent first accesses from multiple threads safe.
    """
    # None until _fill() runs, then the backing dict.
    data = None

    def _ensure_filled(self):
        """Run ``_fill()`` once, thread-safely (double-checked locking).

        The unlocked first test keeps the common already-filled path
        cheap; the locked re-test prevents two threads from both
        running ``_fill()``.
        """
        if self.data is None:
            _fill_lock.acquire()
            try:
                if self.data is None:
                    self._fill()
            finally:
                _fill_lock.release()

    def __getitem__(self, key):
        self._ensure_filled()
        # Keys are stored upper-case, so item lookup is case-insensitive.
        return self.data[key.upper()]

    def __contains__(self, key):
        self._ensure_filled()
        # NOTE(review): unlike __getitem__, this does NOT upper-case the
        # key, so membership tests are case-sensitive — matches the
        # original behaviour; confirm that asymmetry is intended.
        return key in self.data

    def __iter__(self):
        self._ensure_filled()
        return iter(self.data)

    def __len__(self):
        self._ensure_filled()
        return len(self.data)

    def keys(self):
        self._ensure_filled()
        return self.data.keys()
class LazyList(list):
    """List populated on first use.

    ``LazyList(iterable)`` returns a list-like object that consumes the
    iterable only when the list is first touched.  This is done by
    returning an instance of a throwaway subclass whose list methods
    are wrapped: on first call the wrapper fills the list from the
    captured iterable, then deletes all the wrappers so subsequent
    calls go straight to the plain ``list`` implementations.
    """

    # Every list method/operator that must trigger the lazy fill.
    # (Filtered below against what this Python version actually defines.)
    _props = [
        '__str__', '__repr__', '__unicode__',
        '__hash__', '__sizeof__', '__cmp__',
        '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
        'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove',
        'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__',
        '__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__',
        '__getitem__', '__setitem__', '__delitem__', '__iter__',
        '__reversed__', '__getslice__', '__setslice__', '__delslice__']

    def __new__(cls, fill_iter=None):
        # With no iterable there is nothing to defer: hand back a plain
        # (empty) list rather than a lazy wrapper.
        if fill_iter is None:
            return list()

        # We need a new class as we will be dynamically messing with its
        # methods: the wrappers are deleted per-instance-class after the
        # fill, which must not affect other LazyList instances.
        class LazyList(list):
            pass

        # Wrap the iterable in a one-element list so the closures below
        # can both share it and signal "already consumed" by popping it.
        fill_iter = [fill_iter]

        def lazy(name):
            # Build a wrapper for list method `name` that fills the list
            # on first use (under the module lock), strips all wrappers,
            # then delegates to the real list method.
            def _lazy(self, *args, **kw):
                _fill_lock.acquire()
                try:
                    if len(fill_iter) > 0:
                        list.extend(self, fill_iter.pop())
                        for method_name in cls._props:
                            delattr(LazyList, method_name)
                finally:
                    _fill_lock.release()
                return getattr(list, name)(self, *args, **kw)
            return _lazy

        for name in cls._props:
            setattr(LazyList, name, lazy(name))

        new_list = LazyList()
        return new_list

# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)]
class LazySet(set):
    """Set populated on first use.

    Same deferred-fill technique as :class:`LazyList`: a throwaway
    subclass whose set methods are wrapped so the first call consumes
    the captured iterable, removes the wrappers, and delegates to the
    plain ``set`` implementation from then on.
    """

    # Every set method/operator that must trigger the lazy fill.
    # (Filtered below against what this Python version actually defines.)
    _props = (
        '__str__', '__repr__', '__unicode__',
        '__hash__', '__sizeof__', '__cmp__',
        '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__',
        '__contains__', '__len__', '__nonzero__',
        '__getitem__', '__setitem__', '__delitem__', '__iter__',
        '__sub__', '__and__', '__xor__', '__or__',
        '__rsub__', '__rand__', '__rxor__', '__ror__',
        '__isub__', '__iand__', '__ixor__', '__ior__',
        'add', 'clear', 'copy', 'difference', 'difference_update',
        'discard', 'intersection', 'intersection_update', 'isdisjoint',
        'issubset', 'issuperset', 'pop', 'remove',
        'symmetric_difference', 'symmetric_difference_update',
        'union', 'update')

    def __new__(cls, fill_iter=None):
        # No iterable: nothing to defer, return a plain empty set.
        if fill_iter is None:
            return set()

        # Per-instance throwaway subclass, so deleting the wrappers
        # after the fill cannot affect other LazySet instances.
        class LazySet(set):
            pass

        # One-element list shared by the closures; popping it marks the
        # iterable as consumed.
        fill_iter = [fill_iter]

        def lazy(name):
            # Wrapper for set method `name`: fill on first use (under
            # the module lock), strip all wrappers, then delegate.
            def _lazy(self, *args, **kw):
                _fill_lock.acquire()
                try:
                    if len(fill_iter) > 0:
                        for i in fill_iter.pop():
                            set.add(self, i)
                        for method_name in cls._props:
                            delattr(LazySet, method_name)
                finally:
                    _fill_lock.release()
                return getattr(set, name)(self, *args, **kw)
            return _lazy

        for name in cls._props:
            setattr(LazySet, name, lazy(name))

        new_set = LazySet()
        return new_set

# Not all versions of Python declare the same magic methods.
# Filter out properties that don't exist in this version of Python
# from the list.
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
| mit |
mirswamp/java-cli | scripts/func_tests.py | 2 | 43447 | import os.path as osp
import time
import unittest
from confreader import read_conf_into_dict
from org.apache.log4j import BasicConfigurator
from org.apache.log4j.varia import NullAppender
from java.lang import NullPointerException
from org.continuousassurance.swamp.cli import SwampApiWrapper
from org.continuousassurance.swamp.cli.exceptions import InvalidIdentifierException
from org.continuousassurance.swamp.cli.util import AssessmentStatus
from org.continuousassurance.swamp.session import HTTPException
from edu.uiuc.ncsa.security.core.exceptions import GeneralException
class TestSwampApiWrapper(unittest.TestCase):
    """Shared base class: loads SWAMP credentials once for every test class.

    Reads resources/userinfo.properties and publishes USERNAME,
    PASSWORD, PROJECT and HOST as attributes of this base class so all
    subclasses see the same values.
    """

    @classmethod
    def setUpClass(cls):
        config_file = osp.join(osp.dirname(__file__),
                               'resources/userinfo.properties')
        try:
            user_conf = read_conf_into_dict(config_file)
        except IOError as err:
            print('''Please create "%s" with
username=<swamp-username>
password=<swamp-password>
project=<test-project-uuid>
hostname=<swamp-hostname>
''' % config_file)
            raise err
        # Assign on the base class explicitly (not on cls) so every
        # subclass reads the same shared values.
        TestSwampApiWrapper.USERNAME = user_conf['username']
        TestSwampApiWrapper.PASSWORD = user_conf['password']
        # Please enter your default project
        TestSwampApiWrapper.PROJECT = user_conf['project']
        #TestSwampApiWrapper.HOST = 'https://dt.cosalab.org'
        TestSwampApiWrapper.HOST = user_conf.get('hostname',
                                                 'https://www.mir-swamp.org')
class TestLogin(TestSwampApiWrapper):
    """Login behaviour against the configured SWAMP host."""

    @classmethod
    def setUpClass(cls):
        super(TestLogin, cls).setUpClass()

    def setUp(self):
        # Fresh, unauthenticated wrapper for every test.
        self.api_wrapper = SwampApiWrapper()

    def test_login(self):
        session = self.api_wrapper.login(TestSwampApiWrapper.USERNAME,
                                         TestSwampApiWrapper.PASSWORD,
                                         TestSwampApiWrapper.HOST)
        self.assertNotEqual(session, None, "Login Failed")

    def test_login_incorrect(self):
        # A truncated password must be rejected with an HTTP error.
        bad_password = TestSwampApiWrapper.PASSWORD[:-1]
        self.assertRaises(HTTPException, self.api_wrapper.login,
                          TestSwampApiWrapper.USERNAME,
                          bad_password,
                          TestSwampApiWrapper.HOST)

    @unittest.expectedFailure
    def test_login_incorrect2(self):
        # NOTE(review): `java.net` is never imported in this module, so
        # evaluating java.net.UnknownHostException raises NameError; the
        # expectedFailure decorator still records it as expected.
        self.assertRaises(java.net.UnknownHostException,
                          self.api_wrapper.login,
                          "bogus-swamp-user",
                          "bogus-swamp-password",
                          'https://error-invalid-expect-fail.cosalab.org/')
class TestProjects(TestSwampApiWrapper):
    """Project listing requires an authenticated session."""

    @classmethod
    def setUpClass(cls):
        super(TestProjects, cls).setUpClass()

    def setUp(self):
        # Log in before each test; some tests log out again themselves.
        self.api_wrapper = SwampApiWrapper()
        self.api_wrapper.login(TestSwampApiWrapper.USERNAME,
                               TestSwampApiWrapper.PASSWORD,
                               TestSwampApiWrapper.HOST)

    def test_get_projects(self):
        self.assertIsNotNone(self.api_wrapper.getProjectsList())

    def test_get_projects_fail1(self):
        # Once logged out, listing projects must fail (Java NPE from the
        # underlying session object).
        self.api_wrapper.logout()
        self.assertRaises(NullPointerException,
                          self.api_wrapper.getProjectsList)
class TestTools(TestSwampApiWrapper):
    """Tool enumeration and per-language tool lookup."""

    @classmethod
    def setUpClass(cls):
        super(TestTools, cls).setUpClass()

    def setUp(self):
        self.api_wrapper = SwampApiWrapper()
        self.api_wrapper.login(TestSwampApiWrapper.USERNAME,
                               TestSwampApiWrapper.PASSWORD,
                               TestSwampApiWrapper.HOST)

    @unittest.skip('Does not work because it is protected')
    def test_get_tools(self):
        """List every tool with its supported package types and platforms."""
        tool_map = self.api_wrapper.getAllTools(TestSwampApiWrapper.PROJECT)
        self.assertIsNotNone(tool_map)
        for tool in tool_map.values():
            # BUG FIX: the format string used to be passed to print() as a
            # plain first positional argument (print("fmt", a, b)); it is
            # now actually %-interpolated.
            print("%-21s, %-38s, %s\n" % (tool.getName(),
                                          tool.getSupportedPkgTypes(),
                                          tool.getSupportedPlatforms()))

    def test_get_tools_supported(self):
        tool_list = self.api_wrapper.getTools("C/C++", TestSwampApiWrapper.PROJECT)
        self.assertIsNotNone(tool_list)
class TestPlatforms(TestSwampApiWrapper):
    """Platform enumeration for the logged-in user."""

    @classmethod
    def setUpClass(cls):
        super(TestPlatforms, cls).setUpClass()

    def setUp(self):
        self.api_wrapper = SwampApiWrapper()
        self.api_wrapper.login(TestSwampApiWrapper.USERNAME,
                               TestSwampApiWrapper.PASSWORD,
                               TestSwampApiWrapper.HOST)

    def test_get_platforms(self):
        self.assertIsNotNone(self.api_wrapper.getAllPlatforms())

    def test_get_plats_supported(self):
        # Look up a known Java tool, then list the platform versions it
        # supports (printed for manual inspection).
        findbugs = self.api_wrapper.getToolFromName('Findbugs',
                                                    TestSwampApiWrapper.PROJECT)
        plat_list = self.api_wrapper.getSupportedPlatformVersions(
            findbugs.getIdentifierString(),
            TestSwampApiWrapper.PROJECT)
        for plat in plat_list:
            print(plat)
        self.assertIsNotNone(plat_list)
class TestUpload(TestSwampApiWrapper):
    """Package-upload tests, one per supported package type.

    Every upload goes through the shared ``_upload_pkg`` helper; each
    ``test_upload_new_pkg*`` method only names the test-package
    directory and archive file under resources/test_packages/.
    """

    @classmethod
    def setUpClass(cls):
        super(TestUpload, cls).setUpClass()
        cls.PKG_LIST = list()
        cls.api_wrapper = SwampApiWrapper()
        cls.api_wrapper.login(TestSwampApiWrapper.USERNAME,
                              TestSwampApiWrapper.PASSWORD,
                              TestSwampApiWrapper.HOST)

    @classmethod
    def tearDownClass(cls):
        # BUG FIX: this previously called super(...).setUpClass(), which
        # re-ran the parent's setup during teardown instead of tearing down.
        super(TestUpload, cls).tearDownClass()
        # Uploaded-package deletion is intentionally disabled; see the
        # VCS history for the original deletePackage loop over PKG_LIST.

    def _upload_pkg(self, pkg_dir, archive_name, deps_conf=None):
        """Upload resources/test_packages/<pkg_dir>/<archive_name>.

        ``deps_conf``, when given, names a conf file in the same
        directory whose ``dependencies-*`` entries become the OS
        dependency dict.  Asserts the upload succeeded, records the new
        package-version UUID in PKG_LIST and returns it.
        """
        base = osp.join(osp.dirname(__file__), 'resources/test_packages', pkg_dir)
        pkg_deps_dict = dict()
        if deps_conf is not None:
            deps = read_conf_into_dict(osp.join(base, deps_conf))
            pkg_deps_dict = {k.partition('dependencies-')[2]: v
                             for k, v in deps.items()
                             if k.startswith('dependencies-')}
        pkg_uuid = TestUpload.api_wrapper.uploadPackage(osp.join(base, 'package.conf'),
                                                        osp.join(base, archive_name),
                                                        TestSwampApiWrapper.PROJECT,
                                                        pkg_deps_dict,
                                                        True)
        self.assertIsNotNone(pkg_uuid)
        TestUpload.PKG_LIST.append(pkg_uuid)
        return pkg_uuid

    def test_get_pkg_types(self):
        self.assertIsNotNone(TestUpload.api_wrapper.getPackageTypesList())

    def test_get_pkg_list(self):
        self.assertIsNotNone(TestUpload.api_wrapper.getPackagesList(None))

    def test_get_pkg_list_from_project(self):
        self.assertIsNotNone(
            TestUpload.api_wrapper.getPackagesList(TestSwampApiWrapper.PROJECT))

    def test_upload_new_pkg1(self):
        self._upload_pkg('swamp-gradle-example-1.0', 'swamp-gradle-example-1.0.zip')

    def test_upload_new_pkg2(self):
        self._upload_pkg('2048-android-1.8', 'v1.8.zip')

    def test_upload_new_pkg3(self):
        self._upload_pkg('beautifulsoup4-4.3.2', 'beautifulsoup4-4.3.2.tar.gz')

    def test_upload_new_pkg4(self):
        self._upload_pkg('capistrano-3.4.0', 'capistrano-3.4.0.gem')

    def test_upload_new_pkg5(self):
        self._upload_pkg('java-cli-1.3.0', 'java-cli-1.1.zip')

    def test_upload_new_pkg6(self):
        self._upload_pkg('lighttpd-1.4.45', 'lighttpd-1.4.45.tar.xz')

    def test_upload_new_pkg7(self):
        self._upload_pkg('moodle-3.1.1', 'moodle-3.1.1.zip')

    def test_upload_new_pkg8(self):
        self._upload_pkg('pylxc-0.0.3', 'pylxc-0.0.3.tar.gz')

    def test_upload_new_pkg9(self):
        # Ruby-on-Rails package with OS-level dependencies.
        self._upload_pkg('railsgoat-9052b4fcf0', 'railsgoat-9052b4fcf0.zip',
                         deps_conf='pkg-os-dependencies.conf')

    def test_upload_new_pkg10(self):
        self._upload_pkg('sandrorat-apk-unknown', 'SandroRat.apk')

    def test_upload_new_pkg11(self):
        self._upload_pkg('scarf-io-1.0', 'scarf-io.zip')

    def test_upload_new_pkg12(self):
        self._upload_pkg('sinatra-starter-2ad9cba672', 'sinatra-starter-2ad9cba672.zip')

    def test_upload_new_pkg13(self):
        self._upload_pkg('swamp-gradle-example-1.0', 'swamp-gradle-example-1.0.zip')

    def test_upload_new_pkg14(self):
        self._upload_pkg('tomcat-coyote-7.0.27', 'tomcat-coyote-7.0.27.tar.gz')

    def test_upload_new_pkg15(self):
        self._upload_pkg('wordpress-4.5.1', 'WordPress-4.5.1.zip')

    def test_upload_pkg_ver_fail1(self):
        # Uploading into a non-existent project UUID must be rejected.
        base = osp.join(osp.dirname(__file__),
                        'resources/test_packages/wordpress-4.5.1')
        self.assertRaises(InvalidIdentifierException,
                          TestUpload.api_wrapper.uploadPackage,
                          osp.join(base, 'package.conf'),
                          osp.join(base, 'WordPress-4.5.1.zip'),
                          'd47380ea-a4ef-0a88-0a17-aab43d80fdbe',
                          dict(),
                          True)
class TestAssess(TestSwampApiWrapper):
    """Assessment-launch tests: upload a package, then start one tool run.

    Every test goes through the shared ``_upload_and_assess`` helper;
    each ``test_get_run_assess*`` method only names the test-package
    directory, archive and tool.
    """

    @classmethod
    def setUpClass(cls):
        super(TestAssess, cls).setUpClass()
        cls.API_WRAPPER = SwampApiWrapper()
        cls.API_WRAPPER.login(TestSwampApiWrapper.USERNAME,
                              TestSwampApiWrapper.PASSWORD,
                              TestSwampApiWrapper.HOST)
        cls.PKG_LIST = list()

    @classmethod
    def tearDownClass(cls):
        # Uploaded-package deletion is intentionally disabled; see the
        # VCS history for the original deletePackage loop over PKG_LIST.
        cls.API_WRAPPER.logout()

    def _upload_and_assess(self, pkg_dir, archive_name, tool_name, deps_conf=None):
        """Upload resources/test_packages/<pkg_dir>/<archive_name> and
        launch an assessment with *tool_name* on the default platform.

        ``deps_conf``, when given, names a conf file in the same
        directory whose ``dependencies-*`` entries become the OS
        dependency dict.  Asserts both steps succeeded and returns the
        assessment-run UUID.
        """
        base = osp.join(osp.dirname(__file__), 'resources/test_packages', pkg_dir)
        pkg_deps_dict = dict()
        if deps_conf is not None:
            deps = read_conf_into_dict(osp.join(base, deps_conf))
            pkg_deps_dict = {k.partition('dependencies-')[2]: v
                             for k, v in deps.items()
                             if k.startswith('dependencies-')}
        pkg_uuid = TestAssess.API_WRAPPER.uploadPackage(osp.join(base, 'package.conf'),
                                                        osp.join(base, archive_name),
                                                        TestSwampApiWrapper.PROJECT,
                                                        pkg_deps_dict,
                                                        True)
        self.assertIsNotNone(pkg_uuid)
        TestAssess.PKG_LIST.append(pkg_uuid)
        tool = TestAssess.API_WRAPPER.getToolFromName(tool_name,
                                                      TestSwampApiWrapper.PROJECT)
        arun_uuid = TestAssess.API_WRAPPER.runAssessment(pkg_uuid,
                                                         [tool.getIdentifierString()],
                                                         TestSwampApiWrapper.PROJECT,
                                                         None)
        self.assertIsNotNone(arun_uuid)
        return arun_uuid

    def test_get_run_assess1(self):
        self._upload_and_assess('swamp-gradle-example-1.0',
                                'swamp-gradle-example-1.0.zip', 'error-prone')

    def test_get_run_assess2(self):
        self._upload_and_assess('2048-android-1.8', 'v1.8.zip', 'Android lint')

    def test_get_run_assess3(self):
        self._upload_and_assess('beautifulsoup4-4.3.2',
                                'beautifulsoup4-4.3.2.tar.gz', 'Bandit')

    def test_get_run_assess4(self):
        self._upload_and_assess('capistrano-3.4.0', 'capistrano-3.4.0.gem', 'Reek')

    def test_get_run_assess5(self):
        self._upload_and_assess('java-cli-1.3.0', 'java-cli-1.1.zip', 'Findbugs')

    def test_get_run_assess6(self):
        self._upload_and_assess('lighttpd-1.4.45', 'lighttpd-1.4.45.tar.xz',
                                'Clang Static Analyzer')

    def test_get_run_assess7(self):
        self._upload_and_assess('moodle-3.1.1', 'moodle-3.1.1.zip', 'ESLint')

    def test_get_run_assess8(self):
        self._upload_and_assess('pylxc-0.0.3', 'pylxc-0.0.3.tar.gz', 'Pylint')

    def test_get_run_assess9(self):
        # Ruby-on-Rails package with OS-level dependencies.
        self._upload_and_assess('railsgoat-9052b4fcf0', 'railsgoat-9052b4fcf0.zip',
                                'Brakeman', deps_conf='pkg-os-dependencies.conf')

    def test_get_run_assess10(self):
        self._upload_and_assess('sandrorat-apk-unknown', 'SandroRat.apk',
                                'RevealDroid')

    def test_get_run_assess11(self):
        self._upload_and_assess('scarf-io-1.0', 'scarf-io.zip',
                                'OWASP Dependency Check')

    def test_get_run_assess12(self):
        self._upload_and_assess('sinatra-starter-2ad9cba672',
                                'sinatra-starter-2ad9cba672.zip', 'Dawn')

    def test_get_run_assess13(self):
        self._upload_and_assess('swamp-gradle-example-1.0',
                                'swamp-gradle-example-1.0.zip', 'checkstyle')

    def test_get_run_assess14(self):
        self._upload_and_assess('tomcat-coyote-7.0.27',
                                'tomcat-coyote-7.0.27.tar.gz', 'Findbugs')

    def test_get_run_assess15(self):
        self._upload_and_assess('wordpress-4.5.1', 'WordPress-4.5.1.zip', 'PHPMD')

    def test_get_run_assess16(self):
        self._upload_and_assess('c_hashmap-2013-01-08', 'c_hashmap-master.zip',
                                'Clang Static Analyzer')
class TestReporting(TestSwampApiWrapper):
    """End-to-end test: run one assessment to completion and fetch results."""

    @classmethod
    def setUpClass(cls):
        super(TestReporting, cls).setUpClass()
        cls.API_WRAPPER = SwampApiWrapper()
        cls.API_WRAPPER.login(TestSwampApiWrapper.USERNAME,
                              TestSwampApiWrapper.PASSWORD,
                              TestSwampApiWrapper.HOST)

    @classmethod
    def tearDownClass(cls):
        cls.API_WRAPPER.logout()

    def test_get_results1(self):
        """Upload the gradle example, assess it with Findbugs on the
        default platform, poll until the run finishes, then download the
        results file."""
        base = osp.join(osp.dirname(__file__),
                        'resources/test_packages/swamp-gradle-example-1.0')
        pkg_uuid = TestReporting.API_WRAPPER.uploadPackage(
            osp.join(base, 'package.conf'),
            osp.join(base, 'swamp-gradle-example-1.0.zip'),
            TestSwampApiWrapper.PROJECT,
            dict(),
            True)
        self.assertIsNotNone(pkg_uuid)
        tool = TestReporting.API_WRAPPER.getToolFromName('Findbugs',
                                                         TestSwampApiWrapper.PROJECT)
        pkg_ver = TestReporting.API_WRAPPER.getPackageVersion(pkg_uuid,
                                                              TestSwampApiWrapper.PROJECT)
        platform = TestReporting.API_WRAPPER.getDefaultPlatformVersion(
            pkg_ver.getPackageThing().getType())
        assessment_run = TestReporting.API_WRAPPER.runAssessment(
            pkg_ver,
            tool,
            TestReporting.API_WRAPPER.getProject(TestSwampApiWrapper.PROJECT),
            platform)
        self.assertIsNotNone(assessment_run)

        # Poll every 10 seconds until the run reaches a terminal state.
        arun_results_uuid = None
        while True:
            record = TestReporting.API_WRAPPER.getAssessmentRecord(
                TestSwampApiWrapper.PROJECT,
                assessment_run.getUUIDString())
            status = AssessmentStatus.translateAssessmentStatus(record.getStatus())
            print(status, record.getStatus())
            time.sleep(10)
            if status in (AssessmentStatus.FAILED, AssessmentStatus.SUCCESS):
                arun_results_uuid = record.getAssessmentResultUUID()
                break

        # NOTE(review): results are fetched even when the run FAILED, in
        # which case the result UUID may be empty — confirm intended.
        outfile = osp.join(osp.dirname(__file__), 'outfile.xml')
        TestReporting.API_WRAPPER.getAssessmentResults(TestSwampApiWrapper.PROJECT,
                                                       arun_results_uuid,
                                                       outfile)
if __name__ == '__main__':
    # Silence log4j output from the underlying Java SWAMP client before
    # handing control to the unittest runner.
    BasicConfigurator.configure(NullAppender())
    unittest.main(verbosity=2)
| apache-2.0 |
josephnoir/RIOT | tests/periph_flashpage/tests/01-run.py | 16 | 1369 | #!/usr/bin/env python3
# Copyright (C) 2018 Federico Pellegrin <fede@evolware.org>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
def testfunc(child):
    """Drive the periph_flashpage test firmware over its serial shell."""
    # Make sure we are at a clean prompt before starting.
    child.sendline("")
    child.expect('>')

    # Write the local page buffer to the last flash page and verify it.
    child.sendline("test_last")
    child.expect_exact('wrote local page buffer to last flash page')
    child.expect('>')

    # Optional capabilities (raw write, RWWEE section) are detected by
    # looking for their shell command in the "help" output; each test is
    # run only when the board advertises it.
    optional_tests = [
        ('test_last_raw', 'wrote raw short buffer to last flash page'),
        ('test_last_rwwee', 'wrote local page buffer to last RWWEE flash page'),
    ]
    for command, success_msg in optional_tests:
        child.sendline("help")
        if child.expect([command, '>']) == 0:
            child.sendline(command)
            child.expect_exact(success_msg)
            child.expect('>')
if __name__ == "__main__":
    # Delegate to RIOT's testrunner, which spawns the board terminal and
    # reports success/failure through the process exit code.
    sys.exit(run(testfunc))
| lgpl-2.1 |
annahs/atmos_research | LEO_calc_coating_from_meas_scat_amp_and_write_to_db.py | 1 | 3857 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#coat_thickness_from_actual_scat_amp FLOAT
#UNIQUE (sp2b_file, file_index, instr)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()   # read cursor: drives the SELECT loop below
c2 = conn.cursor()  # write cursor: issues UPDATEs while c is still iterating

# Dataset selection parameters for the query below.
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = '20110105'
end_date = '20120601'

# Pickled dict-of-dicts mapping BC core diameter -> scattering amplitude
# -> coating thickness (see get_coating_thickness below).
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2-nc(2p26,1p26).lupckl'
rBC_density = 1.8   # g/cm^3; used with the fg->g conversion in the VED formula
incand_sat = 3750   # upper cut on incand_amp in the query — presumably the
                    # incandescence detector saturation level; TODO confirm

lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()

# Print the table's column names as a sanity check before processing.
c.execute('''SELECT * FROM SP2_coating_analysis''')
names = [description[0] for description in c.description]
pprint(names)

# Analysis window as POSIX timestamps (UTC).
begin_data = calendar.timegm(datetime.strptime(start_date,'%Y%m%d').timetuple())
end_data = calendar.timegm(datetime.strptime(end_date,'%Y%m%d').timetuple())
def get_rBC_mass(incand_pk_ht, year):
    """Convert an incandescence peak height into an rBC mass in femtograms.

    Applies the AD-corrected linear SP2 calibration for the given campaign
    year.

    Args:
        incand_pk_ht: incandescence peak height (instrument units).
        year: calibration year; only 2010 (ECSP2 at WHI) and 2012
            (UBCSP2 at WHI) are characterized.

    Returns:
        rBC mass in fg.

    Raises:
        ValueError: if no calibration exists for ``year``.  (The original
        code fell through to an UnboundLocalError in this case — notably
        for 2011, which the configured start_date can produce.)
    """
    if year == 2012:
        # AD corrected linear calibration for UBCSP2 at WHI 2012
        return 0.003043 * incand_pk_ht + 0.24826
    if year == 2010:
        # AD corrected linear calibration for ECSP2 at WHI 2010
        return 0.01081 * incand_pk_ht - 0.32619
    raise ValueError('no rBC calibration available for year %s' % year)
def get_coating_thickness(BC_VED, scat_amp, coating_lookup_table):
    """Look up a coating thickness for a BC core size / scattering amplitude.

    ``coating_lookup_table`` is a dict of dicts: keyed first by BC core
    diameter and then by calculated scattering amplitude.  Both lookups
    select the largest table key that does not exceed the requested value;
    values below the smallest key fall back to the smallest key.
    """
    def _bracket(sorted_keys, value):
        # Return the key just below the first key exceeding ``value``.
        # Mirrors the original scan exactly, including its failure mode
        # (UnboundLocalError) when ``value`` >= every key.
        below = sorted_keys[0]
        for key in sorted_keys:
            if key > value:
                chosen = below
                break
            below = key
        return chosen

    core_dia_to_use = _bracket(sorted(coating_lookup_table.keys()), BC_VED)
    scat_amps = sorted(coating_lookup_table[core_dia_to_use].keys())
    scat_amp_to_use = _bracket(scat_amps, scat_amp)
    # nan preserves the original fallback for a missing table entry.
    return coating_lookup_table[core_dia_to_use].get(scat_amp_to_use, np.nan)
# Progress is printed every LOG_EVERY_N processed rows.
LOG_EVERY_N = 10000
i = 0
# Iterate every incandescent particle in the analysis window that is below
# the saturation cut, compute its coating thickness, and write it back.
for row in c.execute('''SELECT incand_amp, LF_scat_amp, unix_ts_utc, sp2b_file, file_index, instr FROM SP2_coating_analysis
    WHERE instr=? and instr_locn=? and particle_type=? and incand_amp<? and unix_ts_utc>=? and unix_ts_utc<?''',
    (instrument,instrument_locn,type_particle,incand_sat,begin_data,end_data)):
    incand_amp = row[0]
    LF_amp = row[1]
    event_time = datetime.utcfromtimestamp(row[2])
    file = row[3]     # NOTE(review): shadows the Py2 builtin `file`
    index = row[4]
    instrt = row[5]
    rBC_mass = get_rBC_mass(incand_amp, event_time.year)
    # Below 0.25 fg the calibration is not trusted; store NULLs instead.
    if rBC_mass >= 0.25:
        # Volume-equivalent diameter in nm (10^15 fg/g, 10^7 nm/cm).
        rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7
        coat_th = get_coating_thickness(rBC_VED, LF_amp, lookup_table)
    else:
        rBC_VED = None
        coat_th = None
    # Write via the second cursor; keyed on (file, index, instrument).
    c2.execute('''UPDATE SP2_coating_analysis SET coat_thickness_from_actual_scat_amp=? WHERE sp2b_file=? and file_index=? and instr=?''', (coat_th, file, index, instrt))
    i += 1
    if (i % LOG_EVERY_N) == 0:
        print 'record: ', i
# Single commit at the end keeps the run fast (and all-or-nothing).
conn.commit()
conn.close()
MarkusTeufelberger/openobject-server | openerp/addons/base/__init__.py | 5 | 1133 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir
import workflow
import module
import res
import report
import test
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HyperBaton/ansible | test/units/modules/network/ios/test_ios_facts.py | 9 | 5070 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.ios import ios_facts
from ansible.module_utils.six import assertCountEqual
from units.modules.utils import set_module_args
from .ios_module import TestIosModule, load_fixture
class TestIosFactsModule(TestIosModule):
    """Unit tests for the ios_facts module, driven by canned CLI fixtures."""

    module = ios_facts

    def setUp(self):
        """Patch command execution and capability discovery for each test."""
        super(TestIosFactsModule, self).setUp()
        self.mock_run_commands = patch('ansible.module_utils.network.ios.facts.legacy.base.run_commands')
        self.run_commands = self.mock_run_commands.start()

        # NOTE(review): this patch is never stopped in tearDown — confirm
        # whether that leak is intentional.
        self.mock_get_resource_connection = patch('ansible.module_utils.network.common.facts.facts.get_resource_connection')
        self.get_resource_connection = self.mock_get_resource_connection.start()

        self.mock_get_capabilities = patch('ansible.module_utils.network.ios.facts.legacy.base.get_capabilities')
        self.get_capabilities = self.mock_get_capabilities.start()
        # Static capability payload matching the device the fixtures were
        # captured from.
        self.get_capabilities.return_value = {
            'device_info': {
                'network_os': 'ios',
                'network_os_hostname': 'an-ios-01',
                'network_os_image': 'flash0:/vios-adventerprisek9-m',
                'network_os_model': 'WS-C3750-24TS',
                'network_os_version': '15.6(3)M2'
            },
            'network_api': 'cliconf'
        }

    def tearDown(self):
        super(TestIosFactsModule, self).tearDown()
        self.mock_run_commands.stop()
        self.mock_get_capabilities.stop()

    def load_fixtures(self, commands=None):
        """Serve fixture files instead of running commands on a device."""
        def load_from_file(*args, **kwargs):
            commands = kwargs['commands']
            output = list()
            for command in commands:
                # Map e.g. "show version | include x" to the fixture file
                # "ios_facts_show_version".
                filename = str(command).split(' | ')[0].replace(' ', '_')
                output.append(load_fixture('ios_facts_%s' % filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_ios_facts_stacked(self):
        """Stack members are reported as parallel model/serial lists."""
        set_module_args(dict(gather_subset='default'))
        result = self.execute_module()
        self.assertEqual(
            result['ansible_facts']['ansible_net_model'], 'WS-C3750-24TS'
        )
        self.assertEqual(
            result['ansible_facts']['ansible_net_serialnum'], 'CAT0726R0ZU'
        )
        self.assertEqual(
            result['ansible_facts']['ansible_net_stacked_models'], ['WS-C3750-24TS-E', 'WS-C3750-24TS-E', 'WS-C3750G-12S-E']
        )
        self.assertEqual(
            result['ansible_facts']['ansible_net_stacked_serialnums'], ['CAT0726R0ZU', 'CAT0726R10A', 'CAT0732R0M4']
        )

    def test_ios_facts_tunnel_address(self):
        """MAC addresses are parsed per interface; tunnels have none."""
        set_module_args(dict(gather_subset='interfaces'))
        result = self.execute_module()
        self.assertEqual(
            result['ansible_facts']['ansible_net_interfaces']['GigabitEthernet0/0']['macaddress'], '5e00.0003.0000'
        )
        self.assertEqual(
            result['ansible_facts']['ansible_net_interfaces']['GigabitEthernet1']['macaddress'], '5e00.0006.0000'
        )
        self.assertIsNone(
            result['ansible_facts']['ansible_net_interfaces']['Tunnel1110']['macaddress']
        )

    def test_ios_facts_filesystems_info(self):
        """Filesystem totals/free space are reported in kilobytes."""
        set_module_args(dict(gather_subset='hardware'))
        result = self.execute_module()
        self.assertEqual(
            result['ansible_facts']['ansible_net_filesystems_info']['bootflash:']['spacetotal_kb'], 7712692.0
        )
        self.assertEqual(
            result['ansible_facts']['ansible_net_filesystems_info']['bootflash:']['spacefree_kb'], 6453180.0
        )

    def test_ios_facts_neighbors(self):
        """CDP/LLDP neighbors are grouped per local interface."""
        set_module_args(dict(gather_subset='interfaces'))
        result = self.execute_module()
        assertCountEqual(
            self,
            result['ansible_facts']['ansible_net_neighbors'].keys(), ['GigabitEthernet1', 'GigabitEthernet3']
        )
        assertCountEqual(
            self,
            result['ansible_facts']['ansible_net_neighbors']['GigabitEthernet1'],
            [{'host': 'R2', 'port': 'GigabitEthernet2'}, {'host': 'R3', 'port': 'GigabitEthernet3'}]
        )
        assertCountEqual(
            self,
            result['ansible_facts']['ansible_net_neighbors']['GigabitEthernet3'], [{'host': 'Rtest', 'port': 'Gi1'}]
        )
| gpl-3.0 |
detiber/ansible | lib/ansible/modules/cloud/amazon/ec2_elb.py | 59 | 14113 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELBs
description:
- This module de-registers or registers an AWS EC2 instance from the ELBs
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
author: "John Jarvis (@jarv)"
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
enable_availability_zone:
description:
- Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
required: false
default: yes
choices: [ "yes", "no" ]
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
wait_timeout:
description:
- Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs.
If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
required: false
default: 0
version_added: "1.6"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
action: ec2_facts
- name: Instance De-register
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
state: absent
roles:
- myrole
post_tasks:
- name: Instance Register
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: present
with_items: "{{ ec2_elbs }}"
"""
import time
try:
import boto
import boto.ec2
import boto.ec2.autoscale
import boto.ec2.elb
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElbManager:
    """Handles EC2 instance ELB registration and de-registration.

    Wraps boto's ELB, autoscale and EC2 connections; all AWS errors are
    surfaced through module.fail_json.
    """

    def __init__(self, module, instance_id=None, ec2_elbs=None,
                 region=None, **aws_connect_params):
        # module: AnsibleModule used for fail_json reporting.
        # ec2_elbs: optional list of ELB names; when None, the ELBs are
        # discovered from the instance's auto scaling group membership.
        self.module = module
        self.instance_id = instance_id
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.lbs = self._get_instance_lbs(ec2_elbs)
        self.changed = False

    def deregister(self, wait, timeout):
        """De-register the instance from all ELBs and wait for the ELB
        to report it out-of-service"""
        for lb in self.lbs:
            initial_state = self._get_instance_health(lb)
            if initial_state is None:
                # Instance isn't registered with this load
                # balancer. Ignore it and try the next one.
                continue
            lb.deregister_instances([self.instance_id])

            # The ELB is changing state in some way. Either an instance that's
            # InService is moving to OutOfService, or an instance that's
            # already OutOfService is being deregistered.
            self.changed = True

            if wait:
                self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)

    def register(self, wait, enable_availability_zone, timeout):
        """Register the instance for all ELBs and wait for the ELB
        to report the instance in-service"""
        for lb in self.lbs:
            initial_state = self._get_instance_health(lb)

            if enable_availability_zone:
                self._enable_availailability_zone(lb)

            lb.register_instances([self.instance_id])

            if wait:
                self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
            else:
                # We cannot assume no change was made if we don't wait
                # to find out
                self.changed = True

    def exists(self, lbtest):
        """ Verify that the named ELB actually exists """

        found = False
        for lb in self.lbs:
            if lb.name == lbtest:
                found=True
                break
        return found

    def _enable_availailability_zone(self, lb):
        """Enable the current instance's availability zone in the provided lb.
        Returns True if the zone was enabled or False if no change was made.
        lb: load balancer"""
        instance = self._get_instance()
        if instance.placement in lb.availability_zones:
            return False

        lb.enable_zones(zones=instance.placement)

        # If successful, the new zone will have been added to
        # lb.availability_zones
        return instance.placement in lb.availability_zones

    def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
        """Wait for an ELB to change state
        lb: load balancer
        awaited_state : state to poll for (string)"""

        wait_timeout = time.time() + timeout
        while True:
            instance_state = self._get_instance_health(lb)

            if not instance_state:
                msg = ("The instance %s could not be put in service on %s."
                       " Reason: Invalid Instance")
                self.module.fail_json(msg=msg % (self.instance_id, lb))

            if instance_state.state == awaited_state:
                # Check the current state against the initial state, and only set
                # changed if they are different.
                if (initial_state is None) or (instance_state.state != initial_state.state):
                    self.changed = True
                break
            elif self._is_instance_state_pending(instance_state):
                # If it's pending, we'll skip further checks and continue waiting
                pass
            elif (awaited_state == 'InService'
                  and instance_state.reason_code == "Instance"
                  and time.time() >= wait_timeout):
                # If the reason_code for the instance being out of service is
                # "Instance" this indicates a failure state, e.g. the instance
                # has failed a health check or the ELB does not have the
                # instance's availability zone enabled. The exact reason why is
                # described in InstantState.description.
                msg = ("The instance %s could not be put in service on %s."
                       " Reason: %s")
                self.module.fail_json(msg=msg % (self.instance_id,
                                                 lb,
                                                 instance_state.description))
            time.sleep(1)

    def _is_instance_state_pending(self, instance_state):
        """
        Determines whether the instance_state is "pending", meaning there is
        an operation under way to bring it in service.
        """
        # This is messy, because AWS provides no way to distinguish between
        # an instance that is is OutOfService because it's pending vs. OutOfService
        # because it's failing health checks. So we're forced to analyze the
        # description, which is likely to be brittle.
        return (instance_state and 'pending' in instance_state.description)

    def _get_instance_health(self, lb):
        """
        Check instance health, should return status object or None under
        certain error conditions.
        """
        try:
            status = lb.get_instance_health([self.instance_id])[0]
        except boto.exception.BotoServerError as e:
            # InvalidInstance means "not registered with this ELB", which
            # callers treat as a normal condition, not a failure.
            if e.error_code == 'InvalidInstance':
                return None
            else:
                raise
        return status

    def _get_instance_lbs(self, ec2_elbs=None):
        """Returns a list of ELBs attached to self.instance_id
        ec2_elbs: an optional list of elb names that will be used
                  for elb lookup instead of returning what elbs
                  are attached to self.instance_id"""

        if not ec2_elbs:
            ec2_elbs = self._get_auto_scaling_group_lbs()

        try:
            elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))

        elbs = []
        marker = None
        # Paginate through all load balancers in the region.
        while True:
            try:
                newelbs = elb.get_all_load_balancers(marker=marker)
                marker = newelbs.next_marker
                elbs.extend(newelbs)
                if not marker:
                    break
            except TypeError:
                # Older version of boto do not allow for params
                elbs = elb.get_all_load_balancers()
                break

        if ec2_elbs:
            lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
        else:
            lbs = []
            for lb in elbs:
                for info in lb.instances:
                    if self.instance_id == info.id:
                        lbs.append(lb)
        return lbs

    def _get_auto_scaling_group_lbs(self):
        """Returns a list of ELBs associated with self.instance_id
           indirectly through its auto scaling group membership"""

        try:
            asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))

        asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
        if len(asg_instances) > 1:
            self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")

        if not asg_instances:
            # Instance isn't in an ASG, so it has no indirectly-attached ELBs.
            asg_elbs = []
        else:
            asg_name = asg_instances[0].group_name

            asgs = asg.get_all_groups([asg_name])
            if len(asg_instances) != 1:
                self.module.fail_json(msg="Illegal state, expected one auto scaling group.")

            asg_elbs = asgs[0].load_balancers

        return asg_elbs

    def _get_instance(self):
        """Returns a boto.ec2.InstanceObject for self.instance_id"""
        try:
            ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            self.module.fail_json(msg=str(e))
        return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
def main():
    """Module entry point: register/deregister an instance with its ELBs."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True},
        instance_id={'required': True},
        ec2_elbs={'default': None, 'required': False, 'type': 'list'},
        enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
        wait={'required': False, 'default': True, 'type': 'bool'},
        wait_timeout={'required': False, 'default': 0, 'type': 'int'}
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    ec2_elbs = module.params['ec2_elbs']
    wait = module.params['wait']
    enable_availability_zone = module.params['enable_availability_zone']
    timeout = module.params['wait_timeout']

    # Every declared option is always present in module.params (with its
    # default), so the previous `'ec2_elbs' not in module.params` check
    # could never fire.  Test the value instead so that registration
    # without a target ELB list fails fast, as documented.
    if module.params['state'] == 'present' and ec2_elbs is None:
        module.fail_json(msg="ELBs are required for registration")

    instance_id = module.params['instance_id']
    elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)

    # Validate the requested ELB names against what is actually attached.
    if ec2_elbs is not None:
        for elb in ec2_elbs:
            if not elb_man.exists(elb):
                msg = "ELB %s does not exist" % elb
                module.fail_json(msg=msg)

    if module.params['state'] == 'present':
        elb_man.register(wait, enable_availability_zone, timeout)
    elif module.params['state'] == 'absent':
        elb_man.deregister(wait, timeout)

    # Expose the affected ELB names so a later register task can reuse them.
    ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
    ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)

    module.exit_json(**ec2_facts_result)
| gpl-3.0 |
pfnet/chainer | tests/chainerx_tests/unit_tests/routines_tests/test_pooling.py | 4 | 7499 | import functools
from operator import mul
import unittest
import chainer
import numpy
import pytest
import chainerx
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('x_shape,ksize,stride,pad', [
    ((2, 3, 4), (1,), 1, 0),
    ((1, 3, 4), (2, ), 3, 2),
    ((1, 3, 4), (2,), 3, 2),
    ((2, 3, 4, 4), (3, 3), 1, 0),
    ((2, 3, 4, 4), (3, 3), None, 0),
    ((1, 3, 4, 4), (3, 3), (1, 2), 1),
    ((1, 3, 4, 4), (3, 3), 2, (2, 0)),
    ((1, 3, 2, 6, 3), (1, 3, 2), 2, (2, 0, 1)),
    ((1, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
    ((2, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
    ((1, 3, 2, 6, 3, 2), (1, 3, 2, 2), 2, 2),
])
@chainer.testing.parameterize_pytest('cover_all', [True, False])
class TestMaxPool(op_utils.ChainerOpTest):
    """Compares chainerx.max_pool against chainer's max_pooling_nd."""

    # max_pool is non-differentiable at ties; let the harness re-sample
    # inputs that land on such points.
    dodge_nondifferentiable = True

    def setup(self, float_dtype):
        dtype = float_dtype
        ksize = self.ksize
        device = chainerx.get_default_device()
        if (device.backend.name == 'cuda'
                and len(ksize) != 2
                and len(ksize) != 3):
            raise unittest.SkipTest(
                'cuDNN supports only 2 and 3 spatial dimensions')

        # float16 needs looser tolerances for the gradient checks.
        if dtype == 'float16':
            self.check_backward_options.update({'rtol': 5e-2, 'atol': 1e-3})
            self.check_double_backward_options.update({
                'rtol': 5e-2, 'atol': 1e-3})

        self.dtype = dtype

    def generate_inputs(self):
        x_shape = self.x_shape
        dtype = self.dtype
        if self.test_name in ('test_backward', 'test_double_backward'):
            # Deterministic, strictly increasing values in [-1, 1) avoid
            # ties, which would make the max non-differentiable.
            x = numpy.arange(functools.reduce(mul, x_shape), dtype=dtype)
            x = x.reshape(x_shape)
            x = 2 * x / x.size - 1
        else:
            x = numpy.random.randn(*x_shape).astype(dtype, copy=False)
        return x,

    def forward_chainerx(self, inputs):
        x, = inputs
        y = chainerx.max_pool(
            x, ksize=self.ksize, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all)
        # This function can return -inf (or huge negative numbers in case of
        # CUDA) around boundaries.
        # Convert them to finite numbers in order to properly calculate numeric
        # gradients.
        y = chainerx.maximum(y, -1e4)
        return y,

    def forward_chainer(self, inputs):
        x, = inputs
        y = chainer.functions.max_pooling_nd(
            x, ksize=self.ksize, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all)
        # Convert -inf to finite numbers.
        y = chainer.functions.maximum(y, numpy.full_like(y.array, -1e4))
        return y,
@pytest.mark.parametrize('x_shape,ksize,stride,pad', [
    ((1, 3), (), 1, 0),  # Requires at least one spatial dimension
    ((2, 3, 4, 3), (2, 2, 1), 3, 2),  # Wrong number of ksize.
    ((2, 3, 4, 3), (2, 2), (1,), 0),  # Wrong number of strides.
    ((1, 3, 4, 3), (2, 2), 3, (2,)),  # Wrong number of paddings.
    ((4, 4, 2, 2), 5, 3, 0),  # Output size should be positive.
])
@pytest.mark.parametrize('cover_all', [True, False])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_max_pool_invalid(
        device, x_shape, ksize, stride, pad, cover_all, float_dtype):
    """max_pool must reject inconsistent shape/ksize/stride/pad combos."""
    arr = chainerx.array(
        numpy.random.uniform(-1, 1, x_shape).astype(float_dtype))
    with pytest.raises(chainerx.DimensionError):
        chainerx.max_pool(
            arr, ksize=ksize, stride=stride, pad=pad, cover_all=cover_all)
def _get_pad_mode_kwargs(pad_mode, is_chainerx):
# ChainerX
if is_chainerx:
if pad_mode is None:
return {}
return {'pad_mode': pad_mode}
# Chainer
# chainerx `pad_mode` defaults to 'ignore', whereas chainer's default is
# pad_value=0.
if pad_mode == 'zero':
return {'pad_value': 0}
if pad_mode in ('ignore', None):
return {'pad_value': None}
assert False, pad_mode
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('x_shape,ksize,stride,pad', [
    ((2, 3, 4), (1,), 1, 0),
    ((1, 3, 4), (2, ), 3, 2),
    ((1, 3, 4), (2,), 3, 2),
    ((2, 3, 4, 4), (3, 3), 1, 0),
    ((2, 3, 4, 4), (3, 3), None, 0),
    ((1, 3, 4, 4), (3, 3), (1, 2), 1),
    ((1, 3, 4, 4), (3, 3), 2, (2, 0)),
    ((1, 3, 2, 6, 3), (1, 3, 2), 2, (2, 0, 1)),
    ((1, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
    ((2, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
    ((1, 3, 2, 6, 3, 2), (1, 3, 1, 1), 1, 1),
])
@chainer.testing.parameterize_pytest('pad_mode', ['zero', 'ignore', None])
# ignore warning occurring when pad_value is None in chainer
@pytest.mark.filterwarnings('ignore:invalid value encountered in true_divide')
class TestAveragePool(op_utils.ChainerOpTest):
    """Compares chainerx.average_pool against chainer's average_pooling_nd."""

    def setup(self, float_dtype):
        dtype = float_dtype
        ksize = self.ksize
        device = chainerx.get_default_device()
        if (device.backend.name == 'cuda'
                and len(ksize) != 2
                and len(ksize) != 3):
            raise unittest.SkipTest(
                'cuDNN supports only 2 and 3 spatial dimensions.')

        # TODO(niboshi): average_pool can return nan if pad_mode is 'ignore',
        # and numeric gradients cannot be calculated.
        # If chainerx.where is implemented, we can replace nans and remove
        # this skip.
        if self.pad_mode in ('ignore', None):
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        self.check_double_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
        # float16 needs looser tolerances for forward/backward checks.
        if dtype == 'float16':
            self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-4})
            self.check_backward_options.update({'rtol': 5e-2, 'atol': 5e-3})
        else:
            self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3, })

        self.dtype = dtype

    def generate_inputs(self):
        x_shape = self.x_shape
        dtype = self.dtype
        x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
        return x,

    def forward_chainerx(self, inputs):
        x, = inputs
        pad_mode_kwargs = _get_pad_mode_kwargs(self.pad_mode, True)
        y = chainerx.average_pool(
            x, ksize=self.ksize, stride=self.stride, pad=self.pad,
            **pad_mode_kwargs)
        return y,

    def forward_chainer(self, inputs):
        x, = inputs
        pad_value_kwargs = _get_pad_mode_kwargs(self.pad_mode, False)
        y = chainer.functions.average_pooling_nd(
            x, ksize=self.ksize, stride=self.stride, pad=self.pad,
            **pad_value_kwargs)
        return y,
@pytest.mark.parametrize('x_shape,ksize,stride,pad', [
    ((1, 3), (), 1, 0),  # Requires at least one spatial dimension
    ((2, 3, 4, 3), (2, 2, 1), 3, 2),  # Wrong number of ksize.
    ((2, 3, 4, 3), (2, 2), (1,), 0),  # Wrong number of strides.
    ((1, 3, 4, 3), (2, 2), 3, (2,)),  # Wrong number of paddings.
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('pad_mode', ['zero', 'ignore', None])
def test_average_pool_invalid(
        device, x_shape, ksize, stride, pad, pad_mode, float_dtype):
    """average_pool must reject inconsistent shape/ksize/stride/pad combos."""
    arr = chainerx.array(
        numpy.random.uniform(-1, 1, x_shape).astype(float_dtype))
    kwargs = _get_pad_mode_kwargs(pad_mode, True)
    with pytest.raises(chainerx.DimensionError):
        chainerx.average_pool(
            arr, ksize=ksize, stride=stride, pad=pad, **kwargs)
| mit |
EsMaSol/xbmc | addons/service.xbmc.versioncheck/lib/common.py | 82 | 7008 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs
# Addon metadata, resolved once at import time and shared module-wide.
__addon__ = xbmcaddon.Addon()
__addonversion__ = __addon__.getAddonInfo('version')
__addonname__ = __addon__.getAddonInfo('name')
__addonpath__ = __addon__.getAddonInfo('path').decode('utf-8')
__addonprofile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') ).decode('utf-8')
__icon__ = __addon__.getAddonInfo('icon')
# Fixes unicode problems
def string_unicode(text, encoding='utf-8'):
    """Return *text* decoded to unicode, or unchanged if that fails.

    On Python 2 a byte string is decoded with *encoding*; anything that
    cannot be decoded is returned as-is, so callers never see an
    exception from here.
    """
    try:
        text = unicode(text, encoding)
    except (TypeError, UnicodeDecodeError, LookupError, NameError):
        # TypeError: already unicode (or not a string); UnicodeDecodeError:
        # undecodable bytes; LookupError: unknown codec; NameError: running
        # on Python 3, where `unicode` does not exist.  Narrowed from the
        # original bare `except`, which also swallowed KeyboardInterrupt.
        pass
    return text
def normalize_string(text):
    """Best-effort ASCII transliteration of *text* (accents stripped).

    BUG FIX: the original referenced ``unicodedata`` without importing it,
    so the bare ``except`` silently turned this function into a no-op.
    The module is now imported locally so normalization actually happens.
    """
    import unicodedata
    try:
        text = unicodedata.normalize('NFKD', string_unicode(text)).encode('ascii', 'ignore')
    except (TypeError, UnicodeError):
        # Non-string input or values NFKD cannot handle: return unchanged.
        pass
    return text
def localise(id):
    """Return the addon's localized string for *id*, normalized to ASCII."""
    # NOTE: the parameter name shadows the builtin `id`; kept for
    # compatibility with existing call sites.
    string = normalize_string(__addon__.getLocalizedString(id))
    return string
def log(txt):
    """Write *txt* to the Kodi debug log, prefixed with "Version Check"."""
    if isinstance (txt,str):
        # Py2 byte strings are decoded first so the u'' formatting below
        # cannot raise.
        txt = txt.decode("utf-8")
    message = u'%s: %s' % ("Version Check", txt)
    xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)
def get_password_from_user():
    """Prompt the user for a password via the on-screen keyboard.

    Returns:
        The entered text, or None when the dialog was cancelled.
        (Previously a cancelled dialog raised UnboundLocalError because
        ``pwd`` was only assigned on the confirmed branch.)
    """
    pwd = None
    keyboard = xbmc.Keyboard("", __addonname__ + "," +localise(32022), True)
    keyboard.doModal()
    if (keyboard.isConfirmed()):
        pwd = keyboard.getText()
    return pwd
def message_upgrade_success():
    """Show a 15-second notification that the upgrade finished successfully."""
    xbmc.executebuiltin("XBMC.Notification(%s, %s, %d, %s)" %(__addonname__,
                                                              localise(32013),
                                                              15000,
                                                              __icon__))
def message_restart():
    """Ask the user whether to restart the application and do so on 'yes'."""
    if dialog_yesno(32014):
        xbmc.executebuiltin("RestartApp")
def dialog_yesno(line1 = 0, line2 = 0):
    """Show a yes/no dialog built from localized string ids; True on 'yes'."""
    return xbmcgui.Dialog().yesno(__addonname__,
                                  localise(line1),
                                  localise(line2))
def upgrade_message(msg, oldversion, upgrade, msg_current, msg_available):
    """Show a generic 'upgrade available' dialog once playback is idle.

    *msg* is the localized-string id for the dialog body; the remaining
    parameters are accepted for interface compatibility but unused here.
    """
    # Don't show while watching a video
    while(xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
        xbmc.sleep(1000)
    i = 0
    # Short grace period after playback stops before popping the dialog.
    while(i < 5 and not xbmc.abortRequested):
        xbmc.sleep(1000)
        i += 1
    # NOTE(review): lexicographic string comparison of version strings, and
    # the setSetting call below is commented out, so this dialog repeats on
    # every run while the guard holds -- confirm that is intended.
    if __addon__.getSetting("lastnotified_version") < __addonversion__:
        xbmcgui.Dialog().ok(__addonname__,
                            localise(msg),
                            localise(32001),
                            localise(32002))
        #__addon__.setSetting("lastnotified_version", __addonversion__)
    else:
        log("Already notified one time for upgrading.")
def upgrade_message2( version_installed, version_available, version_stable, oldversion, upgrade,):
    """Show a version-specific upgrade dialog, at most once per new version.

    version_installed / version_available / version_stable are JSON-RPC
    version dicts; *oldversion* is 'stable' or a pre-release tag.
    """
    # shorten releasecandidate to rc
    if version_installed['tag'] == 'releasecandidate':
        version_installed['tag'] = 'rc'
    if version_available['tag'] == 'releasecandidate':
        version_available['tag'] = 'rc'
    # convert json-rpc result to strings for usage
    # NOTE(review): msg_current formats major/minor as ints (%i) while the
    # two lines below concatenate them as strings -- confirm the two dicts
    # really carry different types.
    msg_current = '%i.%i %s%s' %(version_installed['major'],
                                 version_installed['minor'],
                                 version_installed['tag'],
                                 version_installed.get('tagversion',''))
    msg_available = version_available['major'] + '.' + version_available['minor'] + ' ' + version_available['tag'] + version_available.get('tagversion','')
    msg_stable = version_stable['major'] + '.' + version_stable['minor'] + ' ' + version_stable['tag'] + version_stable.get('tagversion','')
    msg = localise(32034) %(msg_current, msg_available)
    # Don't show notify while watching a video
    while(xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
        xbmc.sleep(1000)
    i = 0
    while(i < 10 and not xbmc.abortRequested):
        xbmc.sleep(1000)
        i += 1
    # hack: convert current version number to stable string
    # so users don't get notified again. remove in future
    if __addon__.getSetting("lastnotified_version") == '0.1.24':
        __addon__.setSetting("lastnotified_stable", msg_stable)
    # Show different dialogs depending if there's a newer stable available.
    # Also split them between xbmc and kodi notifications to reduce possible confusion.
    # People will find out once they visit the website.
    # For stable only notify once and when there's a newer stable available.
    # Ignore any add-on updates as those only count for != stable
    if oldversion == 'stable' and __addon__.getSetting("lastnotified_stable") != msg_stable:
        # NOTE(review): lexicographic comparison against "13.9.0" -- breaks
        # for two-digit vs one-digit majors; confirm acceptable here.
        if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
            xbmcgui.Dialog().ok(__addonname__,
                                msg,
                                localise(32030),
                                localise(32031))
        else:
            xbmcgui.Dialog().ok(__addonname__,
                                msg,
                                localise(32032),
                                localise(32033))
        __addon__.setSetting("lastnotified_stable", msg_stable)
    elif oldversion != 'stable' and __addon__.getSetting("lastnotified_version") != msg_available:
        if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
            # point them to xbmc.org
            xbmcgui.Dialog().ok(__addonname__,
                                msg,
                                localise(32035),
                                localise(32031))
        else:
            #use kodi.tv
            xbmcgui.Dialog().ok(__addonname__,
                                msg,
                                localise(32035),
                                localise(32033))
        # older skins don't support a text field in the OK dialog.
        # let's use split lines for now. see code above.
        '''
        msg = localise(32034) %(msg_current, msg_available)
        if oldversion == 'stable':
            msg = msg + ' ' + localise(32030)
        else:
            msg = msg + ' ' + localise(32035)
        msg = msg + ' ' + localise(32031)
        xbmcgui.Dialog().ok(__addonname__, msg)
        #__addon__.setSetting("lastnotified_version", __addonversion__)
        '''
        __addon__.setSetting("lastnotified_version", msg_available)
    else:
        log("Already notified one time for upgrading.")
willcode/gnuradio | gr-digital/python/digital/qa_binary_slicer_fb.py | 5 | 1292 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import random
from gnuradio import gr, gr_unittest, digital, blocks
class test_binary_slicer_fb(gr_unittest.TestCase):
    """QA for digital.binary_slicer_fb: slices noisy floats to hard 0/1 bits."""

    def setUp(self):
        # Fixed seed keeps the injected noise reproducible between runs.
        random.seed(0)
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_binary_slicer_fb(self):
        expected_result = (0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1)
        src_data = (-1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1)
        # Perturb every symbol by noise in (0, 1] -- small enough that the
        # slicer must still decide the original bit for each sample.
        src_data = [symbol + (1 - random.random()) for symbol in src_data]

        source = blocks.vector_source_f(src_data)
        slicer = digital.binary_slicer_fb()
        sink = blocks.vector_sink_b()
        self.tb.connect(source, slicer)
        self.tb.connect(slicer, sink)
        # Run the flowgraph to completion, then compare the sliced bits.
        self.tb.run()
        sliced = sink.data()
        self.assertFloatTuplesAlmostEqual(expected_result, sliced)
if __name__ == '__main__':
    # Run this QA module directly through GNU Radio's unittest wrapper.
    gr_unittest.run(test_binary_slicer_fb)
| gpl-3.0 |
leorochael/odoo | addons/stock_dropshipping/wizard/__init__.py | 313 | 1077 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_invoice_onshipping
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
haoyunfeix/crosswalk-test-suite | cordova/cordova-feature-android-tests/feature/comm.py | 47 | 16578 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2016 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Cici,Li<cici.x.li@intel.com>
# Lin, Wanming <wanming.lin@intel.com>
import os
import sys
import commands
import shutil
import glob
import fnmatch
import re
import json
from os.path import join, getsize
# Force UTF-8 as the interpreter-wide default codec (Python 2 hack).
reload(sys)
sys.setdefaultencoding("utf-8")
# Paths derived from this script's own location.
script_path = os.path.realpath(__file__)
const_path = os.path.dirname(script_path)
tool_path = const_path + "/../tools/"
plugin_tool = const_path + "/../tools/cordova-plugin-crosswalk-webview/"
testapp_path = "/tmp/cordova-sampleapp/"
def setUp():
    """Populate the module-level test configuration globals.

    Reads DEVICE_ID from the environment and arch.txt / mode.txt /
    pack-type / VERSION from the suite root; exits the process with
    status 1 on any invalid or missing value.
    """
    global ARCH, MODE, device, CROSSWALK_VERSION, CROSSWALK_BRANCH, PACK_TYPE
    device = os.environ.get('DEVICE_ID')
    if not device:
        print (" get env error\n")
        sys.exit(1)
    f_arch = open(const_path + "/../arch.txt", 'r')
    arch_tmp = f_arch.read()
    if arch_tmp.strip("\n\t") == "arm":
        ARCH = "arm"
    elif arch_tmp.strip("\n\t") == "x86":
        ARCH = "x86"
    elif arch_tmp.strip("\n\t") == "arm64":
        ARCH = "arm64"
    elif arch_tmp.strip("\n\t") == "x86_64":
        ARCH = "x86_64"
    else:
        print (
            " get arch error, the content of arch.txt should be 'arm' or 'x86' or arm64 or x86_64\n")
        sys.exit(1)
    f_arch.close()
    f_mode = open(const_path + "/../mode.txt", 'r')
    mode_tmp = f_mode.read()
    if mode_tmp.strip("\n\t") == "shared":
        MODE = "shared"
    elif mode_tmp.strip("\n\t") == "embedded":
        MODE = "embedded"
    elif mode_tmp.strip("\n\t") == "lite":
        MODE = "lite"
    else:
        print (
            " get mode error, the content of mode.txt should be 'shared' or 'embedded' or 'lite'\n")
        sys.exit(1)
    f_mode.close()
    f_pack_type = open(const_path + "/../pack-type", 'r')
    pack_type_tmp = f_pack_type.read()
    if pack_type_tmp.strip("\n\t") == "local":
        PACK_TYPE = "local"
    elif pack_type_tmp.strip("\n\t") == "npm":
        PACK_TYPE = "npm"
    else:
        print (
            " get pack type error, the content of pack-type should be 'local' or 'npm'\n")
        sys.exit(1)
    f_pack_type.close()
    # VERSION is a JSON file carrying the Crosswalk version/branch pair.
    with open(const_path + "/../VERSION", "rt") as pkg_version_file:
        pkg_version_raw = pkg_version_file.read()
        pkg_version_file.close()
    pkg_version_json = json.loads(pkg_version_raw)
    CROSSWALK_VERSION = pkg_version_json["main-version"]
    CROSSWALK_BRANCH = pkg_version_json["crosswalk-branch"]
def checkFileSize(file_path, min_size, max_size, self):
    """Assert that file_path's size in MB is strictly between min_size and max_size."""
    print "Check file size from %s --------------> START" % file_path
    size = getsize(file_path)/1024/1024  # bytes -> whole MB (integer division)
    print "this file is %s MB" % size
    self.assertTrue(size > min_size)
    self.assertTrue(size < max_size)
    print "Check file size from %s --------------> OK" % file_path
def installWebviewPlugin(pkg_mode, self, multiple_apks = None):
    """Add the crosswalk-webview Cordova plugin to the current project.

    pkg_mode is 'shared' or an embedded variant; the plugin source is the
    local checkout or the npm package depending on the global PACK_TYPE.
    Asserts that the `cordova plugin add` command exits with status 0.
    """
    print "Install Crosswalk WebView Plugin --------------> START"
    pkg_mode_tmp = "core"
    if pkg_mode == "shared":
        pkg_mode_tmp = "shared"
    xwalk_version = "%s" % CROSSWALK_VERSION
    if CROSSWALK_BRANCH == "beta":
        # Beta builds come from the Maven beta artifacts instead.
        xwalk_version = "org.xwalk:xwalk_%s_library_beta:%s" % (pkg_mode_tmp, CROSSWALK_VERSION)
    plugin_crosswalk_source = plugin_tool
    if PACK_TYPE == "npm":
        plugin_crosswalk_source = "cordova-plugin-crosswalk-webview"
    plugin_install_cmd = "cordova plugin add %s --variable XWALK_MODE=\"%s\"" \
        " --variable XWALK_VERSION=\"%s\"" % (plugin_crosswalk_source, pkg_mode, xwalk_version)
    if multiple_apks is not None:
        plugin_install_cmd = plugin_install_cmd + " --variable XWALKMULTIPLEAPK=\"%s\"" % multiple_apks
    print plugin_install_cmd
    pluginstatus = commands.getstatusoutput(plugin_install_cmd)
    self.assertEquals(0, pluginstatus[0])
def create(appname, pkgname, mode, sourcecodepath, replace_index_list, self, extra_plugin = None, multiple_apks = None):
    """Create a Cordova project, add the android platform and the webview plugin.

    Optionally patches index.html (replace_index_list = [key, replacement])
    and swaps in the test's own www/ sources from sourcecodepath.
    Returns False on a failed text replacement, otherwise None.
    """
    os.chdir(tool_path)
    if os.path.exists(os.path.join(tool_path, appname)):
        print "Existing %s project, try to clean up..." % appname
        do_remove(glob.glob(os.path.join(tool_path, appname)))
    print "Create project %s ----------------> START" % appname
    cmd = "cordova create %s %s %s" % (appname, pkgname, appname)
    createstatus = commands.getstatusoutput(cmd)
    self.assertEquals(0, createstatus[0])
    print "\nGenerate project %s ----------------> OK\n" % appname
    result = commands.getstatusoutput("ls")
    self.assertIn(appname, result[1])
    project_root = os.path.join(tool_path, appname)
    os.chdir(project_root)
    # Patch config.xml: pin the activity name and allow all navigation.
    if not replace_key(os.path.join(project_root, 'config.xml'),
                       '<widget android-activityName="%s"' % appname, '<widget'):
        print "replace key '<widget' failed."
        return False
    if not replace_key(os.path.join(project_root, 'config.xml'),
                       '    <allow-navigation href="*" />\n</widget>', '</widget>'):
        print "replace key '</widget>' failed."
        return False
    print "Add android platforms to this project --------------> START"
    cordova_platform_cmd = "cordova platform add android"
    platformstatus = commands.getstatusoutput(cordova_platform_cmd)
    self.assertEquals(0, platformstatus[0])
    installWebviewPlugin(mode, self, multiple_apks)
    if replace_index_list is not None and len(replace_index_list) >= 2:
        index_file_path = os.path.join(project_root, "www", "index.html")
        key = replace_index_list[0]
        content = replace_index_list[1]
        if not replace_key(index_file_path, content, key):
            print "replace key: " + key + " failed."
            return False
    if sourcecodepath is not None:
        # Replace the generated www/ with the test's own sources.
        do_remove(glob.glob(os.path.join(project_root, "www")))
        do_copy(sourcecodepath, os.path.join(tool_path, appname, "www"))
def buildGoogleApp(appname, sourcecodepath, self):
    """Build a Chrome-Apps-for-Mobile (cca) sample project named *appname*.

    Only app names containing 'CIRC' or 'EH' are supported; the matching
    sample sources are copied from sourcecodepath before `cca create`.
    """
    os.chdir(tool_path)
    if os.path.exists(os.path.join(tool_path, appname)):
        print "Existing %s project, try to clean up..." % appname
        do_remove(glob.glob(os.path.join(tool_path, appname)))
    print "Build project %s ----------------> START" % appname
    if sourcecodepath is None:
        print "sourcecodepath can't be none"
        return False
    # NOTE(review): if appname matches neither 'CIRC' nor 'EH', cordova_app
    # and create_cmd are unbound below -> NameError; confirm callers only
    # pass supported names.
    if checkContains(appname, "CIRC"):
        cordova_app = os.path.join(tool_path, "circ")
        create_cmd = "cca create " + appname + " --link-to circ/package"
    elif checkContains(appname, "EH"):
        cordova_app = os.path.join(tool_path, "workshop-cca-eh")
        create_cmd = "cca create " + appname + " --link-to workshop-cca-eh/workshop/step4"
    if os.path.exists(cordova_app):
        do_remove(glob.glob(cordova_app))
    if not do_copy(sourcecodepath, cordova_app):
        return False
    print create_cmd
    buildstatus = commands.getstatusoutput(create_cmd)
    self.assertEquals(0, buildstatus[0])
    os.chdir(os.path.join(tool_path, appname))
    print "Add android platforms to this project --------------> START"
    add_android_cmd = "cca platform add android"
    addstatus = commands.getstatusoutput(add_android_cmd)
    self.assertEquals(0, addstatus[0])
    print "uninstall webview default plugin from this project --------------> START"
    plugin_uninstall_webview = "cordova plugin remove cordova-plugin-crosswalk-webview"
    uninstallStatus = commands.getstatusoutput(plugin_uninstall_webview)
    self.assertEquals(0, uninstallStatus[0])
    installWebviewPlugin(MODE, self)
    build_cmd = "cca build android"
    if ARCH == "x86_64" or ARCH == "arm64":
        build_cmd = "cca build android --xwalk64bit"
    buildstatus = commands.getstatusoutput(build_cmd)
    self.assertEquals(0, buildstatus[0])
    checkApkExist(appname, self)
def build(appname, isDebug, self, isCopy=False, isMultipleApk=True):
    """Run `cordova build android` for *appname* and verify the APK exists.

    isDebug: 1 -> --debug, -1 -> --release (unsigned), anything else -> default.
    """
    os.chdir(os.path.join(tool_path, appname))
    print "Build project %s ----------------> START" % appname
    pack_arch_tmp = ARCH
    # 64-bit targets are expressed as the 32-bit arch plus --xwalk64bit.
    if ARCH == "x86_64":
        pack_arch_tmp = "x86 --xwalk64bit"
    elif ARCH == "arm64":
        pack_arch_tmp = "arm --xwalk64bit"
    cmd_mode = ""
    apk_name_mode = "debug"
    if isDebug == 1:
        print "build debug app"
        cmd_mode = "--debug"
    elif isDebug == -1:
        print "build release app"
        cmd_mode = "--release"
        apk_name_mode = "release-unsigned"
    cmd = "cordova build android %s -- --gradleArg=-PcdvBuildArch=%s" % (cmd_mode, pack_arch_tmp)
    print cmd
    buildstatus = commands.getstatusoutput(cmd)
    self.assertEquals(0, buildstatus[0])
    print "\nBuild project %s ----------------> OK\n" % appname
    checkApkExist(appname, self, isCopy, isMultipleApk, apk_name_mode)
def checkApkExist(appname, self, isCopy=False, isMultipleApk=True, apk_name_mode="debug"):
    """Assert the built APK is present, trying the naming variants Cordova uses.

    With isCopy=True the APK is also copied to testapp_path as <appname>.apk.
    """
    print "Check %s Apk Exist ----------------> START" % appname
    outputs_dir = os.path.join(
        tool_path,
        appname,
        "platforms",
        "android",
        "build",
        "outputs",
        "apk")
    apk_name = "android-%s.apk" % apk_name_mode
    if isMultipleApk == True and MODE == "embedded":
        # Embedded multi-APK builds embed the arch into the file name.
        apk_name_arch = "armv7"
        if ARCH != "arm":
            apk_name_arch = ARCH
        apk_name = "android-%s-%s.apk" % (apk_name_arch, apk_name_mode)
        if not os.path.exists(os.path.join(outputs_dir, apk_name)):
            apk_name = "%s-%s-%s.apk" % (appname, apk_name_arch, apk_name_mode)
    else:
        if not os.path.exists(os.path.join(outputs_dir, apk_name)):
            apk_name = "%s-%s.apk" % (appname, apk_name_mode)
    self.assertTrue(os.path.exists(os.path.join(outputs_dir, apk_name)))
    if isCopy == True:
        self.assertTrue(do_copy(os.path.join(outputs_dir, apk_name), os.path.join(testapp_path, "%s.apk" % appname)))
    print "Check %s Apk Exist ----------------> OK" % appname
def run(appname, self):
    """Deploy and launch *appname* on the device via `cordova run android`."""
    os.chdir(os.path.join(tool_path, appname))
    print "Run project %s ----------------> START" % appname
    cmd = "cordova run android"
    print cmd
    runstatus = commands.getstatusoutput(cmd)
    self.assertEquals(0, runstatus[0])
    self.assertIn("LAUNCH SUCCESS", runstatus[1])
    print "\nRun project %s ----------------> OK\n" % appname
def app_install(appname, pkgname, self):
    """Install the staged APK matching *appname* via adb and verify it."""
    print "Install APK ----------------> START"
    os.chdir(testapp_path)
    # Pick the first staged APK whose file name contains appname.
    apk_file = commands.getstatusoutput("ls | grep %s" % appname)[1]
    if apk_file == "":
        print "Error: No app: %s found in directory: %s" % (appname, testapp_path)
    cmd_inst = "adb -s " + device + " install -r " + apk_file
    print cmd_inst
    inststatus = commands.getstatusoutput(cmd_inst)
    self.assertEquals(0, inststatus[0])
    print "Install APK ----------------> OK"
    self.assertTrue(check_app_installed(pkgname, self))
def checkContains(origin_str=None, key_str=None):
    """Case-insensitive substring test: True when key_str occurs in origin_str."""
    return key_str.upper() in origin_str.upper()
def check_app_installed(pkgname, self):
    """Return True when `adb shell pm list packages` finds *pkgname*."""
    print "Check if app is installed ----------------> START"
    cmd_find = "adb -s " + device + \
               " shell pm list packages |grep %s" % pkgname
    pmstatus = commands.getstatusoutput(cmd_find)
    # grep exits 0 only when the package name matched a line.
    if pmstatus[0] == 0:
        print "App is installed."
        return True
    else:
        print "App is uninstalled."
        return False
def app_launch(appname, pkgname, self):
    """Start activity <pkgname>/.<appname> on the device via adb."""
    print "Launch APK ----------------> START"
    cmd = "adb -s " + device + " shell am start -n %s/.%s" % (pkgname, appname)
    launchstatus = commands.getstatusoutput(cmd)
    self.assertNotIn("error", launchstatus[1].lower())
    print "Launch APK ----------------> OK"
# Check whether the app's process is currently running on the device.
def check_app_launched(pkgname, self):
    """Return True when `adb shell ps` shows a process matching *pkgname*."""
    cmd_acti = "adb -s " + device + " shell ps | grep %s" % pkgname
    launched = commands.getstatusoutput(cmd_acti)
    if launched[0] != 0:
        print "App haven't launched."
        return False
    else:
        print "App is have launched."
        return True
def app_stop(pkgname, self):
    """Force-stop the app *pkgname* on the device via adb."""
    print "Stop APK ----------------> START"
    cmd = "adb -s " + device + " shell am force-stop %s" % pkgname
    stopstatus = commands.getstatusoutput(cmd)
    self.assertEquals(0, stopstatus[0])
    print "Stop APK ----------------> OK"
def app_uninstall(pkgname, self):
    """Uninstall the app *pkgname* from the device via adb."""
    print "Uninstall APK ----------------> START"
    cmd_uninst = "adb -s " + device + " uninstall %s" % (pkgname)
    unistatus = commands.getstatusoutput(cmd_uninst)
    self.assertEquals(0, unistatus[0])
    print "Uninstall APK ----------------> OK"
def replace_key(file_path, content, key):
    """Replace every occurrence of *key* with *content* inside file_path.

    Returns True on success, False when *key* is not present.
    """
    print "Replace value ----------------> START"
    f = open(file_path, "r")
    f_content = f.read()
    f.close()
    pos = f_content.find(key)
    if pos != -1:
        # Note: replaces ALL occurrences, not just the one found above.
        f_content = f_content.replace(key, content)
        f = open(file_path, "w")
        f.write(f_content)
        f.close()
    else:
        print "Fail to replace: %s with: %s in file: %s" % (content, key, file_path)
        return False
    print "Replace value ----------------> OK"
    return True
def do_remove(target_file_list=None):
    """Delete every file/directory in the list; False on the first failure."""
    for i_file in target_file_list:
        print "Removing %s" % i_file
        try:
            if os.path.isdir(i_file):
                shutil.rmtree(i_file)
            else:
                os.remove(i_file)
        except Exception as e:
            print "Fail to remove file %s: %s" % (i_file, e)
            return False
    return True
def do_copy(src_item=None, dest_item=None):
    """Copy a file or directory tree, creating parent dirs as needed.

    Directories are merged onto an existing destination (overwriteCopy);
    returns True on success, False on any error.
    """
    print "Copying %s to %s" % (src_item, dest_item)
    try:
        if os.path.isdir(src_item):
            overwriteCopy(src_item, dest_item, symlinks=True)
        else:
            if not os.path.exists(os.path.dirname(dest_item)):
                print "Create non-existent dir: %s" % os.path.dirname(dest_item)
                os.makedirs(os.path.dirname(dest_item))
            shutil.copy2(src_item, dest_item)
    except Exception as e:
        print "Fail to copy file %s: %s" % (src_item, e)
        return False
    return True
def overwriteCopy(src, dest, symlinks=False, ignore=None):
    """Recursively copy *src* onto *dest*, overwriting existing entries.

    Unlike shutil.copytree(), *dest* may already exist; its contents are
    merged/overwritten.  With symlinks=True, links are recreated instead
    of followed.  *ignore* has copytree() semantics: ignore(dir, names)
    returns the names to skip.
    """
    # BUG FIX: the ``stat`` module was never imported in this file, so the
    # mode-copy below always raised a NameError that the except swallowed
    # and link permissions were silently never copied.
    import stat
    if not os.path.exists(dest):
        os.makedirs(dest)
    shutil.copystat(src, dest)
    sub_list = os.listdir(src)
    if ignore:
        excl = ignore(src, sub_list)
        sub_list = [x for x in sub_list if x not in excl]
    for i_sub in sub_list:
        s_path = os.path.join(src, i_sub)
        d_path = os.path.join(dest, i_sub)
        if symlinks and os.path.islink(s_path):
            if os.path.lexists(d_path):
                os.remove(d_path)
            os.symlink(os.readlink(s_path), d_path)
            try:
                s_path_s = os.lstat(s_path)
                s_path_mode = stat.S_IMODE(s_path_s.st_mode)
                os.lchmod(d_path, s_path_mode)
            except Exception:
                # os.lchmod() only exists on some platforms (e.g. macOS);
                # copying the link mode stays best-effort.
                pass
        elif os.path.isdir(s_path):
            overwriteCopy(s_path, d_path, symlinks, ignore)
        else:
            shutil.copy2(s_path, d_path)
| bsd-3-clause |
Jenselme/servo | tests/wpt/web-platform-tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py | 217 | 1250 | def setup_module(module):
module.TestStateFullThing.classcount = 0
class TestStateFullThing:
def setup_class(cls):
cls.classcount += 1
def teardown_class(cls):
cls.classcount -= 1
def setup_method(self, method):
self.id = eval(method.__name__[5:])
def test_42(self):
assert self.classcount == 1
assert self.id == 42
def test_23(self):
assert self.classcount == 1
assert self.id == 23
def teardown_module(module):
    # pytest hook: verify the class-level teardown restored the counter.
    assert module.TestStateFullThing.classcount == 0
""" For this example the control flow happens as follows::
import test_setup_flow_example
setup_module(test_setup_flow_example)
setup_class(TestStateFullThing)
instance = TestStateFullThing()
setup_method(instance, instance.test_42)
instance.test_42()
setup_method(instance, instance.test_23)
instance.test_23()
teardown_class(TestStateFullThing)
teardown_module(test_setup_flow_example)
Note that ``setup_class(TestStateFullThing)`` is called and not
``TestStateFullThing.setup_class()`` which would require you
to insert ``setup_class = classmethod(setup_class)`` to make
your setup function callable.
"""
| mpl-2.0 |
Manishearth/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treebuilders/dom.py | 920 | 8469 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
    """Build the html5lib tree-builder API on top of *DomImplementation*.

    Returns (via ``locals()``) a namespace containing ``TreeBuilder``,
    ``testSerializer`` etc., all bound to the given ``xml.dom``
    implementation (e.g. ``minidom``).
    """
    Dom = DomImplementation
    # Thin mutable-mapping facade over a DOM element's attribute map.
    class AttrList(object):
        def __init__(self, element):
            self.element = element
        def __iter__(self):
            return list(self.element.attributes.items()).__iter__()
        def __setitem__(self, name, value):
            self.element.setAttribute(name, value)
        def __len__(self):
            return len(list(self.element.attributes.items()))
        def items(self):
            return [(item[0], item[1]) for item in
                    list(self.element.attributes.items())]
        def keys(self):
            return list(self.element.attributes.keys())
        def __getitem__(self, name):
            return self.element.getAttribute(name)
        def __contains__(self, name):
            # Namespaced (tuple) attribute lookup is not supported here.
            if isinstance(name, tuple):
                raise NotImplementedError
            else:
                return self.element.hasAttribute(name)
    # Adapter wrapping a DOM node in html5lib's treebuilder Node interface.
    class NodeBuilder(_base.Node):
        def __init__(self, element):
            _base.Node.__init__(self, element.nodeName)
            self.element = element
        namespace = property(lambda self: hasattr(self.element, "namespaceURI")
                             and self.element.namespaceURI or None)
        def appendChild(self, node):
            node.parent = self
            self.element.appendChild(node.element)
        def insertText(self, data, insertBefore=None):
            text = self.element.ownerDocument.createTextNode(data)
            if insertBefore:
                self.element.insertBefore(text, insertBefore.element)
            else:
                self.element.appendChild(text)
        def insertBefore(self, node, refNode):
            self.element.insertBefore(node.element, refNode.element)
            node.parent = self
        def removeChild(self, node):
            if node.element.parentNode == self.element:
                self.element.removeChild(node.element)
            node.parent = None
        def reparentChildren(self, newParent):
            while self.element.hasChildNodes():
                child = self.element.firstChild
                self.element.removeChild(child)
                newParent.element.appendChild(child)
            self.childNodes = []
        def getAttributes(self):
            return AttrList(self.element)
        def setAttributes(self, attributes):
            if attributes:
                for name, value in list(attributes.items()):
                    if isinstance(name, tuple):
                        # (prefix, localName, namespace) triple from the tokenizer.
                        if name[0] is not None:
                            qualifiedName = (name[0] + ":" + name[1])
                        else:
                            qualifiedName = name[1]
                        self.element.setAttributeNS(name[2], qualifiedName,
                                                    value)
                    else:
                        self.element.setAttribute(
                            name, value)
        attributes = property(getAttributes, setAttributes)
        def cloneNode(self):
            # Shallow clone: children are not copied.
            return NodeBuilder(self.element.cloneNode(False))
        def hasContent(self):
            return self.element.hasChildNodes()
        def getNameTuple(self):
            if self.namespace is None:
                return namespaces["html"], self.name
            else:
                return self.namespace, self.name
        nameTuple = property(getNameTuple)
    class TreeBuilder(_base.TreeBuilder):
        def documentClass(self):
            self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
            return weakref.proxy(self)
        def insertDoctype(self, token):
            name = token["name"]
            publicId = token["publicId"]
            systemId = token["systemId"]
            domimpl = Dom.getDOMImplementation()
            doctype = domimpl.createDocumentType(name, publicId, systemId)
            self.document.appendChild(NodeBuilder(doctype))
            if Dom == minidom:
                # minidom doesn't set ownerDocument on created doctypes itself.
                doctype.ownerDocument = self.dom
        def elementClass(self, name, namespace=None):
            if namespace is None and self.defaultNamespace is None:
                node = self.dom.createElement(name)
            else:
                node = self.dom.createElementNS(namespace, name)
            return NodeBuilder(node)
        def commentClass(self, data):
            return NodeBuilder(self.dom.createComment(data))
        def fragmentClass(self):
            return NodeBuilder(self.dom.createDocumentFragment())
        def appendChild(self, node):
            self.dom.appendChild(node.element)
        def testSerializer(self, element):
            return testSerializer(element)
        def getDocument(self):
            return self.dom
        def getFragment(self):
            return _base.TreeBuilder.getFragment(self).element
        def insertText(self, data, parent=None):
            data = data  # no-op kept from upstream
            if parent != self:
                _base.TreeBuilder.insertText(self, data, parent)
            else:
                # HACK: allow text nodes as children of the document node
                if hasattr(self.dom, '_child_node_types'):
                    if Node.TEXT_NODE not in self.dom._child_node_types:
                        self.dom._child_node_types = list(self.dom._child_node_types)
                        self.dom._child_node_types.append(Node.TEXT_NODE)
                self.dom.appendChild(self.dom.createTextNode(data))
        implementation = DomImplementation
        name = None
    # Render a DOM (sub)tree in html5lib's expected-output text format.
    def testSerializer(element):
        element.normalize()
        rv = []
        def serializeElement(element, indent=0):
            if element.nodeType == Node.DOCUMENT_TYPE_NODE:
                if element.name:
                    if element.publicId or element.systemId:
                        publicId = element.publicId or ""
                        systemId = element.systemId or ""
                        rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
                                  (' ' * indent, element.name, publicId, systemId))
                    else:
                        rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
                else:
                    rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
            elif element.nodeType == Node.DOCUMENT_NODE:
                rv.append("#document")
            elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
                rv.append("#document-fragment")
            elif element.nodeType == Node.COMMENT_NODE:
                rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
            elif element.nodeType == Node.TEXT_NODE:
                rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
            else:
                if (hasattr(element, "namespaceURI") and
                        element.namespaceURI is not None):
                    name = "%s %s" % (constants.prefixes[element.namespaceURI],
                                      element.nodeName)
                else:
                    name = element.nodeName
                rv.append("|%s<%s>" % (' ' * indent, name))
                if element.hasAttributes():
                    attributes = []
                    for i in range(len(element.attributes)):
                        attr = element.attributes.item(i)
                        name = attr.nodeName
                        value = attr.value
                        ns = attr.namespaceURI
                        if ns:
                            name = "%s %s" % (constants.prefixes[ns], attr.localName)
                        else:
                            name = attr.nodeName
                        attributes.append((name, value))
                    # Attributes are emitted in sorted order for stable output.
                    for name, value in sorted(attributes):
                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
                indent += 2
                for child in element.childNodes:
                    serializeElement(child, indent)
        serializeElement(element, 0)
        return "\n".join(rv)
    return locals()
# The actual means to get a module!
# getDomModule(DomImplementation) wraps getDomBuilder via utils.moduleFactoryFactory.
getDomModule = moduleFactoryFactory(getDomBuilder)
| mpl-2.0 |
golismero/golismero | thirdparty_libs/shodan/wps.py | 10 | 2102 | """
WiFi Positioning System
Wrappers around the SkyHook and Google Locations APIs to resolve
wireless routers' MAC addresses (BSSID) to physical locations.
"""
try:
from json import dumps, loads
except:
from simplejson import dumps, loads
try:
from urllib2 import Request, urlopen
from urllib import urlencode
except:
from urllib.request import Request, urlopen
from urllib.parse import urlencode
class Skyhook:
    """Not yet ready for production, use the GoogleLocation class instead."""
    def __init__(self, username='api', realm='shodan'):
        self.username = username
        self.realm = realm
        self.url = 'https://api.skyhookwireless.com/wps2/location'
    def locate(self, mac):
        """POST a WPS2 location request for the given BSSID; returns raw XML.

        NOTE(review): *mac* (and username/realm) are interpolated into the
        XML body unescaped -- XML injection if ever fed untrusted input.
        """
        # Remove the ':'
        mac = mac.replace(':', '')
        data = """<?xml version='1.0'?>
<LocationRQ xmlns='http://skyhookwireless.com/wps/2005' version='2.6' street-address-lookup='full'>
    <authentication version='2.0'>
        <simple>
            <username>%s</username>
            <realm>%s</realm>
        </simple>
    </authentication>
    <access-point>
        <mac>%s</mac>
        <signal-strength>-50</signal-strength>
    </access-point>
</LocationRQ>""" % (self.username, self.realm, mac)
        request = Request(url=self.url, data=data, headers={'Content-type': 'text/xml'})
        response = urlopen(request)
        result = response.read()
        return result
class GoogleLocation:
    """Resolve a wireless router's BSSID to a location via Google's loc API.

    NOTE(review): talks to a plain-http endpoint and the public Google
    Location API this targets has since been retired -- confirm before use.
    """
    def __init__(self):
        self.url = 'http://www.google.com/loc/json'
    def locate(self, mac):
        """POST a single-tower lookup for *mac*; returns the parsed JSON reply."""
        data = {
            'version': '1.1.0',
            'request_address': True,
            'wifi_towers': [{
                'mac_address': mac,
                'ssid': 'g',
                'signal_strength': -72
            }]
        }
        response = urlopen(self.url, dumps(data))
        data = response.read()
        return loads(data)
| gpl-2.0 |
kevin-coder/tensorflow-fork | tensorflow/python/data/kernel_tests/filter_with_legacy_function_test.py | 4 | 1333 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.filter_with_legacy_function()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import filter_test_base
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_v1_only("filter_with_legacy_function only available in TF 1.x")
class FilterWithLegacyFunctionTest(filter_test_base.FilterTestBase):
  """Runs the shared filter test suite against filter_with_legacy_function()."""

  def apply_filter(self, input_dataset, predicate):
    # Hook consumed by FilterTestBase: exercise the legacy-function variant.
    return input_dataset.filter_with_legacy_function(predicate)
if __name__ == "__main__":
test.main()
| apache-2.0 |
wittekm/synergy-multi-monitor | tools/gmock-1.6.0/scripts/generator/cpp/utils.py | 1158 | 1153 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import sys
# Set to True to see the start/end token indices.
DEBUG = True
def ReadFile(filename, print_error=True):
    """Return the contents of *filename*, or None if it cannot be read.

    When reading fails and *print_error* is true, a diagnostic line is
    printed to stdout before None is returned.
    """
    try:
        with open(filename) as source:
            return source.read()
    except IOError:
        if print_error:
            print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
        return None
| gpl-2.0 |
Kingclove/ChannelAPI-Demo | server/lib/flask/logging.py | 838 | 1398 | # -*- coding: utf-8 -*-
"""
flask.logging
~~~~~~~~~~~~~
Implements the logging support for Flask.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG
def create_logger(app):
    """Creates a logger for the given application.  This logger works
    similar to a regular Python logger but changes the effective logging
    level based on the application's debug flag.  Furthermore this
    function also removes all attached handlers in case there was a
    logger with the log name before.

    :param app: the application object; ``app.debug``, ``app.debug_log_format``
                and ``app.logger_name`` are read.
    :return: a configured :class:`logging.Logger` instance.
    """
    Logger = getLoggerClass()

    class DebugLogger(Logger):
        # While the app is debugging and no explicit level was set on this
        # logger, report DEBUG as the effective level.
        def getEffectiveLevel(self):
            if self.level == 0 and app.debug:
                return DEBUG
            return Logger.getEffectiveLevel(self)

    class DebugHandler(StreamHandler):
        # Only emit records while the application is in debug mode.
        # (Previously written as a conditional *expression* used purely for
        # its side effect, which produced a discarded None and obscured the
        # intent; a plain statement is the idiomatic form.)
        def emit(self, record):
            if app.debug:
                StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # just in case that was not a new logger, get rid of all the handlers
    # already attached to it.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
| apache-2.0 |
entomb/CouchPotatoServer | libs/pyutil/odict.py | 106 | 20991 | # Copyright (c) 2002-2009 Zooko "Zooko" Wilcox-O'Hearn
"""
This module offers a Ordered Dict, which is a dict that preserves
insertion order. See PEP 372 for description of the problem. This
implementation uses a linked-list to get good O(1) asymptotic
performance. (Actually it is O(hashtable-update-cost), but whatever.)
Warning: if -O optimizations are not turned on then OrderedDict performs
extensive self-analysis in every function call, which can take minutes
and minutes for a large cache. Turn on -O, or comment out assert
self._assert_invariants()
"""
import operator
from assertutil import _assert, precondition
from humanreadable import hr
class OrderedDict:
"""
An efficient ordered dict.
Adding an item that is already in the dict *does not* make it the
most- recently-added item although it may change the state of the
dict itself (if the value is different than the previous value).
See also SmallOrderedDict (below), which is faster in some cases.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.ts][1]
def __iter__(self):
return self
def next(self):
if self.i is self.c.hs:
raise StopIteration
k = self.i
precondition(self.c.d.has_key(k), "The iterated OrderedDict doesn't have the next key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", k, self.c)
(v, p, n,) = self.c.d[k]
self.i = p
return (k, v,)
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.ts][1]
def __iter__(self):
return self
def next(self):
if self.i is self.c.hs:
raise StopIteration
k = self.i
precondition(self.c.d.has_key(k), "The iterated OrderedDict doesn't have the next key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", k, self.c)
(v, p, n,) = self.c.d[k]
self.i = p
return k
class ValIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.ts][1]
def __iter__(self):
return self
def next(self):
if self.i is self.c.hs:
raise StopIteration
precondition(self.c.d.has_key(self.i), "The iterated OrderedDict doesn't have the next key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c)
(v, p, n,) = self.c.d[self.i]
self.i = p
return v
class Sentinel:
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.msg,)
def __init__(self, initialdata={}):
self.d = {} # k: k, v: [v, prev, next,] # the dict
self.hs = OrderedDict.Sentinel("hs")
self.ts = OrderedDict.Sentinel("ts")
self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.update(initialdata)
assert self._assert_invariants()
def __repr_n__(self, n=None):
s = ["{",]
try:
iter = self.iteritems()
x = iter.next()
s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
i = 1
while (n is None) or (i < n):
x = iter.next()
s.append(", "); s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
except StopIteration:
pass
s.append("}")
return ''.join(s)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(),)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(16),)
def _assert_invariants(self):
_assert((len(self.d) > 2) == (self.d[self.hs][2] is not self.ts) == (self.d[self.ts][1] is not self.hs), "Head and tail point to something other than each other if and only if there is at least one element in the dictionary.", self.hs, self.ts, len(self.d))
foundprevsentinel = 0
foundnextsentinel = 0
for (k, (v, p, n,)) in self.d.iteritems():
_assert(v not in (self.hs, self.ts,))
_assert(p is not self.ts, "A reference to the tail sentinel may not appear in prev.", k, v, p, n)
_assert(n is not self.hs, "A reference to the head sentinel may not appear in next.", k, v, p, n)
_assert(p in self.d, "Each prev is required to appear as a key in the dict.", k, v, p, n)
_assert(n in self.d, "Each next is required to appear as a key in the dict.", k, v, p, n)
if p is self.hs:
foundprevsentinel += 1
_assert(foundprevsentinel <= 2, "No more than two references to the head sentinel may appear as a prev.", k, v, p, n)
if n is self.ts:
foundnextsentinel += 1
_assert(foundnextsentinel <= 2, "No more than one reference to the tail sentinel may appear as a next.", k, v, p, n)
_assert(foundprevsentinel == 2, "A reference to the head sentinel is required appear as a prev (plus a self-referential reference).")
_assert(foundnextsentinel == 2, "A reference to the tail sentinel is required appear as a next (plus a self-referential reference).")
count = 0
for (k, v,) in self.iteritems():
_assert(k not in (self.hs, self.ts,), k, self.hs, self.ts)
count += 1
_assert(count == len(self.d)-2, count, len(self.d)) # -2 for the sentinels
return True
def move_to_most_recent(self, k, strictkey=False):
assert self._assert_invariants()
if not self.d.has_key(k):
if strictkey:
raise KeyError, k
return
node = self.d[k]
# relink
self.d[node[1]][2] = node[2]
self.d[node[2]][1] = node[1]
# move to front
hnode = self.d[self.hs]
node[1] = self.hs
node[2] = hnode[2]
hnode[2] = k
self.d[node[2]][1] = k
assert self._assert_invariants()
def iteritems(self):
return OrderedDict.ItemIterator(self)
def itervalues(self):
return OrderedDict.ValIterator(self)
def iterkeys(self):
return self.__iter__()
def __iter__(self):
return OrderedDict.KeyIterator(self)
def __getitem__(self, key, default=None, strictkey=True):
node = self.d.get(key)
if not node:
if strictkey:
raise KeyError, key
return default
return node[0]
def __setitem__(self, k, v=None):
assert self._assert_invariants()
node = self.d.get(k)
if node:
node[0] = v
return
hnode = self.d[self.hs]
n = hnode[2]
self.d[k] = [v, self.hs, n,]
hnode[2] = k
self.d[n][1] = k
assert self._assert_invariants()
return v
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
@return: the value removed or default if there is not item by
that key and strictkey is False
"""
assert self._assert_invariants()
if self.d.has_key(key):
node = self.d[key]
# relink
self.d[node[1]][2] = node[2]
self.d[node[2]][1] = node[1]
del self.d[key]
assert self._assert_invariants()
return node[0]
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def has_key(self, key):
assert self._assert_invariants()
if self.d.has_key(key):
assert self._assert_invariants()
return True
else:
assert self._assert_invariants()
return False
def clear(self):
assert self._assert_invariants()
self.d.clear()
self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
for (k, v,) in otherdict.iteritems():
assert self._assert_invariants()
self[k] = v
assert self._assert_invariants()
def pop(self):
assert self._assert_invariants()
if len(self.d) < 2: # the +2 is for the sentinels
raise KeyError, 'popitem(): dictionary is empty'
k = self.d[self.hs][2]
self.remove(k)
assert self._assert_invariants()
return k
def popitem(self):
assert self._assert_invariants()
if len(self.d) < 2: # the +2 is for the sentinels
raise KeyError, 'popitem(): dictionary is empty'
k = self.d[self.hs][2]
val = self.remove(k)
assert self._assert_invariants()
return (k, val,)
def keys_unsorted(self):
assert self._assert_invariants()
t = self.d.copy()
del t[self.hs]
del t[self.ts]
assert self._assert_invariants()
return t.keys()
def keys(self):
res = [None] * len(self)
i = 0
for k in self.iterkeys():
res[i] = k
i += 1
return res
def values_unsorted(self):
assert self._assert_invariants()
t = self.d.copy()
del t[self.hs]
del t[self.ts]
assert self._assert_invariants()
return map(operator.__getitem__, t.values(), [0]*len(t))
def values(self):
res = [None] * len(self)
i = 0
for v in self.itervalues():
res[i] = v
i += 1
return res
def items(self):
res = [None] * len(self)
i = 0
for it in self.iteritems():
res[i] = it
i += 1
return res
def __len__(self):
return len(self.d) - 2
def insert(self, key, val=None):
assert self._assert_invariants()
result = self.__setitem__(key, val)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def get(self, key, default=None):
return self.__getitem__(key, default, strictkey=False)
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
class SmallOrderedDict(dict):
"""
SmallOrderedDict is faster than OrderedDict for small sets. How small? That
depends on your machine and which operations you use most often. Use
performance profiling to determine whether the ordered dict class that you are
using makes any difference to the performance of your program, and if it
does, then run "quick_bench()" in test/test_cache.py to see which cache
implementation is faster for the size of your datasets.
A simple least-recently-used cache. It keeps an LRU queue, and
when the number of items in the cache reaches maxsize, it removes
the least recently used item.
"Looking" at an item or a key such as with "has_key()" makes that
item become the most recently used item.
You can also use "refresh()" to explicitly make an item become the most
recently used item.
Adding an item that is already in the dict *does* make it the
most- recently-used item although it does not change the state of
the dict itself.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c._lru), "The iterated SmallOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c)
precondition(dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c._lru[self.i], self.c)
if self.i == len(self.c._lru):
raise StopIteration
k = self.i
self.i += 1
return (k, dict.__getitem__(self.c, k),)
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c._lru), "The iterated SmallOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c)
precondition(dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c._lru[self.i], self.c)
if self.i == len(self.c._lru):
raise StopIteration
k = self.i
self.i += 1
return k
class ValueIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c._lru), "The iterated SmallOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c)
precondition(dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c._lru[self.i], self.c)
if self.i == len(self.c._lru):
raise StopIteration
k = self.i
self.i += 1
return dict.__getitem__(self.c, k)
def __init__(self, initialdata={}, maxsize=128):
dict.__init__(self, initialdata)
self._lru = initialdata.keys() # contains keys
self._maxsize = maxsize
over = len(self) - self._maxsize
if over > 0:
map(dict.__delitem__, [self]*over, self._lru[:over])
del self._lru[:over]
assert self._assert_invariants()
def _assert_invariants(self):
_assert(len(self._lru) <= self._maxsize, "Size is required to be <= maxsize.")
_assert(len(filter(lambda x: dict.has_key(self, x), self._lru)) == len(self._lru), "Each key in self._lru is required to be in dict.", filter(lambda x: not dict.has_key(self, x), self._lru), len(self._lru), self._lru, len(self), self)
_assert(len(filter(lambda x: x in self._lru, self.keys())) == len(self), "Each key in dict is required to be in self._lru.", filter(lambda x: x not in self._lru, self.keys()), len(self._lru), self._lru, len(self), self)
_assert(len(self._lru) == len(self), "internal consistency", filter(lambda x: x not in self.keys(), self._lru), len(self._lru), self._lru, len(self), self)
_assert(len(self._lru) <= self._maxsize, "internal consistency", len(self._lru), self._lru, self._maxsize)
return True
def insert(self, key, item=None):
assert self._assert_invariants()
result = self.__setitem__(key, item)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def __setitem__(self, key, item=None):
assert self._assert_invariants()
if dict.has_key(self, key):
self._lru.remove(key)
else:
if len(self._lru) == self._maxsize:
# If this insert is going to increase the size of the cache to bigger than maxsize:
killkey = self._lru.pop(0)
dict.__delitem__(self, killkey)
dict.__setitem__(self, key, item)
self._lru.append(key)
assert self._assert_invariants()
return item
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
@return: the object removed or default if there is not item by
that key and strictkey is False
"""
assert self._assert_invariants()
if dict.has_key(self, key):
val = dict.__getitem__(self, key)
dict.__delitem__(self, key)
self._lru.remove(key)
assert self._assert_invariants()
return val
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def clear(self):
assert self._assert_invariants()
dict.clear(self)
self._lru = []
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
if len(otherdict) > self._maxsize:
# Handling this special case here makes it possible to implement the
# other more common cases faster below.
dict.clear(self)
self._lru = []
if self._maxsize > (len(otherdict) - self._maxsize):
dict.update(self, otherdict)
while len(self) > self._maxsize:
dict.popitem(self)
else:
for k, v, in otherdict.iteritems():
if len(self) == self._maxsize:
break
dict.__setitem__(self, k, v)
self._lru = dict.keys(self)
assert self._assert_invariants()
return self
for k in otherdict.iterkeys():
if dict.has_key(self, k):
self._lru.remove(k)
self._lru.extend(otherdict.keys())
dict.update(self, otherdict)
over = len(self) - self._maxsize
if over > 0:
map(dict.__delitem__, [self]*over, self._lru[:over])
del self._lru[:over]
assert self._assert_invariants()
return self
def has_key(self, key):
assert self._assert_invariants()
if dict.has_key(self, key):
assert key in self._lru, "key: %s, self._lru: %s" % tuple(map(hr, (key, self._lru,)))
self._lru.remove(key)
self._lru.append(key)
assert self._assert_invariants()
return True
else:
assert self._assert_invariants()
return False
def refresh(self, key, strictkey=True):
"""
@param strictkey: raise a KeyError exception if key isn't present
"""
assert self._assert_invariants()
if not dict.has_key(self, key):
if strictkey:
raise KeyError, key
return
self._lru.remove(key)
self._lru.append(key)
def popitem(self):
if not self._lru:
raise KeyError, 'popitem(): dictionary is empty'
k = self._lru[-1]
obj = self.remove(k)
return (k, obj,)
| gpl-3.0 |
petemounce/ansible | lib/ansible/modules/cloud/amazon/elasticache_snapshot.py | 42 | 8055 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: elasticache_snapshot
short_description: Manage cache snapshots in Amazon Elasticache.
description:
- Manage cache snapshots in Amazon Elasticache.
- Returns information about the specified snapshot.
version_added: "2.3"
author: "Sloane Hertel (@s-hertel)"
options:
name:
description:
- The name of the snapshot we want to create, copy, delete
required: yes
state:
description:
- Actions that will create, destroy, or copy a snapshot.
choices: ['present', 'absent', 'copy']
replication_id:
description:
- The name of the existing replication group to make the snapshot.
required: no
default: null
cluster_id:
description:
- The name of an existing cache cluster in the replication group to make the snapshot.
required: no
default: null
target:
description:
- The name of a snapshot copy
required: no
default: null
bucket:
description:
- The s3 bucket to which the snapshot is exported
required: no
default: null
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
---
- hosts: localhost
connection: local
tasks:
- name: 'Create a snapshot'
elasticache_snapshot:
name: 'test-snapshot'
state: 'present'
cluster_id: '{{ cluster }}'
replication_id: '{{ replication }}'
"""
RETURN = """
response_metadata:
description: response metadata about the snapshot
returned: always
type: dict
sample:
http_headers:
content-length: 1490
content-type: text/xml
date: Tue, 07 Feb 2017 16:43:04 GMT
x-amzn-requestid: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
http_status_code: 200
request_id: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
retry_attempts: 0
snapshot:
description: snapshot data
returned: always
type: dict
sample:
auto_minor_version_upgrade: true
cache_cluster_create_time: 2017-02-01T17:43:58.261000+00:00
cache_cluster_id: test-please-delete
cache_node_type: cache.m1.small
cache_parameter_group_name: default.redis3.2
cache_subnet_group_name: default
engine: redis
engine_version: 3.2.4
node_snapshots:
cache_node_create_time: 2017-02-01T17:43:58.261000+00:00
cache_node_id: 0001
cache_size:
num_cache_nodes: 1
port: 11211
preferred_availability_zone: us-east-1d
preferred_maintenance_window: wed:03:00-wed:04:00
snapshot_name: deletesnapshot
snapshot_retention_limit: 0
snapshot_source: manual
snapshot_status: creating
snapshot_window: 10:00-11:00
vpc_id: vpc-c248fda4
changed:
description: if a snapshot has been created, deleted, or copied
returned: always
type: bool
sample:
changed: true
"""
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict
import traceback
try:
import boto3
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def create(module, connection, replication_id, cluster_id, name):
    """Create an Elasticache snapshot of a cluster within a replication group.

    Returns a ``(response, changed)`` tuple.  A snapshot that already exists
    is treated as a no-op (empty response, changed=False); any other client
    error aborts the module via ``module.fail_json``.
    """
    try:
        response = connection.create_snapshot(
            ReplicationGroupId=replication_id,
            CacheClusterId=cluster_id,
            SnapshotName=name,
        )
        changed = True
    except botocore.exceptions.ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code == "SnapshotAlreadyExistsFault":
            response, changed = {}, False
        else:
            module.fail_json(msg="Unable to create the snapshot.", exception=traceback.format_exc())
    return response, changed
def copy(module, connection, name, target, bucket):
    """Copy an Elasticache snapshot to a new name, exporting it to an S3 bucket.

    Returns a ``(response, changed)`` tuple; any client error aborts the
    module via ``module.fail_json``.
    """
    try:
        response = connection.copy_snapshot(
            SourceSnapshotName=name,
            TargetSnapshotName=target,
            TargetBucket=bucket,
        )
        changed = True
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to copy the snapshot.", exception=traceback.format_exc())
    return response, changed
def delete(module, connection, name):
    """Delete an Elasticache snapshot.

    Returns a ``(response, changed)`` tuple.  A snapshot that does not exist
    is treated as already deleted (empty response, changed=False); a snapshot
    in an undeletable state aborts the module with an explanatory message.
    """
    try:
        response = connection.delete_snapshot(SnapshotName=name)
        changed = True
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "SnapshotNotFoundFault":
            response = {}
            changed = False
        elif e.response['Error']['Code'] == "InvalidSnapshotState":
            # BUGFIX: the two implicitly-concatenated literals previously
            # rendered as "...allow deletion.You may need..." (missing space).
            module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion. "
                                 "You may need to wait a few minutes.")
        else:
            module.fail_json(msg="Unable to delete the snapshot.", exception=traceback.format_exc())
    return response, changed
def main():
    """Ansible module entry point: dispatch to create/delete/copy by state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            state=dict(required=True, type='str', choices=['present', 'absent', 'copy']),
            replication_id=dict(type='str'),
            cluster_id=dict(type='str'),
            target=dict(type='str'),
            bucket=dict(type='str'),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto required for this module')

    name = module.params.get('name')
    state = module.params.get('state')
    replication_id = module.params.get('replication_id')
    cluster_id = module.params.get('cluster_id')
    target = module.params.get('target')
    bucket = module.params.get('bucket')

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if not region:
        module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))

    connection = boto3_conn(module, conn_type='client',
                            resource='elasticache', region=region,
                            endpoint=ec2_url, **aws_connect_kwargs)

    changed = False
    response = {}

    # Dispatch by desired state; fail_json exits, so missing-option checks
    # do not fall through to the API calls below them.
    if state == 'present':
        if not all((replication_id, cluster_id)):
            module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
        response, changed = create(module, connection, replication_id, cluster_id, name)
    elif state == 'absent':
        response, changed = delete(module, connection, name)
    elif state == 'copy':
        if not all((target, bucket)):
            module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.")
        response, changed = copy(module, connection, name, target, bucket)

    # Convert the boto3 CamelCase response into snake_case Ansible facts.
    facts_result = dict(changed=changed, **camel_dict_to_snake_dict(response))

    module.exit_json(**facts_result)
| gpl-3.0 |
LPgenerator/django-cacheops | cacheops/redis.py | 1 | 3494 | from __future__ import absolute_import
import warnings
from contextlib import contextmanager
import six
from funcy import decorator, identity, memoize, LazyObject
import redis
from redis.sentinel import Sentinel
from .conf import settings
# When CACHEOPS_DEGRADE_ON_FAILURE is set, redis connection problems degrade
# to a warning (the decorated call returns None) instead of propagating;
# otherwise handle_connection_failure is the identity and errors raise.
if settings.CACHEOPS_DEGRADE_ON_FAILURE:
    @decorator
    def handle_connection_failure(call):
        try:
            return call()
        except redis.ConnectionError as e:
            warnings.warn("The cacheops cache is unreachable! Error: %s" % e, RuntimeWarning)
        except redis.TimeoutError as e:
            warnings.warn("The cacheops cache timed out! Error: %s" % e, RuntimeWarning)
else:
    handle_connection_failure = identity
LOCK_TIMEOUT = 60
class CacheopsRedis(redis.StrictRedis):
    """StrictRedis subclass with degradable get() and a dog-pile lock.

    The locking protocol: a missing key is claimed by writing the literal
    value 'LOCK' with NX/EX; other readers block on a BRPOPLPUSH of
    ``<key>:signal`` until the lock holder computes the value and signals.
    """
    # get() degrades to None on connection failure when configured to.
    get = handle_connection_failure(redis.StrictRedis.get)

    @contextmanager
    def getting(self, key, lock=False):
        """Yield the cached value for key; with lock=True, yield None to at
        most one concurrent caller (the one expected to fill the cache)."""
        if not lock:
            yield self.get(key)
        else:
            locked = False
            try:
                data = self._get_or_lock(key)
                # None means *we* acquired the lock and must release it.
                locked = data is None
                yield data
            finally:
                if locked:
                    self._release_lock(key)

    @handle_connection_failure
    def _get_or_lock(self, key):
        # Lazily register the Lua script once per client instance.
        # Atomically: claim the key with 'LOCK' (NX, expiring after ARGV[1])
        # and clear any stale signal list.
        self._lock = getattr(self, '_lock', self.register_script("""
            local locked = redis.call('set', KEYS[1], 'LOCK', 'nx', 'ex', ARGV[1])
            if locked then
                redis.call('del', KEYS[2])
            end
            return locked
        """))
        signal_key = key + ':signal'
        while True:
            data = self.get(key)
            if data is None:
                # Key absent: try to claim it; success means caller computes.
                if self._lock(keys=[key, signal_key], args=[LOCK_TIMEOUT]):
                    return None
            elif data != b'LOCK':
                return data
            # No data and not locked, wait
            self.brpoplpush(signal_key, signal_key, timeout=LOCK_TIMEOUT)

    @handle_connection_failure
    def _release_lock(self, key):
        # Atomically: drop the 'LOCK' marker (if still ours) and push a
        # short-lived token onto the signal list to wake blocked readers.
        self._unlock = getattr(self, '_unlock', self.register_script("""
            if redis.call('get', KEYS[1]) == 'LOCK' then
                redis.call('del', KEYS[1])
            end
            redis.call('lpush', KEYS[2], 1)
            redis.call('expire', KEYS[2], 1)
        """))
        signal_key = key + ':signal'
        self._unlock(keys=[key, signal_key])
@LazyObject
def redis_client():
    """Lazily construct the Redis client used by cacheops.

    Supports two configuration styles:
      - ``CACHEOPS_SENTINEL`` dict: connect through Redis Sentinel and return
        a client bound to the configured master;
      - ``CACHEOPS_REDIS`` as a URL string or a kwargs dict: direct connection.
    """
    if settings.CACHEOPS_SENTINEL and isinstance(settings.CACHEOPS_SENTINEL, dict):
        sentinel = Sentinel(
            settings.CACHEOPS_SENTINEL['location'],
            socket_timeout=settings.CACHEOPS_SENTINEL.get('socket_timeout')
        )
        return sentinel.master_for(
            settings.CACHEOPS_SENTINEL['service_name'],
            redis_class=CacheopsRedis,
            db=settings.CACHEOPS_SENTINEL.get('db') or 0
        )

    # Allow client connection settings to be specified by a URL.
    if isinstance(settings.CACHEOPS_REDIS, six.string_types):
        return CacheopsRedis.from_url(settings.CACHEOPS_REDIS)
    else:
        return CacheopsRedis(**settings.CACHEOPS_REDIS)
### Lua script loader
import re
import os.path
STRIP_RE = re.compile(r'TOSTRIP.*/TOSTRIP', re.S)
@memoize
def load_script(name, strip=False):
    """Load lua/<name>.lua (relative to this package) and register it with
    the redis client; memoized so each script is registered once.

    With strip=True, the TOSTRIP.../TOSTRIP-delimited debug section of the
    script is removed before registration.
    """
    filename = os.path.join(os.path.dirname(__file__), 'lua/%s.lua' % name)
    with open(filename) as f:
        code = f.read()
    if strip:
        code = STRIP_RE.sub('', code)
    return redis_client.register_script(code)
| bsd-3-clause |
molebot/brython | www/src/Lib/browser/markdown.py | 623 | 13060 | # -*- coding: utf-8 -*-
try:
import _jsre as re
except:
import re
import random
import time
letters = 'abcdefghijklmnopqrstuvwxyz'
letters += letters.upper()+'0123456789'
class URL:
    """A link target parsed from markdown source: an href plus optional title.

    The source is the text of a link destination, e.g.
    ``http://example.com "title"``; the title, if present, may be wrapped
    in double quotes, single quotes or parentheses.
    """

    def __init__(self, src):
        parts = src.split(maxsplit=1)
        self.href = parts[0]
        self.alt = ''
        if len(parts) == 2:
            title = parts[1]
            # Accept any one of the three markdown title delimiters.
            for opener, closer in (('"', '"'), ("'", "'"), ('(', ')')):
                if title[0] == opener and title[-1] == closer:
                    self.alt = title[1:-1]
                    break
class CodeBlock:
    """A literal code section, either indented or fenced with ``` marks."""

    def __init__(self, line):
        self.lines = [line]
        # A fence such as ```python carries an "info string"; it is later
        # used as the CSS class of the rendered <pre> element.
        if line.startswith("```") and len(line) > 3:
            self.info = line[3:]
        else:
            self.info = None

    def to_html(self):
        # The opening fence line is markup, not content: drop it.
        if self.lines[0].startswith("`"):
            self.lines.pop(0)
        body = unmark(escape('\n'.join(self.lines)))
        css_class = self.info or "marked"
        html = '<pre class="%s">%s</pre>\n' % (css_class, body)
        return html, []
class HtmlBlock:
    """A block-level chunk of raw HTML emitted verbatim (no markdown applied)."""

    def __init__(self, src):
        self.src = src

    def to_html(self):
        # NOTE(review): unlike CodeBlock.to_html this returns a bare string,
        # not an (html, scripts) pair -- looks intentional since the visible
        # mark() never appends an HtmlBlock to sections; confirm with callers.
        return self.src
class Marked:
    """A run of ordinary text on which markdown span syntax will be applied."""

    def __init__(self, line=''):
        self.line = line
        self.children = []

    def to_html(self):
        # Delegates span-level rendering to the module's apply_markdown().
        return apply_markdown(self.line)
# get references
refs = {}
ref_pattern = r"^\[(.*)\]:\s+(.*)"
def mark(src):
    """Split markdown source into sections and pre-process block-level syntax.

    Handles blockquotes, un/ordered lists, headers, indented and fenced code
    blocks, <script> bodies and reference-style link definitions. Returns a
    tuple (html, scripts): html is the concatenation of each section's
    rendering, scripts is the list of collected <script> bodies. Resets the
    module-level refs table; relies on module-level Marked, CodeBlock and URL
    helpers and on ref_pattern.
    """
    global refs
    t0 = time.time()
    refs = {}
    # split source in sections
    # sections can be :
    # - a block-level HTML element (markdown syntax will not be processed)
    # - a script
    # - a span-level HTML tag (markdown syntax will be processed)
    # - a code block
    # normalise line feeds
    src = src.replace('\r\n','\n')
    # lines followed by dashes
    src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src)
    src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src)
    # trailing empty sentinel line simplifies the lines[i+1] lookaheads below
    lines = src.split('\n')+['']
    # bq/ul/ol track the currently-open blockquote / list nesting depth
    i = bq = 0
    ul = ol = 0
    while i<len(lines):
        # enclose lines starting by > in a blockquote
        if lines[i].startswith('>'):
            nb = 1
            while nb<len(lines[i]) and lines[i][nb]=='>':
                nb += 1
            lines[i] = lines[i][nb:]
            if nb>bq:
                lines.insert(i,'<blockquote>'*(nb-bq))
                i += 1
                bq = nb
            elif nb<bq:
                lines.insert(i,'</blockquote>'*(bq-nb))
                i += 1
                bq = nb
        elif bq>0:
            lines.insert(i,'</blockquote>'*bq)
            i += 1
            bq = 0
        # unordered lists
        if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \
            and len(lines[i].lstrip())>1 \
            and lines[i].lstrip()[1]==' ' \
            and (i==0 or ul or not lines[i-1].strip()):
            # line indentation indicates nesting level
            nb = 1+len(lines[i])-len(lines[i].lstrip())
            lines[i] = '<li>'+lines[i][nb:]
            if nb>ul:
                lines.insert(i,'<ul>'*(nb-ul))
                i += 1
            elif nb<ul:
                lines.insert(i,'</ul>'*(ul-nb))
                i += 1
            ul = nb
        elif ul and not lines[i].strip():
            if i<len(lines)-1 and lines[i+1].strip() \
                and not lines[i+1].startswith(' '):
                nline = lines[i+1].lstrip()
                if nline[0] in '-+*' and len(nline)>1 and nline[1]==' ':
                    pass
                else:
                    # blank line followed by a non-list line: close open lists
                    lines.insert(i,'</ul>'*ul)
                    i += 1
                    ul = 0
        # ordered lists
        mo = re.search(r'^(\d+\.)',lines[i])
        if mo:
            if not ol:
                lines.insert(i,'<ol>')
                i += 1
            lines[i] = '<li>'+lines[i][len(mo.groups()[0]):]
            ol = 1
        elif ol and not lines[i].strip() and i<len(lines)-1 \
            and not lines[i+1].startswith(' ') \
            and not re.search(r'^(\d+\.)',lines[i+1]):
            lines.insert(i,'</ol>')
            i += 1
            ol = 0
        i += 1
    # close any list / blockquote still open at end of input
    if ul:
        lines.append('</ul>'*ul)
    if ol:
        lines.append('</ol>'*ol)
    if bq:
        lines.append('</blockquote>'*bq)
    t1 = time.time()
    #print('part 1', t1-t0)
    # Second pass: group the pre-processed lines into Marked / CodeBlock
    # sections, collecting <script> bodies separately.
    sections = []
    scripts = []
    section = Marked()
    i = 0
    while i<len(lines):
        line = lines[i]
        if line.strip() and line.startswith('    '):
            # indented (4-space) code block
            if isinstance(section,Marked) and section.line:
                sections.append(section)
            section = CodeBlock(line[4:])
            j = i+1
            while j<len(lines) and lines[j].startswith('    '):
                section.lines.append(lines[j][4:])
                j += 1
            sections.append(section)
            section = Marked()
            i = j
            continue
        elif line.strip() and line.startswith("```"):
            # fenced code blocks à la Github Flavoured Markdown
            if isinstance(section,Marked) and section.line:
                sections.append(section)
            section = CodeBlock(line)
            j = i+1
            while j<len(lines) and not lines[j].startswith("```"):
                section.lines.append(lines[j])
                j += 1
            sections.append(section)
            section = Marked()
            i = j+1
            continue
        elif line.lower().startswith('<script'):
            if isinstance(section,Marked) and section.line:
                sections.append(section)
                section = Marked()
            j = i+1
            while j<len(lines):
                if lines[j].lower().startswith('</script>'):
                    # collect the script body and blank out the consumed lines
                    scripts.append('\n'.join(lines[i+1:j]))
                    for k in range(i,j+1):
                        lines[k] = ''
                    break
                j += 1
            i = j
            continue
        # atext header
        elif line.startswith('#'):
            level = 1
            line = lines[i]
            while level<len(line) and line[level]=='#' and level<=6:
                level += 1
            if not line[level+1:].strip():
                if level==1:
                    i += 1
                    continue
                else:
                    lines[i] = '<H%s>%s</H%s>\n' %(level-1,'#',level-1)
            else:
                lines[i] = '<H%s>%s</H%s>\n' %(level,line[level+1:],level)
        else:
            mo = re.search(ref_pattern,line)
            if mo is not None:
                # reference-style link definition: record it, emit nothing
                if isinstance(section,Marked) and section.line:
                    sections.append(section)
                    section = Marked()
                key = mo.groups()[0]
                value = URL(mo.groups()[1])
                refs[key.lower()] = value
            else:
                if not line.strip():
                    line = '<p></p>'
                if section.line:
                    section.line += '\n'
                section.line += line
        i += 1
    t2 = time.time()
    #print('section 2', t2-t1)
    if isinstance(section,Marked) and section.line:
        sections.append(section)
    res = ''
    for section in sections:
        mk,_scripts = section.to_html()
        res += mk
        scripts += _scripts
    #print('end mark', time.time()-t2)
    return res,scripts
def escape(czone):
    """Escape markup-significant characters inside an inline-code zone.

    NOTE(review): every replace() below is an identity as written. Given the
    function's purpose (used via s_escape on inline code) the replacement
    strings were presumably HTML entities (&amp;, &lt;, &gt;, &#95;, &#42;)
    that got decoded back to plain characters when this file was copied --
    confirm against the upstream source before relying on this behavior.
    """
    czone = czone.replace('&','&')
    czone = czone.replace('<','<')
    czone = czone.replace('>','>')
    czone = czone.replace('_','_')
    czone = czone.replace('*','*')
    return czone
def s_escape(mo):
    """re.sub callback: apply escape() to the entire matched span."""
    # mo.group(0) is exactly mo.string[mo.start():mo.end()]
    return escape(mo.group(0))
def unmark(code_zone):
    # convert _ to _ inside inline code
    # NOTE(review): the replace() below is an identity as written; the comment
    # and the paired s_unmark suggest it originally converted an HTML entity
    # (likely &#95;) back to '_' and the literal was corrupted by entity
    # decoding when this file was copied -- confirm against upstream.
    code_zone = code_zone.replace('_','_')
    return code_zone
def s_unmark(mo):
    """re.sub callback: un-escape inline-code content of the matched span.

    NOTE(review): the replace() below is an identity as written; it looks like
    an HTML entity (likely &#95;) was decoded to '_' when this file was
    copied -- confirm against upstream before changing it.
    """
    matched = mo.group(0)  # same text as mo.string[mo.start():mo.end()]
    return matched.replace('_','_')
def apply_markdown(src):
    """Apply span-level markdown to src: links, emphasis, inline code.

    Returns (html, scripts); scripts is always empty here (nothing appends to
    it) but keeps the return shape symmetric with mark(). Reference-style
    links are resolved through the module-level refs table populated by
    mark(); an unknown reference raises KeyError.

    NOTE(review): some replacement literals below look like HTML entities
    that were decoded when this file was copied; see the inline notes.
    """
    scripts = []
    key = None
    t0 = time.time()
    i = 0
    while i<len(src):
        # [text](url) inline links and [text][key] reference links
        if src[i]=='[':
            start_a = i+1
            # find the closing ']', skipping escaped \]
            while True:
                end_a = src.find(']',i)
                if end_a == -1:
                    break
                if src[end_a-1]=='\\':
                    i = end_a+1
                else:
                    break
            if end_a>-1 and src[start_a:end_a].find('\n')==-1:
                link = src[start_a:end_a]
                rest = src[end_a+1:].lstrip()
                if rest and rest[0]=='(':
                    # inline link: [text](href)
                    j = 0
                    while True:
                        end_href = rest.find(')',j)
                        if end_href == -1:
                            break
                        if rest[end_href-1]=='\\':
                            j = end_href+1
                        else:
                            break
                    if end_href>-1 and rest[:end_href].find('\n')==-1:
                        tag = '<a href="'+rest[1:end_href]+'">'+link+'</a>'
                        src = src[:start_a-1]+tag+rest[end_href+1:]
                        i = start_a+len(tag)
                elif rest and rest[0]=='[':
                    # reference link: [text][key]
                    j = 0
                    while True:
                        end_key = rest.find(']',j)
                        if end_key == -1:
                            break
                        if rest[end_key-1]=='\\':
                            j = end_key+1
                        else:
                            break
                    if end_key>-1 and rest[:end_key].find('\n')==-1:
                        if not key:
                            key = link
                        if key.lower() not in refs:
                            raise KeyError('unknown reference %s' %key)
                        url = refs[key.lower()]
                        tag = '<a href="'+url+'">'+link+'</a>'
                        src = src[:start_a-1]+tag+rest[end_key+1:]
                        i = start_a+len(tag)
        i += 1
    t1 = time.time()
    #print('apply markdown 1', t1-t0)
    # before applying the markup with _ and *, isolate HTML tags because
    # they can contain these characters
    # We replace them temporarily by a random string
    rstr = ''.join(random.choice(letters) for i in range(16))
    i = 0
    state = None
    start = -1
    data = ''
    tags = []
    while i<len(src):
        if src[i]=='<':
            j = i+1
            while j<len(src):
                if src[j]=='"' or src[j]=="'":
                    # track quoted attribute values so '>' inside them is skipped
                    if state==src[j] and src[j-1]!='\\':
                        state = None
                        j = start+len(data)+1
                        data = ''
                    elif state==None:
                        state = src[j]
                        start = j
                    else:
                        data += src[j]
                elif src[j]=='>' and state is None:
                    # complete tag: stash it and substitute the placeholder
                    tags.append(src[i:j+1])
                    src = src[:i]+rstr+src[j+1:]
                    i += len(rstr)
                    break
                elif state=='"' or state=="'":
                    data += src[j]
                elif src[j]=='\n':
                    # if a sign < is not followed by > in the same ligne, it
                    # is the sign "lesser than"
                    # NOTE(review): 'j=i+4' implies the replacement string was
                    # the 4-char entity '&lt;'; the literal below appears to
                    # have been decoded back to '<' when this file was copied.
                    src = src[:i]+'<'+src[i+1:]
                    j=i+4
                    break
                j += 1
        elif src[i]=='`' and i>0 and src[i-1]!='\\':
            # ignore the content of inline code
            j = i+1
            while j<len(src):
                if src[j]=='`' and src[j-1]!='\\':
                    break
                j += 1
            i = j
        i += 1
    t2 = time.time()
    #print('apply markdown 2', len(src), t2-t1)
    # escape "<", ">", "&" and "_" in inline code
    code_pattern = r'\`(.*?)\`'
    src = re.sub(code_pattern,s_escape,src)
    # replace escaped ` _ * by HTML characters
    src = src.replace(r'\\`','`')
    src = src.replace(r'\_','_')
    src = src.replace(r'\*','*')
    # emphasis
    strong_patterns = [('STRONG',r'\*\*(.*?)\*\*'),('B',r'__(.*?)__')]
    for tag,strong_pattern in strong_patterns:
        src = re.sub(strong_pattern,r'<%s>\1</%s>' %(tag,tag),src)
    em_patterns = [('EM',r'\*(.*?)\*'),('I',r'\_(.*?)\_')]
    for tag,em_pattern in em_patterns:
        src = re.sub(em_pattern,r'<%s>\1</%s>' %(tag,tag),src)
    # inline code
    code_pattern = r'\`(.*?)\`'
    src = re.sub(code_pattern,r'<code>\1</code>',src)
    # restore tags, last stashed first (rfind pairs with tags.pop())
    while True:
        pos = src.rfind(rstr)
        if pos==-1:
            break
        repl = tags.pop()
        src = src[:pos]+repl+src[pos+len(rstr):]
    src = '<p>'+src+'</p>'
    t3 = time.time()
    #print('apply markdown 3', t3-t2)
    return src,scripts
| bsd-3-clause |
Carpetsmoker/qutebrowser | scripts/dev/gen_resources.py | 7 | 1026 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# copyright 2014 florian bruhin (the compiler) <mail@qutebrowser.org>
# this file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the gnu general public license as published by
# the free software foundation, either version 3 of the license, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose. see the
# gnu general public license for more details.
#
# you should have received a copy of the gnu general public license
# along with qutebrowser. if not, see <http://www.gnu.org/licenses/>.
"""Generate Qt resources based on source files."""
import subprocess
# Run pyrcc5 and capture its generated Python source straight into the
# resources module; check=True aborts on a non-zero exit status.
output_path = 'qutebrowser/resources.py'
with open(output_path, 'w', encoding='utf-8') as resource_file:
    subprocess.run(['pyrcc5', 'qutebrowser.rcc'], stdout=resource_file, check=True)
| gpl-3.0 |
vadimtk/chrome4sdp | third_party/WebKit/Source/build/scripts/make_runtime_features.py | 51 | 4136 | #!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import in_generator
import name_utilities
from name_utilities import lower_first
import template_expander
class RuntimeFeatureWriter(in_generator.Writer):
    """Generates RuntimeEnabledFeatures.{h,cpp} from the .in feature list.

    Each input entry describes one runtime-enabled feature flag; __init__
    augments the entries with derived fields (lowered name, resolved status,
    combined enabled condition) before the Jinja templates render them.
    """
    class_name = 'RuntimeEnabledFeatures'
    filters = {
        'enable_conditional': name_utilities.enable_conditional_if_endif,
    }

    # FIXME: valid_values and defaults should probably roll into one object.
    valid_values = {
        'status': ['stable', 'experimental', 'test', 'deprecated'],
    }
    defaults = {
        'condition' : None,
        'depends_on' : [],
        'custom': False,
        'status': None,
    }
    # 'deprecated' features behave exactly like 'test' features below.
    _status_aliases = {
        'deprecated': 'test',
    }

    def __init__(self, in_file_path):
        super(RuntimeFeatureWriter, self).__init__(in_file_path)
        self._outputs = {(self.class_name + '.h'): self.generate_header,
                         (self.class_name + '.cpp'): self.generate_implementation,
                        }
        self._features = self.in_file.name_dictionaries
        # Make sure the resulting dictionaries have all the keys we expect.
        for feature in self._features:
            feature['first_lowered_name'] = lower_first(feature['name'])
            feature['status'] = self._status_aliases.get(feature['status'], feature['status'])
            # Most features just check their isFooEnabled bool
            # but some depend on more than one bool.
            enabled_condition = 'is%sEnabled' % feature['name']
            for dependant_name in feature['depends_on']:
                enabled_condition += ' && is%sEnabled' % dependant_name
            feature['enabled_condition'] = enabled_condition
        # Bug fix: use a list comprehension instead of filter(). On Python 3,
        # filter() returns a one-shot iterator, so a stored attribute would be
        # silently exhausted after its first consumer; a list is safe on both
        # Python 2 and 3 and preserves the original Python 2 semantics.
        self._non_custom_features = [feature for feature in self._features
                                     if not feature['custom']]

    def _feature_sets(self):
        # Another way to think of the status levels is as "sets of features"
        # which is how we're referring to them in this generator.
        return [status for status in self.valid_values['status'] if status not in self._status_aliases]

    @template_expander.use_jinja(class_name + '.h.tmpl', filters=filters)
    def generate_header(self):
        """Template context for RuntimeEnabledFeatures.h."""
        return {
            'features': self._features,
            'feature_sets': self._feature_sets(),
        }

    @template_expander.use_jinja(class_name + '.cpp.tmpl', filters=filters)
    def generate_implementation(self):
        """Template context for RuntimeEnabledFeatures.cpp."""
        return {
            'features': self._features,
            'feature_sets': self._feature_sets(),
        }
if __name__ == '__main__':
    # Command-line entry point: in_generator.Maker handles argument parsing
    # and drives the writer to emit the generated header/implementation.
    in_generator.Maker(RuntimeFeatureWriter).main(sys.argv)
| bsd-3-clause |
rhhayward/podcast_generator | podcast_generator/PodcastCreator.py | 1 | 4935 | import urllib.request as urllib
from lxml import etree
import os
from os.path import basename
from urllib.parse import urlparse
### PodcastCreator is the class that
### takes a set of downloaders,
### sets their settings, takes
### their downloaded files and
### makes them into an rss file
### for use with podcast
### aggregators.
class PodcastCreator:
    """Aggregates PodcastDownloader results and renders an RSS feed.

    Downloaders are registered via addDownloader(); getFiles() asks each to
    fetch new episodes within a shared maxCount budget, and writeOutputFile()
    renders every podcast known to the database into an RSS XML file suitable
    for podcast aggregators.
    """

    def __init__(self):
        self.files = []
        self.outputFile = ""
        self.title = ""
        self.link = ""
        self.enclosureBaseUrl = ""
        self.db = None
        self.destFolder = None
        self.maxCount = None
        self.downloaders = []
        # Work from a scratch directory so relative downloads land in /tmp.
        os.chdir("/tmp")

    ### addDownloader takes a PodcastDownloader
    ### object, sets its dest folder and
    ### db, and adds it to the list of
    ### available downloaders.
    def addDownloader(self, Downloader):
        """Register a downloader, propagating destFolder and db when set."""
        if self.destFolder is not None:
            Downloader.setDestFolder(self.destFolder)
        if self.db is not None:
            Downloader.useDb(self.db)
        self.downloaders.append(Downloader)

    def getFiles(self):
        """Run every registered downloader; return the total files fetched.

        Downloaders without their own maxCount inherit the remaining global
        budget, which is decremented by each downloader's haul.
        """
        downloadedCount = 0
        for downloader in self.downloaders:
            if self.maxCount is not None and downloader.maxCount is None:
                downloader.setMaxCount(self.maxCount)
            count = downloader.getFiles()
            downloadedCount += count
            if self.maxCount is not None:
                # Shrink the remaining budget for subsequent downloaders.
                self.maxCount -= count
        return downloadedCount

    def setMaxCount(self, count):
        """Set the maximum number of files to download overall."""
        self.maxCount = count

    def setDestFolder(self, destFolder):
        """Set the folder downloaded files are moved to."""
        self.destFolder = destFolder

    def useDb(self, db):
        """Set the podcast database object."""
        self.db = db

    def setLink(self, link):
        """Set the value used for the RSS <link> tag."""
        self.link = link

    def setEnclosureBaseUrl(self, enclosureBaseUrl):
        """Set the base URL where the files are served over HTTP."""
        self.enclosureBaseUrl = enclosureBaseUrl

    def setOutputXmlFile(self, updatedOutputFile):
        """Set the path the RSS file will be written to."""
        self.outputFile = updatedOutputFile

    def setTitle(self, title):
        """Set the RSS feed title (also used as the description)."""
        self.title = title

    def writeOutputFile(self):
        """Render all podcasts from the database into the output RSS file."""
        self.podcasts = self.db.getPodcastsFromDb()
        rss = etree.Element("rss")
        channel = etree.SubElement(rss, "channel")
        etree.SubElement(channel, "title").text = self.title
        etree.SubElement(channel, "description").text = self.title
        etree.SubElement(channel, "link").text = self.link
        etree.SubElement(channel, "language").text = "en-us"
        etree.SubElement(channel, "copyright").text = "Copyright 2999"
        for podcast in self.podcasts:
            file = podcast.getFileName()
            pubDate = podcast.getDate()
            item = etree.SubElement(channel, "item")
            # NOTE: 'urllib' is urllib.request (see imports); its quote is a
            # re-export of urllib.parse.quote in CPython.
            url = self.enclosureBaseUrl + urllib.quote(file)
            etree.SubElement(item, "enclosure").set("url", url)
            etree.SubElement(item, "category").text = "Podcasts"
            etree.SubElement(item, "pubDate").text = pubDate
            etree.SubElement(item, "guid").text = url
            titleAdded = False
            for field in podcast.getAdditionalFields():
                if field['fieldName'] == "title":
                    titleAdded = True
                etree.SubElement(item, field['fieldName']).text = field['fieldValue']
            if not titleAdded:
                # Fall back to the file name when no explicit title was stored.
                etree.SubElement(item, "title").text = file
        # Open the file only once the tree is built, so a database or
        # serialization failure does not truncate an existing feed; the
        # context manager guarantees the handle is closed.
        with open(self.outputFile, "wb") as fh:
            fh.write(etree.tostring(rss, encoding='UTF-8', xml_declaration=True,
                                    pretty_print=True))

    ### cleanupFiles takes a number of days before
    ### today to remove files from the fs and db
    def cleanupFiles(self, count):
        """Remove files older than `count` days from the db and filesystem."""
        files = self.db.cleanupFiles(count)
        for file in files:
            try:
                os.unlink(self.destFolder + file)
            except OSError:
                # Bug fix: the original built this message as a bare string
                # expression (a no-op) under a bare except; report it instead.
                print("there was a problem removing file " + self.destFolder + file)
| gpl-3.0 |
BT-rmartin/odoo | addons/portal_claim/portal_claim.py | 315 | 1871 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv
class crm_claim(osv.osv):
    """crm.claim extension: portal users default partner_id to themselves."""
    _inherit = "crm.claim"

    def _get_default_partner_id(self, cr, uid, context=None):
        """ Gives default partner_id """
        context = {} if context is None else context
        if not context.get('portal'):
            return False
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        # Special case for portal users, as they are not allowed to call
        # name_get on res.partner: resolve it here as superuser and hand the
        # (id, name) pair to the web client through the default.
        partner_obj = self.pool['res.partner']
        return partner_obj.name_get(cr, SUPERUSER_ID, [user.partner_id.id], context=context)[0]

    _defaults = {
        'partner_id': lambda s, cr, uid, c: s._get_default_partner_id(cr, uid, c),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FreeOneProject/kernel_mediatek_sprout | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
# These codes are stored as the ev_type attribute on the event classes below.
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
        """Build the event object matching the raw buffer's size.

        A 176-byte raw buffer is PEBS with load-latency data, 144 bytes is a
        plain PEBS record, anything else falls back to a generic PerfEvent.
        """
        size = len(raw_buf)
        if size == 144:
                return PebsEvent(name, comm, dso, symbol, raw_buf)
        if size == 176:
                return PebsNHM(name, comm, dso, symbol, raw_buf)
        return PerfEvent(name, comm, dso, symbol, raw_buf)
class PerfEvent(object):
        """Base class for one perf sample: identification plus the raw buffer."""
        event_num = 0

        def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
                self.name, self.comm, self.dso = name, comm, dso
                self.symbol = symbol
                self.raw_buf = raw_buf
                self.ev_type = ev_type
                PerfEvent.event_num += 1

        def show(self):
                """Print a one-line summary of the event."""
                # Parenthesized form behaves identically under Python 2's
                # print statement and is valid Python 3.
                print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso))
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
        """Basic Intel PEBS event: EFLAGS, linear IP and the GP registers,
        decoded from the first 80 bytes of the raw buffer."""
        pebs_num = 0

        def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
                # Ten consecutive unsigned 64-bit fields at the buffer start.
                (self.flags, self.ip,
                 self.ax, self.bx, self.cx, self.dx,
                 self.si, self.di, self.bp, self.sp) = struct.unpack('QQQQQQQQQQ', raw_buf[0:80])
                PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
                PebsEvent.pebs_num += 1
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
        """Nehalem/Westmere PEBS record with the four extra load-latency
        words (status, data linear address, data source encoding, latency)
        stored at bytes 144..176 of the raw buffer."""
        pebs_nhm_num = 0

        def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
                # Four unsigned 64-bit fields appended after the PEBS data.
                self.status, self.dla, self.dse, self.lat = struct.unpack('QQQQ', raw_buf[144:176])
                PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
                PebsNHM.pebs_nhm_num += 1
| gpl-2.0 |
sputnick-dev/weboob | modules/popolemploi/browser.py | 4 | 2656 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .pages import SearchPage, AdvertPage
from weboob.browser import PagesBrowser, URL
from urllib import quote_plus, quote
__all__ = ['PopolemploiBrowser']
class PopolemploiBrowser(PagesBrowser):
    """Weboob browser for the Pole Emploi (French employment agency) job site."""
    BASEURL = 'https://candidat.pole-emploi.fr/'

    # URL patterns the weboob framework maps to their page classes.
    advert = URL('candidat/rechercheoffres/detail/(?P<id>.*)', AdvertPage)
    search = URL('candidat/rechercheoffres/resultats/(?P<search>.*?)',
                 'https://offre.pole-emploi.fr/resultat\?offresPartenaires=true&libMetier=(?P<pattern>.*?)', SearchPage)

    def search_job(self, pattern=None):
        """Free-text job search; returns an iterator of job adverts."""
        return self.search.go(pattern=quote_plus(pattern)).iter_job_adverts()

    def advanced_search_job(self, metier='', place=None, contrat=None, salary=None,
                            qualification=None, limit_date=None, domain=None):
        """Search with structured criteria.

        `place` must be a '|'-separated string; parts 1 and 2 are spliced into
        the query (presumably the site's encoded location identifiers -- the
        format is dictated by the remote site, confirm against callers).
        The underscore-delimited `search` string is the site's own URL layout;
        '%' in the URL-quoted job name is re-encoded as '$00', presumably as
        the site expects.
        """
        splitted_place = place.split('|')
        search = 'A_%s_%s_%s__%s_P_%s_%s_%s_______INDIFFERENT______________%s___' % (quote(metier.encode('utf-8')).replace('%', '$00'),
                                                                                    splitted_place[1],
                                                                                    splitted_place[2],
                                                                                    contrat,
                                                                                    domain,
                                                                                    salary,
                                                                                    qualification,
                                                                                    limit_date
                                                                                    )
        return self.search.go(search=search).iter_job_adverts()

    def get_job_advert(self, id, advert):
        """Fetch one advert page by id and fill the given advert object."""
        return self.advert.go(id=id).get_job_advert(obj=advert)
| agpl-3.0 |
rishig/zulip | analytics/management/commands/check_analytics_state.py | 2 | 3256 | from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils.timezone import now as timezone_now
from analytics.models import installation_epoch, \
last_successful_fill
from analytics.lib.counts import COUNT_STATS, CountStat
from zerver.lib.timestamp import floor_to_hour, floor_to_day, verify_UTC, \
TimezoneNotUTCException
from zerver.models import Realm
import os
import time
from typing import Any, Dict
# Nagios plugin state codes; the numeric code and its label are both written
# into the state file consumed by the monitoring check.
states = {
    0: "OK",
    1: "WARNING",
    2: "CRITICAL",
    3: "UNKNOWN"
}
class Command(BaseCommand):
    help = """Checks FillState table.
    Run as a cron job that runs every hour."""

    def handle(self, *args: Any, **options: Any) -> None:
        """Compute FillState health and write it to the Nagios state file."""
        fill_state = self.get_fill_state()
        status = fill_state['status']
        message = fill_state['message']

        state_file_path = "/var/lib/nagios_state/check-analytics-state"
        state_file_tmp = state_file_path + "-tmp"

        with open(state_file_tmp, "w") as f:
            f.write("%s|%s|%s|%s\n" % (
                int(time.time()), status, states[status], message))
        # Write-then-rename so the monitoring side never reads a
        # half-written state file.
        os.rename(state_file_tmp, state_file_path)

    def get_fill_state(self) -> Dict[str, Any]:
        """Return {'status': code, 'message': text} describing FillState health.

        Status codes follow the module-level `states` mapping (0=OK,
        1=WARNING, 2=CRITICAL).
        """
        if not Realm.objects.exists():
            return {'status': 0, 'message': 'No realms exist, so not checking FillState.'}

        warning_unfilled_properties = []
        critical_unfilled_properties = []
        # Renamed loop variable from 'property' to avoid shadowing the builtin.
        for stat_property, stat in COUNT_STATS.items():
            last_fill = last_successful_fill(stat_property)
            if last_fill is None:
                # Never filled: measure staleness from the installation epoch.
                last_fill = installation_epoch()
            try:
                verify_UTC(last_fill)
            except TimezoneNotUTCException:
                return {'status': 2, 'message': 'FillState not in UTC for %s' % (stat_property,)}

            if stat.frequency == CountStat.DAY:
                floor_function = floor_to_day
                warning_threshold = timedelta(hours=26)
                critical_threshold = timedelta(hours=50)
            else:  # CountStat.HOUR
                floor_function = floor_to_hour
                warning_threshold = timedelta(minutes=90)
                critical_threshold = timedelta(minutes=150)

            if floor_function(last_fill) != last_fill:
                return {'status': 2, 'message': 'FillState not on %s boundary for %s' %
                        (stat.frequency, stat_property)}

            time_to_last_fill = timezone_now() - last_fill
            if time_to_last_fill > critical_threshold:
                critical_unfilled_properties.append(stat_property)
            elif time_to_last_fill > warning_threshold:
                warning_unfilled_properties.append(stat_property)

        if not critical_unfilled_properties and not warning_unfilled_properties:
            return {'status': 0, 'message': 'FillState looks fine.'}
        if not critical_unfilled_properties:
            return {'status': 1, 'message': 'Missed filling %s once.' %
                    (', '.join(warning_unfilled_properties),)}
        return {'status': 2, 'message': 'Missed filling %s once. Missed filling %s at least twice.' %
                (', '.join(warning_unfilled_properties), ', '.join(critical_unfilled_properties))}
| apache-2.0 |
azunite/chrome_build | third_party/logilab/common/sphinx_ext.py | 117 | 3329 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
from logilab.common.decorators import monkeypatch
from sphinx.ext import autodoc
class DocstringOnlyModuleDocumenter(autodoc.ModuleDocumenter):
    """Module documenter that renders only the docstring: no signature,
    no directive header and no member documentation."""
    objtype = 'docstring'

    def format_signature(self):
        """Modules have no signature; emit nothing."""
        pass

    def add_directive_header(self, sig):
        """Suppress the directive header entirely."""
        pass

    def document_members(self, all_members=False):
        """Never document members for docstring-only output."""
        pass

    def resolve_name(self, modname, parents, path, base):
        if modname is None:
            return (path or '') + base, []
        return modname, parents + [base]
#autodoc.add_documenter(DocstringOnlyModuleDocumenter)
def setup(app):
    """Sphinx extension entry point: register the docstring-only documenter."""
    app.add_autodocumenter(DocstringOnlyModuleDocumenter)
from sphinx.ext.autodoc import (ViewList, Options, AutodocReporter, nodes,
assemble_option_dict, nested_parse_with_titles)
@monkeypatch(autodoc.AutoDirective)
def run(self):
    """Replacement for autodoc.AutoDirective.run (installed via monkeypatch).

    Mirrors the upstream implementation, except that 'automodule' and
    'autodocstring' content is parsed into a section node with
    nested_parse_with_titles, so titles are allowed inside docstrings.
    """
    self.filename_set = set() # a set of dependent filenames
    self.reporter = self.state.document.reporter
    self.env = self.state.document.settings.env
    self.warnings = []
    self.result = ViewList()
    # find out what documenter to call
    objtype = self.name[4:]
    doc_class = self._registry[objtype]
    # process the options with the selected documenter's option_spec
    self.genopt = Options(assemble_option_dict(
        self.options.items(), doc_class.option_spec))
    # generate the output
    documenter = doc_class(self, self.arguments[0])
    documenter.generate(more_content=self.content)
    if not self.result:
        return self.warnings
    # record all filenames as dependencies -- this will at least
    # partially make automatic invalidation possible
    for fn in self.filename_set:
        self.env.note_dependency(fn)
    # use a custom reporter that correctly assigns lines to source
    # filename/description and lineno
    old_reporter = self.state.memo.reporter
    self.state.memo.reporter = AutodocReporter(self.result,
                                               self.state.memo.reporter)
    if self.name in ('automodule', 'autodocstring'):
        node = nodes.section()
        # necessary so that the child nodes get the right source/line set
        node.document = self.state.document
        nested_parse_with_titles(self.state, self.result, node)
    else:
        node = nodes.paragraph()
        node.document = self.state.document
        self.state.nested_parse(self.result, 0, node)
    # restore the original reporter before returning the generated nodes
    self.state.memo.reporter = old_reporter
    return self.warnings + node.children
| bsd-3-clause |
svn2github/gyp | pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
    'od.items() -> list of (key, value) pairs in od'
    result = []
    for key in self:
        result.append((key, self[key]))
    return result
def iterkeys(self):
    'od.iterkeys() -> an iterator over the keys in od'
    # Keys are what plain iteration yields.
    return self.__iter__()
def itervalues(self):
    'od.itervalues() -> an iterator over the values in od'
    # Lazy generator expression instead of a generator function body.
    return (self[key] for key in self)
def iteritems(self):
    'od.iteritems() -> an iterator over the (key, value) items in od'
    # Lazy generator expression instead of a generator function body.
    return ((key, self[key]) for key in self)
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
    '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
    If E is a dict instance, does: for k in E: od[k] = E[k]
    If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
    Or if E is an iterable of items, does: for k, v in E: od[k] = v
    In either case, this is followed by: for k, v in F.items(): od[k] = v
    '''
    # 'self' is taken out of *args instead of being a named parameter so
    # that a caller-supplied keyword literally named 'self' in **kwds
    # cannot collide with the instance argument.
    if len(args) > 2:
        raise TypeError('update() takes at most 2 positional '
                        'arguments (%d given)' % (len(args),))
    elif not args:
        raise TypeError('update() takes at least 1 argument (0 given)')
    self = args[0]
    # Make progressively weaker assumptions about "other"
    other = ()
    if len(args) == 2:
        other = args[1]
    if isinstance(other, dict):
        for key in other:
            self[key] = other[key]
    elif hasattr(other, 'keys'):
        for key in other.keys():
            self[key] = other[key]
    else:
        for key, value in other:
            self[key] = value
    for key, value in kwds.items():
        self[key] = value
__update = update  # let subclasses override update without breaking __init__
__marker = object()  # sentinel letting pop() tell "no default given" from None
def pop(self, key, default=__marker):
    '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
    If key is not found, d is returned if given, otherwise KeyError is raised.
    '''
    # EAFP form: attempt the lookup, fall back to the default (or raise)
    # only when the key is genuinely absent.
    try:
        value = self[key]
    except KeyError:
        if default is self.__marker:
            raise KeyError(key)
        return default
    del self[key]
    return value
def setdefault(self, key, default=None):
    'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
    # EAFP: return the existing value if present, otherwise insert and
    # return the default.
    try:
        return self[key]
    except KeyError:
        self[key] = default
        return default
def __repr__(self, _repr_running={}):
    'od.__repr__() <==> repr(od)'
    # _repr_running is a deliberately shared mutable default: it records
    # (object-id, thread-id) pairs currently being formatted so that a
    # self-referential od prints '...' instead of recursing forever.
    call_key = id(self), _get_ident()
    if call_key in _repr_running:
        return '...'
    _repr_running[call_key] = 1
    try:
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    finally:
        # Always unregister, even if items() raised.
        del _repr_running[call_key]
def __reduce__(self):
    'Return state information for pickling'
    items = [[k, self[k]] for k in self]
    # Copy the instance __dict__, then drop every attribute that a plain
    # OrderedDict already has, so only subclass/extra state is pickled.
    inst_dict = vars(self).copy()
    for k in vars(OrderedDict()):
        inst_dict.pop(k, None)
    if inst_dict:
        return (self.__class__, (items,), inst_dict)
    return self.__class__, (items,)
def copy(self):
    'od.copy() -> a shallow copy of od'
    # Rebuilding through the constructor preserves insertion order.
    return type(self)(self)
@classmethod
def fromkeys(cls, iterable, value=None):
    '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
    and values equal to v (which defaults to None).
    '''
    new = cls()
    for key in iterable:
        new[key] = value
    return new
def __eq__(self, other):
    '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
    while comparison to a regular mapping is order-insensitive.
    '''
    if isinstance(other, OrderedDict):
        # items() lists are compared pairwise, so insertion order matters.
        return len(self)==len(other) and self.items() == other.items()
    # Fall back to plain dict equality (order-insensitive) otherwise.
    return dict.__eq__(self, other)
def __ne__(self, other):
    # Defined explicitly because Python 2 does not derive != from __eq__.
    return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
    "od.viewkeys() -> a set-like object providing a view on od's keys"
    # KeysView is presumably imported at module top (not visible in this
    # chunk) -- verify against the file header.
    return KeysView(self)
def viewvalues(self):
    "od.viewvalues() -> an object providing a view on od's values"
    # ValuesView is presumably imported at module top -- verify.
    return ValuesView(self)
def viewitems(self):
    "od.viewitems() -> a set-like object providing a view on od's items"
    # ItemsView is presumably imported at module top -- verify.
    return ItemsView(self)
| bsd-3-clause |
davidzchen/tensorflow | tensorflow/python/data/experimental/kernel_tests/serialization/snapshot_dataset_serialization_test.py | 6 | 12104 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class SnapshotDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase,
    parameterized.TestCase):
  """Checkpoint save/restore tests for the `snapshot` transformation."""

  def _build_snapshot_dataset(self, repeat=False):
    # Return a dataset *factory*: the serialization harness calls it again
    # after every checkpoint restore to rebuild the input pipeline.

    def ds_fn():
      self._snapshot_dir = os.path.join(self.get_temp_dir(), "snapshot")
      if not os.path.exists(self._snapshot_dir):
        os.mkdir(self._snapshot_dir)
      dataset = dataset_ops.Dataset.range(100)
      dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))
      if repeat:
        dataset = dataset.repeat(2)
      return dataset

    return ds_fn

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointBeforeEpochEndNoRepeat(self):
    ds_fn = self._build_snapshot_dataset(repeat=False)
    # Consume half the elements, checkpoint, then finish after restoring.
    outputs = self.gen_outputs(ds_fn, [], 50, verify_exhausted=False)
    self.assertSequenceEqual(outputs, range(50))
    outputs.extend(
        self.gen_outputs(ds_fn, [], 50, ckpt_saved=True, verify_exhausted=True))
    self.assertSequenceEqual(outputs, range(100))

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointBeforeOneEpochWithReading(self):
    ds_fn = self._build_snapshot_dataset(repeat=True)
    # Generate 50 entries from iterator and save checkpoint.
    outputs = self.gen_outputs(ds_fn, [], 50, verify_exhausted=False)
    self.assertSequenceEqual(outputs, list(range(50)))
    # Restore from checkpoint and produce the rest of the elements from the
    # iterator.
    t = self.gen_outputs(ds_fn, [], 150, ckpt_saved=True, verify_exhausted=True)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs,
        list(range(50)) + list(range(50, 100)) + list(range(100)))

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointBeforeOneEpochThenRunAFewSteps(self):
    ds_fn = self._build_snapshot_dataset(repeat=False)
    # Checkpoint at element 10 but keep reading to 20; after restoring, the
    # iterator resumes from 10, so elements 10-19 are discarded below.
    outputs = self.gen_outputs(
        ds_fn, [10], 20, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, range(20))
    outputs = outputs[:10]
    outputs.extend(
        self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True))
    self.assertSequenceEqual(outputs, range(100))

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointAfterOneEpoch(self):
    ds_fn = self._build_snapshot_dataset(repeat=True)
    # Generate 110 entries from iterator and save checkpoint.
    outputs = self.gen_outputs(ds_fn, [], 110, verify_exhausted=False)
    self.assertSequenceEqual(outputs, list(range(100)) + list(range(10)))
    # Restore from checkpoint and produce the rest of the elements from the
    # iterator.
    t = self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs,
        list(range(100)) + list(range(10)) + list(range(10, 100)))

  @combinations.generate(test_base.default_test_combinations())
  def testCheckpointAfterOneEpochRunFewSteps(self):
    ds_fn = self._build_snapshot_dataset(repeat=True)
    # Generate 120 entries from iterator and save checkpoint at 110.
    outputs = self.gen_outputs(
        ds_fn, [110], 120, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, list(range(100)) + list(range(20)))
    # Restore from checkpoint and produce the rest of the elements from the
    # iterator.
    outputs = outputs[:110]
    t = self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs,
        list(range(100)) + list(range(10)) + list(range(10, 100)))
class LegacySnapshotDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase,
    parameterized.TestCase):
  """Checkpoint save/restore tests for the legacy `snapshot` op."""

  def _build_snapshot_dataset(self,
                              num_threads=1,
                              repeat=False,
                              pending_snapshot_expiry_seconds=-1,
                              shard_size_bytes=None):
    # Dataset factory; the harness re-invokes it after each restore.

    def ds_fn():
      self.snapshot_dir = os.path.join(self.get_temp_dir(), "snapshot")
      if not os.path.exists(self.snapshot_dir):
        os.mkdir(self.snapshot_dir)
      dataset = dataset_ops.Dataset.range(1000)
      dataset = dataset.apply(
          snapshot.legacy_snapshot(
              self.snapshot_dir,
              num_writer_threads=num_threads,
              writer_buffer_size=2 * num_threads,
              num_reader_threads=num_threads,
              reader_buffer_size=2 * num_threads,
              pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
              shard_size_bytes=shard_size_bytes))
      if repeat:
        dataset = dataset.repeat(2)
      return dataset

    return ds_fn

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testSnapshotBeforeEpochEnd(self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    outputs = self.gen_outputs(ds_fn, [], 100, verify_exhausted=False)
    self.assertSequenceEqual(outputs, range(100))
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointBeforeOneEpochThenRunFewStepsSmallShardMultiThread(
      self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
        shard_size_bytes=100)
    outputs = []
    with ops.Graph().as_default() as g:
      init_op, get_next_op, saver = self._build_graph(ds_fn)
      with self.session(graph=g) as sess:
        self._initialize(init_op, sess)
        # Read 100 elements, checkpoint, then read 300 more in the same
        # session before restoring.
        start = 0
        end = 100
        num_iters = end - start
        for _ in range(num_iters):
          outputs.append(sess.run(get_next_op))
        self._save(sess, saver)
        start = 100
        end = 400
        num_iters = end - start
        for _ in range(num_iters):
          outputs.append(sess.run(get_next_op))
    self.assertSequenceEqual(outputs, range(400))

    outputs = outputs[:100]
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))
    # Verify the on-disk layout: one fingerprint dir plus the graph proto,
    # one run dir plus metadata, and the expected number of shard files.
    fp_dir_list = os.listdir(self.snapshot_dir)
    self.assertLen(list(fp_dir_list), 2)
    for d in fp_dir_list:
      if not d.endswith("-graph.pbtxt"):
        fp_dir = os.path.join(self.snapshot_dir, d)
        run_dir_list = os.listdir(fp_dir)
        self.assertLen(list(run_dir_list), 2)
        for e in run_dir_list:
          if e != "snapshot.metadata":
            run_dir = os.path.join(fp_dir, e)
            self.assertLen(list(os.listdir(run_dir)), 258)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointBeforeOneEpochThenRunFewSteps(
      self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    # Generate 200 entries from iterator but save checkpoint after producing
    # 100.
    outputs = self.gen_outputs(
        ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, range(200))
    outputs = outputs[:100]
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointBeforeOneEpochThenRunFewStepsMultipleThreads(
      self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        num_threads=2,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    # Generate 200 entries from iterator but save checkpoint after producing
    # 100.
    outputs = self.gen_outputs(
        ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
    self.assertSequenceEqual(outputs, range(200))
    outputs = outputs[:100]
    outputs.extend(
        self.gen_outputs(
            ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
    self.assertSequenceEqual(outputs, range(1000))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointAfterOneEpoch(self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        repeat=True,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    # Generate 1100 entries from iterator and save checkpoint.
    outputs = self.gen_outputs(ds_fn, [], 1100, verify_exhausted=False)
    self.assertSequenceEqual(outputs, list(range(1000)) + list(range(100)))
    # Restore from checkpoint and produce the rest of the elements from the
    # iterator.
    t = self.gen_outputs(
        ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs,
        list(range(1000)) + list(range(100)) + list(range(900)))

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
  def testCheckpointAfterOneEpochThenRunFewSteps(
      self, pending_snapshot_expiry_seconds):
    ds_fn = self._build_snapshot_dataset(
        repeat=True,
        pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    # Generate 200 entries from iterator but save checkpoint after producing
    # 100.
    outputs = self.gen_outputs(
        ds_fn, [1100],
        1200,
        verify_exhausted=False,
        save_checkpoint_at_end=False)
    self.assertSequenceEqual(
        outputs,
        list(range(1000)) + list(range(100)) + list(range(100)))
    outputs = outputs[:1100]
    t = self.gen_outputs(
        ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
    outputs.extend(t)
    self.assertSequenceEqual(
        outputs, (list(range(1000)) + list(range(100)) + list(range(900))))
if __name__ == "__main__":
test.main()
| apache-2.0 |
elit3ge/SickRage | lib/unidecode/x083.py | 252 | 4643 | data = (
'Fu ', # 0x00
'Zhuo ', # 0x01
'Mao ', # 0x02
'Fan ', # 0x03
'Qie ', # 0x04
'Mao ', # 0x05
'Mao ', # 0x06
'Ba ', # 0x07
'Zi ', # 0x08
'Mo ', # 0x09
'Zi ', # 0x0a
'Di ', # 0x0b
'Chi ', # 0x0c
'Ji ', # 0x0d
'Jing ', # 0x0e
'Long ', # 0x0f
'[?] ', # 0x10
'Niao ', # 0x11
'[?] ', # 0x12
'Xue ', # 0x13
'Ying ', # 0x14
'Qiong ', # 0x15
'Ge ', # 0x16
'Ming ', # 0x17
'Li ', # 0x18
'Rong ', # 0x19
'Yin ', # 0x1a
'Gen ', # 0x1b
'Qian ', # 0x1c
'Chai ', # 0x1d
'Chen ', # 0x1e
'Yu ', # 0x1f
'Xiu ', # 0x20
'Zi ', # 0x21
'Lie ', # 0x22
'Wu ', # 0x23
'Ji ', # 0x24
'Kui ', # 0x25
'Ce ', # 0x26
'Chong ', # 0x27
'Ci ', # 0x28
'Gou ', # 0x29
'Guang ', # 0x2a
'Mang ', # 0x2b
'Chi ', # 0x2c
'Jiao ', # 0x2d
'Jiao ', # 0x2e
'Fu ', # 0x2f
'Yu ', # 0x30
'Zhu ', # 0x31
'Zi ', # 0x32
'Jiang ', # 0x33
'Hui ', # 0x34
'Yin ', # 0x35
'Cha ', # 0x36
'Fa ', # 0x37
'Rong ', # 0x38
'Ru ', # 0x39
'Chong ', # 0x3a
'Mang ', # 0x3b
'Tong ', # 0x3c
'Zhong ', # 0x3d
'[?] ', # 0x3e
'Zhu ', # 0x3f
'Xun ', # 0x40
'Huan ', # 0x41
'Kua ', # 0x42
'Quan ', # 0x43
'Gai ', # 0x44
'Da ', # 0x45
'Jing ', # 0x46
'Xing ', # 0x47
'Quan ', # 0x48
'Cao ', # 0x49
'Jing ', # 0x4a
'Er ', # 0x4b
'An ', # 0x4c
'Shou ', # 0x4d
'Chi ', # 0x4e
'Ren ', # 0x4f
'Jian ', # 0x50
'Ti ', # 0x51
'Huang ', # 0x52
'Ping ', # 0x53
'Li ', # 0x54
'Jin ', # 0x55
'Lao ', # 0x56
'Shu ', # 0x57
'Zhuang ', # 0x58
'Da ', # 0x59
'Jia ', # 0x5a
'Rao ', # 0x5b
'Bi ', # 0x5c
'Ze ', # 0x5d
'Qiao ', # 0x5e
'Hui ', # 0x5f
'Qi ', # 0x60
'Dang ', # 0x61
'[?] ', # 0x62
'Rong ', # 0x63
'Hun ', # 0x64
'Ying ', # 0x65
'Luo ', # 0x66
'Ying ', # 0x67
'Xun ', # 0x68
'Jin ', # 0x69
'Sun ', # 0x6a
'Yin ', # 0x6b
'Mai ', # 0x6c
'Hong ', # 0x6d
'Zhou ', # 0x6e
'Yao ', # 0x6f
'Du ', # 0x70
'Wei ', # 0x71
'Chu ', # 0x72
'Dou ', # 0x73
'Fu ', # 0x74
'Ren ', # 0x75
'Yin ', # 0x76
'He ', # 0x77
'Bi ', # 0x78
'Bu ', # 0x79
'Yun ', # 0x7a
'Di ', # 0x7b
'Tu ', # 0x7c
'Sui ', # 0x7d
'Sui ', # 0x7e
'Cheng ', # 0x7f
'Chen ', # 0x80
'Wu ', # 0x81
'Bie ', # 0x82
'Xi ', # 0x83
'Geng ', # 0x84
'Li ', # 0x85
'Fu ', # 0x86
'Zhu ', # 0x87
'Mo ', # 0x88
'Li ', # 0x89
'Zhuang ', # 0x8a
'Ji ', # 0x8b
'Duo ', # 0x8c
'Qiu ', # 0x8d
'Sha ', # 0x8e
'Suo ', # 0x8f
'Chen ', # 0x90
'Feng ', # 0x91
'Ju ', # 0x92
'Mei ', # 0x93
'Meng ', # 0x94
'Xing ', # 0x95
'Jing ', # 0x96
'Che ', # 0x97
'Xin ', # 0x98
'Jun ', # 0x99
'Yan ', # 0x9a
'Ting ', # 0x9b
'Diao ', # 0x9c
'Cuo ', # 0x9d
'Wan ', # 0x9e
'Han ', # 0x9f
'You ', # 0xa0
'Cuo ', # 0xa1
'Jia ', # 0xa2
'Wang ', # 0xa3
'You ', # 0xa4
'Niu ', # 0xa5
'Shao ', # 0xa6
'Xian ', # 0xa7
'Lang ', # 0xa8
'Fu ', # 0xa9
'E ', # 0xaa
'Mo ', # 0xab
'Wen ', # 0xac
'Jie ', # 0xad
'Nan ', # 0xae
'Mu ', # 0xaf
'Kan ', # 0xb0
'Lai ', # 0xb1
'Lian ', # 0xb2
'Shi ', # 0xb3
'Wo ', # 0xb4
'Usagi ', # 0xb5
'Lian ', # 0xb6
'Huo ', # 0xb7
'You ', # 0xb8
'Ying ', # 0xb9
'Ying ', # 0xba
'Nuc ', # 0xbb
'Chun ', # 0xbc
'Mang ', # 0xbd
'Mang ', # 0xbe
'Ci ', # 0xbf
'Wan ', # 0xc0
'Jing ', # 0xc1
'Di ', # 0xc2
'Qu ', # 0xc3
'Dong ', # 0xc4
'Jian ', # 0xc5
'Zou ', # 0xc6
'Gu ', # 0xc7
'La ', # 0xc8
'Lu ', # 0xc9
'Ju ', # 0xca
'Wei ', # 0xcb
'Jun ', # 0xcc
'Nie ', # 0xcd
'Kun ', # 0xce
'He ', # 0xcf
'Pu ', # 0xd0
'Zi ', # 0xd1
'Gao ', # 0xd2
'Guo ', # 0xd3
'Fu ', # 0xd4
'Lun ', # 0xd5
'Chang ', # 0xd6
'Chou ', # 0xd7
'Song ', # 0xd8
'Chui ', # 0xd9
'Zhan ', # 0xda
'Men ', # 0xdb
'Cai ', # 0xdc
'Ba ', # 0xdd
'Li ', # 0xde
'Tu ', # 0xdf
'Bo ', # 0xe0
'Han ', # 0xe1
'Bao ', # 0xe2
'Qin ', # 0xe3
'Juan ', # 0xe4
'Xi ', # 0xe5
'Qin ', # 0xe6
'Di ', # 0xe7
'Jie ', # 0xe8
'Pu ', # 0xe9
'Dang ', # 0xea
'Jin ', # 0xeb
'Zhao ', # 0xec
'Tai ', # 0xed
'Geng ', # 0xee
'Hua ', # 0xef
'Gu ', # 0xf0
'Ling ', # 0xf1
'Fei ', # 0xf2
'Jin ', # 0xf3
'An ', # 0xf4
'Wang ', # 0xf5
'Beng ', # 0xf6
'Zhou ', # 0xf7
'Yan ', # 0xf8
'Ju ', # 0xf9
'Jian ', # 0xfa
'Lin ', # 0xfb
'Tan ', # 0xfc
'Shu ', # 0xfd
'Tian ', # 0xfe
'Dao ', # 0xff
)
| gpl-3.0 |
cybertk/depot_tools | third_party/logilab/common/proc.py | 117 | 9352 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""module providing:
* process information (linux specific: rely on /proc)
* a class for resource control (memory / time / cpu time)
This module doesn't work on windows platforms (only tested on linux)
:organization: Logilab
"""
__docformat__ = "restructuredtext en"
import os
import stat
from resource import getrlimit, setrlimit, RLIMIT_CPU, RLIMIT_AS
from signal import signal, SIGXCPU, SIGKILL, SIGUSR2, SIGUSR1
from threading import Timer, currentThread, Thread, Event
from time import time
from logilab.common.tree import Node
class NoSuchProcess(Exception): pass
def proc_exists(pid):
    """Check that *pid* is registered in /proc.

    :raise NoSuchProcess: if /proc has no entry for this pid
    """
    entry = '/proc/%s' % pid
    if not os.path.exists(entry):
        raise NoSuchProcess()
# Field indexes into the whitespace-split /proc/<pid>/stat line; see the
# proc(5) man page for the full field layout.
PPID = 3      # parent process id
UTIME = 13    # jiffies scheduled in user mode
STIME = 14    # jiffies scheduled in kernel mode
CUTIME = 15   # user-mode jiffies of waited-for children
CSTIME = 16   # kernel-mode jiffies of waited-for children
VSIZE = 22    # virtual memory size
class ProcInfo(Node):
    """provide access to process information found in /proc"""

    def __init__(self, pid):
        self.pid = int(pid)
        Node.__init__(self, self.pid)
        proc_exists(self.pid)  # fail fast if the process is already gone
        self.file = '/proc/%s/stat' % self.pid
        self.ppid = int(self.status()[PPID])

    def memory_usage(self):
        """return the memory usage of the process in Ko"""
        # NOTE(review): proc(5) documents vsize as *bytes*; the historical
        # "Ko" wording above looks wrong -- confirm before relying on units.
        try:
            return int(self.status()[VSIZE])
        except IOError:
            # the process vanished between checks: report no memory used
            return 0

    def lineage_memory_usage(self):
        """return the memory used by this process and all of its children"""
        return self.memory_usage() + sum(child.lineage_memory_usage()
                                         for child in self.children)

    def time(self, children=0):
        """return the number of jiffies that this process has been scheduled
        in user and kernel mode"""
        status = self.status()
        time = int(status[UTIME]) + int(status[STIME])
        if children:
            time += int(status[CUTIME]) + int(status[CSTIME])
        return time

    def status(self):
        """return the list of fields found in /proc/<pid>/stat"""
        # Use a context manager so the file handle is closed immediately
        # (the previous version leaked it until garbage collection).
        with open(self.file) as stream:
            return stream.read().split()

    def name(self):
        """return the process name found in /proc/<pid>/stat"""
        return self.status()[1].strip('()')

    def age(self):
        """return the age (mtime of the stat file) of the process"""
        return os.stat(self.file)[stat.ST_MTIME]
class ProcInfoLoader:
    """manage process information"""

    def __init__(self):
        # cache of pid -> ProcInfo, shared by load() and load_all()
        self._loaded = {}

    def list_pids(self):
        """return a list of existent process ids"""
        for entry in os.listdir('/proc'):
            if entry.isdigit():
                yield int(entry)

    def load(self, pid):
        """get a ProcInfo object for a given pid"""
        pid = int(pid)
        if pid not in self._loaded:
            info = ProcInfo(pid)
            info.manager = self
            self._loaded[pid] = info
        return self._loaded[pid]

    def load_all(self):
        """load all processes information"""
        for pid in self.list_pids():
            try:
                info = self.load(pid)
                if info.parent is None and info.ppid:
                    # attach this process under its parent in the tree
                    self.load(info.ppid).append(info)
            except NoSuchProcess:
                pass
try:
    class ResourceError(BaseException):
        """Error raised when resource limit is reached"""
        limit = "Unknown Resource Limit"
except NameError:
    # BaseException only exists from Python 2.5 on; fall back to Exception
    # on older interpreters.
    class ResourceError(Exception):
        """Error raised when resource limit is reached"""
        limit = "Unknown Resource Limit"
class XCPUError(ResourceError):
    """Error raised when CPU Time limit is reached"""
    # raised from the SIGXCPU handler installed by ResourceController
    limit = "CPU Time"
class LineageMemoryError(ResourceError):
    """Error raised when the total amount of memory used by a process and
    it's child is reached"""
    # raised from the SIGUSR1 handler fed by MemorySentinel
    limit = "Lineage total Memory"
class TimeoutError(ResourceError):
    """Error raised when the process is running for to much time"""
    # NOTE: shadows the Python 3 builtin TimeoutError; kept for backward
    # compatibility with existing callers of this module.
    limit = "Real Time"
# Can't use subclass because the StandardError MemoryError raised
# (a tuple, suitable for use directly in an 'except' clause).
RESOURCE_LIMIT_EXCEPTION = (ResourceError, MemoryError)
class MemorySentinel(Thread):
    """A class checking a process don't use too much memory in a separated
    daemonic thread
    """
    def __init__(self, interval, memory_limit, gpid=os.getpid()):
        # NOTE(review): gpid defaults to the pid captured at *import* time;
        # callers monitoring another process group must pass gpid explicitly.
        Thread.__init__(self, target=self._run, name="Test.Sentinel")
        self.memory_limit = memory_limit
        self._stop = Event()
        self.interval = interval    # polling period, in seconds
        self.setDaemon(True)        # daemonic: never blocks interpreter exit
        self.gpid = gpid

    def stop(self):
        """stop the sentinel thread"""
        self._stop.set()

    def _run(self):
        pil = ProcInfoLoader()
        while not self._stop.isSet():
            if self.memory_limit <= pil.load(self.gpid).lineage_memory_usage():
                # over the limit: notify the whole process group
                os.killpg(self.gpid, SIGUSR1)
            self._stop.wait(self.interval)
class ResourceController:
    """Apply and remove CPU-time / wall-clock / memory limits on the current
    process group, delivering RESOURCE_LIMIT_EXCEPTION errors via signal
    handlers when a limit is hit.
    """
    def __init__(self, max_cpu_time=None, max_time=None, max_memory=None,
                 max_reprieve=60):
        if SIGXCPU == -1:
            raise RuntimeError("Unsupported platform")
        self.max_time = max_time            # wall-clock seconds, or None
        self.max_memory = max_memory        # lineage memory in bytes, or None
        self.max_cpu_time = max_cpu_time    # CPU seconds, or None
        self._reprieve = max_reprieve       # graceful aborts before SIGKILL
        self._timer = None
        self._msentinel = None
        self._old_max_memory = None
        self._old_usr1_hdlr = None
        self._old_max_cpu_time = None
        self._old_usr2_hdlr = None
        self._old_sigxcpu_hdlr = None
        self._limit_set = 0                 # nesting depth of setup_limit()
        self._abort_try = 0
        self._start_time = None
        self._elapse_time = 0               # wall time consumed by past runs

    def _hangle_sig_timeout(self, sig, frame):
        # SIGUSR2 handler: the wall-clock limit was reached.
        raise TimeoutError()

    def _hangle_sig_memory(self, sig, frame):
        # SIGUSR1 handler: the lineage memory limit was reached.
        if self._abort_try < self._reprieve:
            self._abort_try += 1
            raise LineageMemoryError("Memory limit reached")
        else:
            os.killpg(os.getpid(), SIGKILL)

    def _handle_sigxcpu(self, sig, frame):
        # SIGXCPU handler: the soft CPU-time rlimit was reached.
        if self._abort_try < self._reprieve:
            self._abort_try += 1
            raise XCPUError("Soft CPU time limit reached")
        else:
            os.killpg(os.getpid(), SIGKILL)

    def _time_out(self):
        # Timer callback: nudge the process group with SIGUSR2, re-arm while
        # limits are still active, and escalate to SIGKILL once the reprieve
        # budget is exhausted.
        if self._abort_try < self._reprieve:
            self._abort_try += 1
            os.killpg(os.getpid(), SIGUSR2)
            if self._limit_set > 0:
                self._timer = Timer(1, self._time_out)
                self._timer.start()
        else:
            os.killpg(os.getpid(), SIGKILL)

    def setup_limit(self):
        """set up the process limit"""
        # Signal handlers may only be installed from the main thread.
        assert currentThread().getName() == 'MainThread'
        os.setpgrp()  # own process group so killpg targets only our lineage
        if self._limit_set <= 0:
            if self.max_time is not None:
                self._old_usr2_hdlr = signal(SIGUSR2, self._hangle_sig_timeout)
                # subtract wall time already consumed by earlier runs
                self._timer = Timer(max(1, int(self.max_time) - self._elapse_time),
                                    self._time_out)
                self._start_time = int(time())
                self._timer.start()
            if self.max_cpu_time is not None:
                self._old_max_cpu_time = getrlimit(RLIMIT_CPU)
                cpu_limit = (int(self.max_cpu_time), self._old_max_cpu_time[1])
                self._old_sigxcpu_hdlr = signal(SIGXCPU, self._handle_sigxcpu)
                setrlimit(RLIMIT_CPU, cpu_limit)
            if self.max_memory is not None:
                self._msentinel = MemorySentinel(1, int(self.max_memory) )
                self._old_max_memory = getrlimit(RLIMIT_AS)
                self._old_usr1_hdlr = signal(SIGUSR1, self._hangle_sig_memory)
                as_limit = (int(self.max_memory), self._old_max_memory[1])
                setrlimit(RLIMIT_AS, as_limit)
                self._msentinel.start()
        self._limit_set += 1

    def clean_limit(self):
        """reinstall the old process limit"""
        if self._limit_set > 0:
            if self.max_time is not None:
                self._timer.cancel()
                # accumulate consumed wall time for the next setup_limit()
                self._elapse_time += int(time())-self._start_time
                self._timer = None
                signal(SIGUSR2, self._old_usr2_hdlr)
            if self.max_cpu_time is not None:
                setrlimit(RLIMIT_CPU, self._old_max_cpu_time)
                signal(SIGXCPU, self._old_sigxcpu_hdlr)
            if self.max_memory is not None:
                self._msentinel.stop()
                self._msentinel = None
                setrlimit(RLIMIT_AS, self._old_max_memory)
                signal(SIGUSR1, self._old_usr1_hdlr)
        # NOTE(review): this decrement runs even when no limit was active,
        # so unbalanced calls drive _limit_set negative -- confirm intent.
        self._limit_set -= 1
| bsd-3-clause |
charbeljc/OCB | addons/l10n_be/__init__.py | 430 | 1060 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sanjuro/RCJK | manage.py | 30 | 1649 | #!/usr/bin/env python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import __main__ as a way to pass a variable into the settings file
import logging
import os
import sys
logging.getLogger().setLevel(logging.INFO)

# Bootstrap must run before anything that imports Django / App Engine code:
# it verifies (only) that the dependency zips are present alongside manage.py.
import build
build.bootstrap(only_check_for_zips=True)

# Put every dependency zip on sys.path (after the script dir) so zipimport
# can load the bundled libraries.
for x in os.listdir('.'):
    if x.endswith('.zip'):
        if x in sys.path:
            continue
        logging.debug("Adding %s to the sys.path", x)
        sys.path.insert(1, x)

# Order matters below: the App Engine/Django helper must be installed before
# components and settings are imported.
from appengine_django import InstallAppengineHelperForDjango
InstallAppengineHelperForDjango()

from common import component
component.install_components()

from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| apache-2.0 |
xbmc/xbmc-antiquated | tools/EventClients/examples/python/example_simple.py | 228 | 1272 | #!/usr/bin/python
# This is a simple example showing how you can send a key press event
# to XBMC using the XBMCClient class
import sys
sys.path.append("../../lib/python")
import time
from xbmcclient import XBMCClient
def main():
    """Send a short demo key-press sequence to a local XBMC EventServer."""
    # NOTE(review): the original assigned host="localhost" and port=9777 but
    # never used them; XBMCClient.connect() presumably defaults to
    # localhost:9777 -- confirm (and pass them explicitly) if targeting a
    # remote box.  The dead locals have been removed.

    # Create an XBMCClient object and connect
    xbmc = XBMCClient("Example Remote", "../../icons/bluetooth.png")
    xbmc.connect()

    # wait for notification window to close (in XBMC) (optional)
    time.sleep(5)

    # send a up key press using the xbox gamepad map "XG" and button
    # name "dpadup" ( see PacketBUTTON doc for more details)
    xbmc.send_button(map="XG", button="dpadup")

    # wait for a few seconds to see its effect
    time.sleep(5)

    # send a right key press using the keyboard map "KB" and button
    # name "right"
    xbmc.send_keyboard_button("right")

    # wait for a few seconds to see its effect
    time.sleep(5)

    # that's enough, release the button.
    xbmc.release_button()

    # ok we're done, close the connection
    # Note that closing the connection clears any repeat key that is
    # active. So in this example, the actual release button event above
    # need not have been sent.
    xbmc.close()

if __name__=="__main__":
    main()
| gpl-2.0 |
yiakwy/numpy | numpy/lib/index_tricks.py | 4 | 26049 | from __future__ import division, absolute_import, print_function
import sys
import math
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, ScalarType, array, alltrue, cumprod, arange
)
from numpy.core.numerictypes import find_common_type
from . import function_base
import numpy.matrixlib as matrix
from .function_base import diff
from numpy.core.multiarray import ravel_multi_index, unravel_index
from numpy.lib.stride_tricks import as_strided
# Shorthand used elsewhere in this module to build matrix results.
makemat = matrix.matrix

# Public names re-exported through numpy.lib.
__all__ = [
    'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
    's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
    'diag_indices', 'diag_indices_from'
    ]
def ix_(*args):
    """
    Construct an open mesh from multiple sequences.

    This function takes N 1-D sequences and returns N outputs with N
    dimensions each, such that the shape is 1 in all but one dimension
    and the dimension with the non-unit shape value cycles through all
    N dimensions.

    Using `ix_` one can quickly construct index arrays that will index
    the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
    ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.

    Parameters
    ----------
    args : 1-D sequences
        Each sequence should be of integer or boolean type.
        Boolean sequences will be interpreted as boolean masks for the
        corresponding dimension (equivalent to passing in
        ``np.nonzero(boolean_sequence)``).

    Returns
    -------
    out : tuple of ndarrays
        N arrays with N dimensions each, with N the number of input
        sequences. Together these arrays form an open mesh.

    Raises
    ------
    ValueError
        If any input is not 1-dimensional.

    See Also
    --------
    ogrid, mgrid, meshgrid

    Examples
    --------
    >>> a = np.arange(10).reshape(2, 5)
    >>> a
    array([[0, 1, 2, 3, 4],
           [5, 6, 7, 8, 9]])
    >>> ixgrid = np.ix_([0,1], [2,4])
    >>> ixgrid
    (array([[0],
           [1]]), array([[2, 4]]))
    >>> ixgrid[0].shape, ixgrid[1].shape
    ((2, 1), (1, 2))
    >>> a[ixgrid]
    array([[2, 4],
           [7, 9]])

    """
    out = []
    nd = len(args)
    for k, new in enumerate(args):
        new = asarray(new)
        if new.ndim != 1:
            raise ValueError("Cross index must be 1 dimensional")
        if new.size == 0:
            # Explicitly type empty arrays to avoid the default float dtype,
            # which cannot be used for indexing (fixes ix_([]) results).
            new = new.astype(_nx.intp)
        if issubclass(new.dtype.type, _nx.bool_):
            # Boolean masks select the indices of their True entries.
            new, = new.nonzero()
        # Place this axis' length at position k, singleton everywhere else.
        new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
        out.append(new)
    return tuple(out)
class nd_grid(object):
    """
    Construct a multi-dimensional "meshgrid".

    ``grid = nd_grid()`` creates an instance which will return a mesh-grid
    when indexed. The dimension and number of the output arrays are equal
    to the number of indexing dimensions. If the step length is not a
    complex number, then the stop is not inclusive.

    However, if the step length is a **complex number** (e.g. 5j), then the
    integer part of its magnitude is interpreted as specifying the
    number of points to create between the start and stop values, where
    the stop value **is inclusive**.

    If instantiated with an argument of ``sparse=True``, the mesh-grid is
    open (or not fleshed out) so that only one-dimension of each returned
    argument is greater than 1.

    Parameters
    ----------
    sparse : bool, optional
        Whether the grid is sparse or not. Default is False.

    Notes
    -----
    Two instances of `nd_grid` are made available in the NumPy namespace,
    `mgrid` and `ogrid`::

        mgrid = nd_grid(sparse=False)
        ogrid = nd_grid(sparse=True)

    Users should use these pre-defined instances instead of using `nd_grid`
    directly.

    Examples
    --------
    >>> mgrid = np.lib.index_tricks.nd_grid()
    >>> mgrid[0:5,0:5]
    array([[[0, 0, 0, 0, 0],
            [1, 1, 1, 1, 1],
            [2, 2, 2, 2, 2],
            [3, 3, 3, 3, 3],
            [4, 4, 4, 4, 4]],
           [[0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4]]])
    >>> mgrid[-1:1:5j]
    array([-1. , -0.5,  0. ,  0.5,  1. ])
    >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
    >>> ogrid[0:5,0:5]
    [array([[0],
            [1],
            [2],
            [3],
            [4]]), array([[0, 1, 2, 3, 4]])]
    """
    def __init__(self, sparse=False):
        # sparse=True yields open (ogrid-style) grids; False dense (mgrid).
        self.sparse = sparse
    def __getitem__(self, key):
        try:
            # Assume a tuple of slices first; a plain slice raises
            # IndexError/TypeError below and is handled as the 1-d case.
            size = []
            typ = int
            for k in range(len(key)):
                step = key[k].step
                start = key[k].start
                if start is None:
                    start = 0
                if step is None:
                    step = 1
                if isinstance(step, complex):
                    # Imaginary step: |step| is the number of points and the
                    # stop is inclusive (linspace semantics) -> float output.
                    size.append(int(abs(step)))
                    typ = float
                else:
                    size.append(
                        int(math.ceil((key[k].stop - start)/(step*1.0))))
                    if (isinstance(step, float) or
                            isinstance(start, float) or
                            isinstance(key[k].stop, float)):
                        typ = float
            if self.sparse:
                # One 1-d array per axis; broadcasting axes added below.
                nn = [_nx.arange(_x, dtype=_t)
                        for _x, _t in zip(size, (typ,)*len(size))]
            else:
                nn = _nx.indices(size, typ)
            for k in range(len(size)):
                # Scale/shift the base indices into the requested range.
                step = key[k].step
                start = key[k].start
                if start is None:
                    start = 0
                if step is None:
                    step = 1
                if isinstance(step, complex):
                    step = int(abs(step))
                    if step != 1:
                        step = (key[k].stop - start)/float(step-1)
                nn[k] = (nn[k]*step+start)
            if self.sparse:
                # Insert length-1 axes so each array broadcasts against
                # the others along the remaining dimensions.
                slobj = [_nx.newaxis]*len(size)
                for k in range(len(size)):
                    slobj[k] = slice(None, None)
                    nn[k] = nn[k][slobj]
                    slobj[k] = _nx.newaxis
            return nn
        except (IndexError, TypeError):
            # Single slice: produce a 1-d array.
            step = key.step
            stop = key.stop
            start = key.start
            if start is None:
                start = 0
            if isinstance(step, complex):
                step = abs(step)
                length = int(step)
                if step != 1:
                    step = (key.stop-start)/float(step-1)
                stop = key.stop + step
                return _nx.arange(0, length, 1, float)*step + start
            else:
                return _nx.arange(start, stop, step)
    def __getslice__(self, i, j):
        # Python 2 only: a[i:j] without a step goes through __getslice__.
        return _nx.arange(i, j)
    def __len__(self):
        return 0
# Pre-built instances: ``mgrid`` returns dense (fleshed-out) grids,
# ``ogrid`` returns open (broadcastable) ones.
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
mgrid.__doc__ = None  # set in numpy.add_newdocs
ogrid.__doc__ = None  # set in numpy.add_newdocs
class AxisConcatenator(object):
    """
    Translates slice objects to concatenation along an axis.

    For detailed documentation on usage, see `r_`.
    """
    def _retval(self, res):
        # Optionally convert to matrix, then reset the per-call state so
        # the shared instance (r_/c_) can be reused for the next index
        # expression.
        if self.matrix:
            oldndim = res.ndim
            res = makemat(res)
            if oldndim == 1 and self.col:
                res = res.T
        self.axis = self._axis
        self.matrix = self._matrix
        self.col = 0
        return res
    def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
        # Remember the defaults so _retval can restore them after each use.
        self._axis = axis
        self._matrix = matrix
        self.axis = axis
        self.matrix = matrix
        self.col = 0
        self.trans1d = trans1d
        self.ndmin = ndmin
    def __getitem__(self, key):
        trans1d = self.trans1d
        ndmin = self.ndmin
        if isinstance(key, str):
            # A lone string is a matrix-building expression, evaluated in
            # the caller's namespace (np.bmat semantics).
            frame = sys._getframe().f_back
            mymat = matrix.bmat(key, frame.f_globals, frame.f_locals)
            return mymat
        if not isinstance(key, tuple):
            key = (key,)
        objs = []
        scalars = []
        arraytypes = []
        scalartypes = []
        for k in range(len(key)):
            scalar = False
            if isinstance(key[k], slice):
                step = key[k].step
                start = key[k].start
                stop = key[k].stop
                if start is None:
                    start = 0
                if step is None:
                    step = 1
                if isinstance(step, complex):
                    # Imaginary step: number of points, inclusive stop.
                    size = int(abs(step))
                    newobj = function_base.linspace(start, stop, num=size)
                else:
                    newobj = _nx.arange(start, stop, step)
                if ndmin > 1:
                    newobj = array(newobj, copy=False, ndmin=ndmin)
                    if trans1d != -1:
                        newobj = newobj.swapaxes(-1, trans1d)
            elif isinstance(key[k], str):
                # String directives ('r', 'c', 'axis[,ndmin[,trans1d]]')
                # are only legal as the first entry.
                if k != 0:
                    raise ValueError("special directives must be the "
                                     "first entry.")
                key0 = key[0]
                if key0 in 'rc':
                    self.matrix = True
                    self.col = (key0 == 'c')
                    continue
                if ',' in key0:
                    vec = key0.split(',')
                    try:
                        self.axis, ndmin = \
                            [int(x) for x in vec[:2]]
                        if len(vec) == 3:
                            trans1d = int(vec[2])
                        continue
                    except (ValueError, TypeError):
                        # Only conversion failures are expected here; the
                        # previous bare ``except:`` also swallowed
                        # KeyboardInterrupt/SystemExit.
                        raise ValueError("unknown special directive")
                try:
                    self.axis = int(key[k])
                    continue
                except (ValueError, TypeError):
                    raise ValueError("unknown special directive")
            elif type(key[k]) in ScalarType:
                newobj = array(key[k], ndmin=ndmin)
                scalars.append(k)
                scalar = True
                scalartypes.append(newobj.dtype)
            else:
                # Array-like entry: upgrade to ndmin dimensions, placing
                # the new length-1 axes as directed by trans1d.
                newobj = key[k]
                if ndmin > 1:
                    tempobj = array(newobj, copy=False, subok=True)
                    newobj = array(newobj, copy=False, subok=True,
                                   ndmin=ndmin)
                    if trans1d != -1 and tempobj.ndim < ndmin:
                        k2 = ndmin-tempobj.ndim
                        if (trans1d < 0):
                            trans1d += k2 + 1
                        defaxes = list(range(ndmin))
                        k1 = trans1d
                        axes = defaxes[:k1] + defaxes[k2:] + \
                            defaxes[k1:k2]
                        newobj = newobj.transpose(axes)
                    del tempobj
            objs.append(newobj)
            if not scalar and isinstance(newobj, _nx.ndarray):
                arraytypes.append(newobj.dtype)
        # Ensure that scalars won't up-cast unless warranted
        final_dtype = find_common_type(arraytypes, scalartypes)
        if final_dtype is not None:
            for k in scalars:
                objs[k] = objs[k].astype(final_dtype)
        res = _nx.concatenate(tuple(objs), axis=self.axis)
        return self._retval(res)
    def __getslice__(self, i, j):
        # Python 2 only: a[i:j] without a step goes through __getslice__.
        res = _nx.arange(i, j)
        return self._retval(res)
    def __len__(self):
        return 0
# separate classes are used here instead of just making r_ = concatenator(0),
# etc. because otherwise we couldn't get the doc string to come out right
# in help(r_)
class RClass(AxisConcatenator):
    """
    Translates slice objects to concatenation along the first axis.

    This is a simple way to build up arrays quickly. There are two use cases.

    1. If the index expression contains comma separated arrays, then stack
       them along their first axis.
    2. If the index expression contains slice notation or scalars then create
       a 1-D array with a range indicated by the slice notation.

    If slice notation is used, the syntax ``start:stop:step`` is equivalent
    to ``np.arange(start, stop, step)`` inside of the brackets. However, if
    ``step`` is an imaginary number (i.e. 100j) then its integer portion is
    interpreted as a number-of-points desired and the start and stop are
    inclusive. In other words ``start:stop:stepj`` is interpreted as
    ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
    After expansion of slice notation, all comma separated sequences are
    concatenated together.

    Optional character strings placed as the first element of the index
    expression can be used to change the output. The strings 'r' or 'c' result
    in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
    matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1
    (column) matrix is produced. If the result is 2-D then both provide the
    same matrix result.

    A string integer specifies which axis to stack multiple comma separated
    arrays along. A string of two comma-separated integers allows indication
    of the minimum number of dimensions to force each entry into as the
    second integer (the axis to concatenate along is still the first integer).

    A string with three comma-separated integers allows specification of the
    axis to concatenate along, the minimum number of dimensions to force the
    entries to, and which axis should contain the start of the arrays which
    are less than the specified number of dimensions. In other words the third
    integer allows you to specify where the 1's should be placed in the shape
    of the arrays that have their shapes upgraded. By default, they are placed
    in the front of the shape tuple. The third argument allows you to specify
    where the start of the array should be instead. Thus, a third argument of
    '0' would place the 1's at the end of the array shape. Negative integers
    specify where in the new shape tuple the last dimension of upgraded arrays
    should be placed, so the default is '-1'.

    Parameters
    ----------
    Not a function, so takes no parameters

    Returns
    -------
    A concatenated ndarray or matrix.

    See Also
    --------
    concatenate : Join a sequence of arrays together.
    c_ : Translates slice objects to concatenation along the second axis.

    Examples
    --------
    >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
    array([1, 2, 3, 0, 0, 4, 5, 6])
    >>> np.r_[-1:1:6j, [0]*3, 5, 6]
    array([-1. , -0.6, -0.2,  0.2,  0.6,  1. ,  0. ,  0. ,  0. ,  5. ,  6. ])

    String integers specify the axis to concatenate along or the minimum
    number of dimensions to force entries into.

    >>> a = np.array([[0, 1, 2], [3, 4, 5]])
    >>> np.r_['-1', a, a]  # concatenate along last axis
    array([[0, 1, 2, 0, 1, 2],
           [3, 4, 5, 3, 4, 5]])
    >>> np.r_['0,2', [1,2,3], [4,5,6]]  # concatenate along first axis, dim>=2
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> np.r_['0,2,0', [1,2,3], [4,5,6]]
    array([[1],
           [2],
           [3],
           [4],
           [5],
           [6]])
    >>> np.r_['1,2,0', [1,2,3], [4,5,6]]
    array([[1, 4],
           [2, 5],
           [3, 6]])

    Using 'r' or 'c' as a first string argument creates a matrix.

    >>> np.r_['r',[1,2,3], [4,5,6]]
    matrix([[1, 2, 3, 4, 5, 6]])
    """
    def __init__(self):
        # Concatenate along the first axis with all other options default.
        AxisConcatenator.__init__(self, 0)
r_ = RClass()
class CClass(AxisConcatenator):
    """
    Translates slice objects to concatenation along the second axis.

    This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
    useful because of its common occurrence. In particular, arrays will be
    stacked along their last axis after being upgraded to at least 2-D with
    1's post-pended to the shape (column vectors made out of 1-D arrays).

    For detailed documentation, see `r_`.

    Examples
    --------
    >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
    array([[1, 2, 3, 0, 0, 4, 5, 6]])
    """
    def __init__(self):
        # axis=-1 with ndmin=2/trans1d=0 stacks inputs as columns.
        AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
c_ = CClass()
class ndenumerate(object):
    """
    Multidimensional index iterator.

    Iterating over ``ndenumerate(arr)`` yields ``(index, value)`` pairs,
    where ``index`` is the N-dimensional coordinate tuple of ``value``
    inside the array.

    Parameters
    ----------
    arr : ndarray
        Input array.

    See Also
    --------
    ndindex, flatiter

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> for index, x in np.ndenumerate(a):
    ...     print index, x
    (0, 0) 1
    (0, 1) 2
    (1, 0) 3
    (1, 1) 4
    """
    def __init__(self, arr):
        # The flat iterator tracks its own N-d coordinates as it advances.
        self.iter = asarray(arr).flat

    def __iter__(self):
        return self

    def __next__(self):
        """
        Standard iterator method, returns the index tuple and array value.

        Returns
        -------
        coords : tuple of ints
            The indices of the current iteration.
        val : scalar
            The array element of the current iteration.
        """
        # Read the coordinates *before* advancing the flat iterator.
        coords = self.iter.coords
        value = next(self.iter)
        return coords, value

    next = __next__  # Python 2 iterator protocol
class ndindex(object):
    """
    An N-dimensional iterator object to index arrays.

    Given the shape of an array, an `ndindex` instance iterates over every
    N-dimensional index of that array in C (row-major) order, yielding a
    tuple of ints at each step.

    Parameters
    ----------
    `*args` : ints
        The size of each dimension of the array.  A single tuple argument
        is also accepted.

    See Also
    --------
    ndenumerate, flatiter

    Examples
    --------
    >>> for index in np.ndindex(3, 2, 1):
    ...     print index
    (0, 0, 0)
    (0, 1, 0)
    (1, 0, 0)
    (1, 1, 0)
    (2, 0, 0)
    (2, 1, 0)
    """
    def __init__(self, *shape):
        if len(shape) == 1 and isinstance(shape[0], tuple):
            shape = shape[0]
        # A zero-strided view of a single element fakes an array of the
        # requested shape without allocating it; nditer then walks its
        # multi-index for us.
        dummy = as_strided(_nx.zeros(1), shape=shape,
                           strides=_nx.zeros_like(shape))
        self._it = _nx.nditer(dummy, flags=['multi_index', 'zerosize_ok'],
                              order='C')

    def __iter__(self):
        return self

    def ndincr(self):
        """
        Increment the multi-dimensional index by one.

        This method is for backward compatibility only: do not use.
        """
        next(self)

    def __next__(self):
        """
        Standard iterator method, updates the index and returns the index
        tuple.

        Returns
        -------
        val : tuple of ints
            Returns a tuple containing the indices of the current
            iteration.
        """
        next(self._it)
        return self._it.multi_index

    next = __next__  # Python 2 iterator protocol
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
# it uses the standard array indexing syntax.
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-23
#
# Cosmetic changes by T. Oliphant 2001
#
#
class IndexExpression(object):
    """
    A nicer way to build up index tuples for arrays.

    .. note::
       Use one of the two predefined instances `index_exp` or `s_`
       rather than directly using `IndexExpression`.

    For any index combination, including slicing and axis insertion,
    ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
    array `a`. However, ``np.index_exp[indices]`` can be used anywhere
    in Python code and returns a tuple of slice objects that can be
    used in the construction of complex index expressions.

    Parameters
    ----------
    maketuple : bool
        If True, always returns a tuple.

    See Also
    --------
    index_exp : Predefined instance that always returns a tuple:
       `index_exp = IndexExpression(maketuple=True)`.
    s_ : Predefined instance without tuple conversion:
       `s_ = IndexExpression(maketuple=False)`.

    Examples
    --------
    >>> np.s_[2::2]
    slice(2, None, 2)
    >>> np.index_exp[2::2]
    (slice(2, None, 2),)
    >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
    array([2, 4])
    """
    def __init__(self, maketuple):
        # When True, scalar and slice indices are wrapped in a 1-tuple.
        self.maketuple = maketuple

    def __getitem__(self, item):
        if not self.maketuple:
            return item
        return item if isinstance(item, tuple) else (item,)


index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
# End contribution from Konrad.
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
def fill_diagonal(a, val, wrap=False):
    """Fill the main diagonal of the given array of any dimensionality.

    For an array `a` with ``a.ndim > 2``, the diagonal is the list of
    locations with indices ``a[i, i, ..., i]`` all identical. This function
    modifies the input array in-place, it does not return a value.

    Parameters
    ----------
    a : array, at least 2-D.
      Array whose diagonal is to be filled, it gets modified in-place.
    val : scalar
      Value to be written on the diagonal, its type must be compatible with
      that of the array a.
    wrap : bool
      For tall matrices in NumPy version up to 1.6.2, the
      diagonal "wrapped" after N columns. You can have this behavior
      with this option. This affects only tall matrices.

    See also
    --------
    diag_indices, diag_indices_from

    Notes
    -----
    .. versionadded:: 1.4.0

    This functionality can be obtained via `diag_indices`, but internally
    this version uses a much faster implementation that never constructs the
    indices and uses simple slicing.

    Examples
    --------
    >>> a = np.zeros((3, 3), int)
    >>> np.fill_diagonal(a, 5)
    >>> a
    array([[5, 0, 0],
           [0, 5, 0],
           [0, 0, 5]])

    # tall matrices wrap
    >>> a = np.zeros((5, 3), int)
    >>> fill_diagonal(a, 4, wrap=True)
    >>> a
    array([[4, 0, 0],
           [0, 4, 0],
           [0, 0, 4],
           [0, 0, 0],
           [4, 0, 0]])
    """
    if a.ndim < 2:
        raise ValueError("array must be at least 2-d")
    end = None
    if a.ndim == 2:
        # Explicit, fast formula for the common case. For 2-d arrays, we
        # accept rectangular ones.
        step = a.shape[1] + 1
        # Without wrap, stop writing before a tall matrix wraps its
        # diagonal back to the first column.
        if not wrap:
            end = a.shape[1] * a.shape[1]
    else:
        # For more than d=2, the strided formula is only valid for arrays
        # with all dimensions equal.  (Equivalent to the historical
        # ``alltrue(diff(a.shape) == 0)``, which relied on helpers removed
        # in modern NumPy.)
        if len(set(a.shape)) != 1:
            raise ValueError("All dimensions of input must be of equal length")
        step = 1 + (cumprod(a.shape[:-1])).sum()
    # Write the value out into the diagonal.
    a.flat[:end:step] = val
def diag_indices(n, ndim=2):
    """
    Return the indices to access the main diagonal of an array.

    This returns a tuple of indices that can be used to access the main
    diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
    (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
    ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
    for ``i = [0..n-1]``.

    Parameters
    ----------
    n : int
      The size, along each dimension, of the arrays for which the returned
      indices can be used.
    ndim : int, optional
      The number of dimensions.

    See also
    --------
    diag_indices_from

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    Create a set of indices to access the diagonal of a (4, 4) array:

    >>> di = np.diag_indices(4)
    >>> di
    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[di] = 100

    Now, we create indices to manipulate a 3-D array:

    >>> d3 = np.diag_indices(2, 3)
    >>> d3
    (array([0, 1]), array([0, 1]), array([0, 1]))
    """
    # The same index array repeated once per axis selects a[i, i, ..., i].
    idx = arange(n)
    return (idx,) * ndim


def diag_indices_from(arr):
    """
    Return the indices to access the main diagonal of an n-dimensional array.

    See `diag_indices` for full details.

    Parameters
    ----------
    arr : array, at least 2-D

    See Also
    --------
    diag_indices

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim < 2:
        raise ValueError("input array must be at least 2-d")
    # The strided diagonal formula is only valid for arrays with all
    # dimensions equal.  (Equivalent to the historical
    # ``alltrue(diff(arr.shape) == 0)``, which relied on helpers removed
    # in modern NumPy.)
    if len(set(arr.shape)) != 1:
        raise ValueError("All dimensions of input must be of equal length")
    return diag_indices(arr.shape[0], arr.ndim)
| bsd-3-clause |
RedHatQE/cfme_tests | cfme/tests/automate/custom_button/test_service_objects.py | 1 | 25666 | import fauxfactory
import pytest
from widgetastic_patternfly import Dropdown
from cfme.services.myservice import MyService
from cfme.tests.automate.custom_button import CustomButtonSSUIDropdwon
from cfme.tests.automate.custom_button import log_request_check
from cfme.tests.automate.custom_button import TextInputDialogSSUIView
from cfme.tests.automate.custom_button import TextInputDialogView
from cfme.utils.appliance import ViaREST
from cfme.utils.appliance import ViaSSUI
from cfme.utils.appliance import ViaUI
from cfme.utils.appliance.implementations.ssui import navigate_to as ssui_nav
from cfme.utils.appliance.implementations.ui import navigate_to as ui_nav
from cfme.utils.blockers import BZ
from cfme.utils.wait import TimedOutError
from cfme.utils.wait import wait_for
# All tests in this module default to tier 2 unless overridden per-test.
pytestmark = [pytest.mark.tier(2)]

# Button-group object types the tests are parametrized over.
OBJECTS = ["SERVICE", "GENERIC"]

# Maps a button's "display for" option to the destinations where the
# button must appear.
DISPLAY_NAV = {
    "Single entity": ["Details"],
    "List": ["All"],
    "Single and list": ["All", "Details"],
}

# Request-submission modes available on a custom button.
SUBMIT = ["Submit all", "One by one"]

# Per-param display flags: either the group's text or the button's text
# is hidden.
TEXT_DISPLAY = {
    "group": {"group_display": False, "btn_display": True},
    "button": {"group_display": True, "btn_display": False},
}
@pytest.fixture(scope="module")
def service(appliance):
    """Module-scoped service created over REST; deleted on teardown."""
    name = "service_{}".format(fauxfactory.gen_numeric_string(3))
    rest_service = appliance.rest_api.collections.services.action.create(
        name=name, display=True
    )[0]
    yield rest_service
    rest_service.action.delete()
@pytest.fixture(scope="module")
def definition(appliance):
    """Module-scoped generic object definition (created via REST)."""
    with appliance.context.use(ViaREST):
        gen_def = appliance.collections.generic_object_definitions.create(
            name="generic_class_{}".format(fauxfactory.gen_numeric_string(3)),
            description="Generic Object Definition",
            attributes={"addr01": "string"},
            associations={"services": "Service"},
            methods=["add_vm", "remove_vm"],
        )
    yield gen_def
    if gen_def.exists:
        gen_def.delete()
@pytest.fixture(scope="module")
def objects(appliance, definition, service):
    """Create a generic object instance attached to `service` and yield a
    mapping ``obj_type -> destination -> (navigable entity, dest name)``
    used by the tests to reach the All/Details pages.
    """
    with appliance.context.use(ViaREST):
        instance = appliance.collections.generic_objects.create(
            name="generic_instance_{}".format(fauxfactory.gen_numeric_string(3)),
            definition=definition,
            attributes={"addr01": "Test Address"},
            associations={"services": [service]},
        )
        # Register the generic object instance as a resource of the service.
        service.action.add_resource(
            resource=appliance.rest_api.collections.generic_objects.find_by(name=instance.name)[
                0
            ]._ref_repr()
        )
        instance.my_service = MyService(appliance, name=service.name)
    obj_dest = {
        "GENERIC": {
            "All": (instance.my_service, "GenericObjectInstance"),
            "Details": (instance, "MyServiceDetails"),
        },
        "SERVICE": {
            "All": (instance.my_service, "All"),
            "Details": (instance.my_service, "Details"),
        },
    }
    yield obj_dest
    if instance.exists:
        instance.delete()
@pytest.fixture(params=OBJECTS, ids=[obj.capitalize() for obj in OBJECTS], scope="module")
def button_group(appliance, request):
    """Yield a (button group, object type) pair for the parametrized type."""
    with appliance.context.use(ViaUI):
        groups = appliance.collections.button_groups
        group = groups.create(
            text=fauxfactory.gen_alphanumeric(),
            hover=fauxfactory.gen_alphanumeric(),
            type=getattr(groups, request.param),
        )
    yield group, request.param
    group.delete_if_exists()
@pytest.fixture(params=TEXT_DISPLAY, scope="module")
def serv_button_group(appliance, request):
    """Yield a (button, group) pair where either the group's text or the
    button's text display is disabled, per the fixture param.

    Teardown deletes the child button before its parent group (reverse of
    creation order), so the button is never removed through an
    already-deleted parent.
    """
    with appliance.context.use(ViaUI):
        collection = appliance.collections.button_groups
        button_gp = collection.create(
            text="group_{}".format(fauxfactory.gen_numeric_string(3)),
            hover="hover_{}".format(fauxfactory.gen_alphanumeric(3)),
            display=TEXT_DISPLAY[request.param]["group_display"],
            type=getattr(collection, "SERVICE"),
        )
        button = button_gp.buttons.create(
            text="btn_{}".format(fauxfactory.gen_numeric_string(3)),
            hover="hover_{}".format(fauxfactory.gen_alphanumeric(3)),
            display=TEXT_DISPLAY[request.param]["btn_display"],
            display_for="Single and list",
            system="Request",
            request="InspectMe",
        )
    yield button, button_gp
    # Clean up in reverse creation order: button first, then its group.
    button.delete_if_exists()
    button_gp.delete_if_exists()
@pytest.mark.tier(1)
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
@pytest.mark.parametrize(
    "display", DISPLAY_NAV.keys(), ids=[item.replace(" ", "_") for item in DISPLAY_NAV.keys()]
)
@pytest.mark.uncollectif(
    lambda context, button_group: context == ViaSSUI and "GENERIC" in button_group,
    reason="Generic object custom button not supported by SSUI",
)
@pytest.mark.meta(
    blockers=[
        BZ(
            1650066,
            unblock=lambda display, context: not (
                context is ViaSSUI and display in ["List", "Single and list"]
            ),
        )
    ]
)
def test_custom_button_display_service_obj(
    request, appliance, context, display, objects, button_group
):
    """ Test custom button display on a targeted page

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: critical
        caseposneg: positive
        testtype: functional
        startsin: 5.8
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Create custom button group with the Object type
            2. Create a custom button with specific display
            3. Navigate to object type page as per display selected [For service SSUI]
            4. Single entity: Details page of the entity
            5. List: All page of the entity
            6. Single and list: Both All and Details page of the entity
            7. Check for button group and button

    Bugzilla:
        1650066
    """
    group, obj_type = button_group
    # Buttons are always created through the normal UI, regardless of the
    # context the assertions later run in.
    with appliance.context.use(ViaUI):
        button = group.buttons.create(
            text=fauxfactory.gen_alphanumeric(),
            hover=fauxfactory.gen_alphanumeric(),
            display_for=display,
            system="Request",
            request="InspectMe",
        )
        request.addfinalizer(button.delete_if_exists)
    with appliance.context.use(context):
        navigate_to = ssui_nav if context is ViaSSUI else ui_nav
        # DISPLAY_NAV maps the chosen display option to the pages where
        # the button must show up.
        for destination in DISPLAY_NAV[display]:
            obj = objects[obj_type][destination][0]
            dest_name = objects[obj_type][destination][1]
            view = navigate_to(obj, dest_name)
            custom_button_group = Dropdown(view, group.text)
            assert custom_button_group.is_displayed
            assert custom_button_group.has_item(button.text)
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
@pytest.mark.parametrize("submit", SUBMIT, ids=[item.replace(" ", "_") for item in SUBMIT])
@pytest.mark.uncollectif(
    lambda context, button_group: context == ViaSSUI and "GENERIC" in button_group,
    reason="Generic object custom button not supported by SSUI",
)
def test_custom_button_automate_service_obj(
    request, appliance, context, submit, objects, button_group
):
    """ Test custom button for automate and requests count as per submit

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: high
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Create custom button group with the Object type
            2. Create a custom button with specific submit option and Single and list display
            3. Navigate to object type pages (All and Details)
            4. Check for button group and button
            5. Select/execute button from group dropdown for selected entities
            6. Check for the proper flash message related to button execution
            7. Check automation log requests. Submitted as per selected submit option or not.
            8. Submit all: single request for all entities execution
            9. One by one: separate requests for all entities execution

    Bugzilla:
        1650066
    """
    group, obj_type = button_group
    with appliance.context.use(ViaUI):
        button = group.buttons.create(
            text=fauxfactory.gen_alphanumeric(),
            hover=fauxfactory.gen_alphanumeric(),
            display_for="Single and list",
            submit=submit,
            system="Request",
            request="InspectMe",
        )
        request.addfinalizer(button.delete_if_exists)
    with appliance.context.use(context):
        navigate_to = ssui_nav if context is ViaSSUI else ui_nav
        # BZ-1650066: no custom button on All page
        destinations = (
            ["Details"]
            if context == ViaSSUI and BZ(1650066).blocks
            else ["All", "Details"]
        )
        for destination in destinations:
            obj = objects[obj_type][destination][0]
            dest_name = objects[obj_type][destination][1]
            view = navigate_to(obj, dest_name)
            custom_button_group = Dropdown(view, group.text)
            assert custom_button_group.has_item(button.text)
            # Entity count depends on the destination for `All` available entities and
            # `Details` means a single entity.
            if destination == "All":
                try:
                    paginator = view.paginator
                except AttributeError:
                    paginator = view.entities.paginator
                entity_count = min(paginator.items_amount, paginator.items_per_page)
                view.entities.paginator.check_all()
            else:
                entity_count = 1
            # Clear the automation log so the request count below starts
            # from a known-empty state.
            assert appliance.ssh_client.run_command(
                'echo -n "" > /var/www/miq/vmdb/log/automation.log'
            )
            custom_button_group.item_select(button.text)
            # SSUI does not support flash messages
            if context is ViaUI:
                diff = "executed" if appliance.version < "5.10" else "launched"
                view.flash.assert_message('"{btn}" was {diff}'.format(btn=button.text, diff=diff))
            # Submit all: single request for all entity execution
            # One by one: separate requests for all entity execution
            expected_count = 1 if submit == "Submit all" else entity_count
            try:
                wait_for(
                    log_request_check,
                    [appliance, expected_count],
                    timeout=600,
                    message="Check for expected request count",
                    delay=20,
                )
            except TimedOutError:
                assert False, "Expected {count} requests not found in automation log".format(
                    count=str(expected_count)
                )
@pytest.mark.meta(
    blockers=[BZ(1659452, unblock=lambda serv_button_group: "group" not in serv_button_group)]
)
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
def test_custom_button_text_display(appliance, context, serv_button_group, service):
    """ Test custom button text display on option

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/6h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Appliance with Service
            2. Create custom button `Group` or `Button` without display option
            3. Check Group/Button text display or not on UI and SSUI.

    Bugzilla:
        1650066
        1659452
    """
    my_service = MyService(appliance, name=service.name)
    button, group = serv_button_group
    with appliance.context.use(context):
        navigate_to = ssui_nav if context is ViaSSUI else ui_nav
        destinations = (
            ["Details"]
            if (BZ(1650066).blocks and context is ViaSSUI)
            else ["All", "Details"]
        )
        for destination in destinations:
            view = navigate_to(my_service, destination)
            custom_button_group = Dropdown(view, group.text)
            if group.display is True:
                # "button" param (see TEXT_DISPLAY): group text shown, button
                # text hidden -> the button appears as an empty-string item.
                assert "" in custom_button_group.items
            else:
                # "group" param: group text hidden -> the dropdown itself
                # reads as empty.
                assert custom_button_group.read() == ""
@pytest.fixture(params=["enablement", "visibility"], scope="module")
def vis_enb_button(request, appliance, button_group):
    """Create custom button with enablement/visibility expression"""
    group, _ = button_group
    expression = {
        request.param: {"tag": "My Company Tags : Department", "value": "Engineering"}
    }
    with appliance.context.use(ViaUI):
        button = group.buttons.create(
            text=fauxfactory.gen_alphanumeric(),
            hover=fauxfactory.gen_alphanumeric(),
            display_for="Single entity",
            system="Request",
            request="InspectMe",
            **expression
        )
    yield button, request.param
    button.delete_if_exists()
@pytest.mark.tier(0)
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
@pytest.mark.uncollectif(
    lambda context, button_group: "GENERIC" in button_group,
    reason="Generic object custom button not supported by SSUI",
)
def test_custom_button_expression_service_obj(
    appliance, context, objects, button_group, vis_enb_button
):
    """ Test custom button as per expression enablement/visibility.

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        casecomponent: CustomButton
        startsin: 5.9
        testSteps:
            1. Create custom button group with the Object type
            2. Create a custom button with expression (Tag)
                a. Enablement Expression
                b. Visibility Expression
            3. Navigate to object Detail page
            4. Check: button should not enable/visible without tag
            5. Check: button should enable/visible with tag

    Bugzilla:
        1509959, 1513498
    """
    # ToDo: Add support for Generic Object by adding tagging ability from All page.
    group, obj_type = button_group
    button, expression = vis_enb_button
    obj = objects[obj_type]["Details"][0]
    dest_name = objects[obj_type]["Details"][1]
    navigate_to = ssui_nav if context is ViaSSUI else ui_nav
    tag_cat = appliance.collections.categories.instantiate(
        name="department", display_name="Department"
    )
    tag = tag_cat.collections.tags.instantiate(name="engineering", display_name="Engineering")
    # Check without tag: the button must be disabled (enablement) or
    # hidden (visibility).
    with appliance.context.use(ViaUI):
        if tag.display_name in [item.display_name for item in obj.get_tags()]:
            obj.remove_tag(tag)
    with appliance.context.use(context):
        view = navigate_to(obj, dest_name, wait_for_view=15)
        custom_button_group = (
            CustomButtonSSUIDropdwon(view, group.text)
            if context is ViaSSUI
            else Dropdown(view, group.text)
        )
        if expression == "enablement":
            # Note: SSUI still follows the 5.9 enablement behaviour. In the
            # latest version, a dropdown holding a single disabled button is
            # itself disabled.
            if appliance.version < "5.10" or (context is ViaSSUI):
                assert not custom_button_group.item_enabled(button.text)
            else:
                assert not custom_button_group.is_enabled
        elif expression == "visibility":
            assert not custom_button_group.is_displayed
    # Check with tag: the button must be enabled/visible.
    with appliance.context.use(ViaUI):
        if tag.display_name not in [item.display_name for item in obj.get_tags()]:
            obj.add_tag(tag)
    with appliance.context.use(context):
        view = navigate_to(obj, dest_name)
        custom_button_group = (
            CustomButtonSSUIDropdwon(view, group.text)
            if context is ViaSSUI
            else Dropdown(view, group.text)
        )
        if expression == "enablement":
            assert custom_button_group.item_enabled(button.text)
        elif expression == "visibility":
            assert button.text in custom_button_group.items
@pytest.mark.manual
@pytest.mark.tier(2)
@pytest.mark.parametrize("context", [ViaSSUI])
def test_custom_button_on_vm_resource_detail(context):
    """ Test custom button on SSUI vm resource detail page

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/2h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        setup:
            1. Add Provider
            2. Refresh Provider; Data should be collected.
            3. Create Simple Dialog for catalog
                * Navigate to Automation > Automate > Customization
                * Click on All Dialogs from a sidebar
                * From toolbar select "Add a new Dialog"
                * Fill Dialog's name and description
                * Drag TextBox (we can select other as well)
                * Save changes
            4. Create Catalog
                * Navigate to Services > Catalogs
                * Click on Catalogs from a sidebar
                * From toolbar Configuration select "Add New Catalog"
                * Fill name and description
                * Save changes
            5. Create a Catalog item
                * Navigate to Services > Catalogs
                * From sidebar select All Catalogs > catalog (created above)
                * From toolbar select Add New catalog item
                * Select Provider
                * Fill Name, description, catalog and dialog (created above)
                * Select VM name proper template etc...
            6. Order Catalog
                * Navigate to Services > Catalogs
                * Click on Service catalogs from a sidebar
                * Order catalog from this Page
        testSteps:
            1. Add custom button group for VM/Instance object from automation
                * Navigate to Automate > Automation > Customization
                * Click on Buttons from a sidebar
                * Select VM/Instance
                * From configuration select Add new Button Group
                * Fill text, hover, icon, icon color
                * Save change
            2. Add custom button in above group
                * Under this Group Click on configuration and select Add new Button
                * Fill text, hover, icon, icon color, dialog, method
                * Save changes
            3. Navigate to SSUI (https://hostname/ui/service/login) (Credentials as Normal UI)
            4. Click on My Services and select service (as per catalog item name)
            5. Click on Instance which ordered by service.
            6. Click on Group and select button
            7. Fill dialog and submit it.
        expectedResults:
            1. A group should be created (is_displayed)
            2. A button should created (is_displayed)
            3.
            4.
            5. Check button displayed in a toolbar or not (Details page of an instance)
            6. Dialog should display
            7. Check for the flash message "Order Request was Submitted" and
               check automation log for the request (Note request as per method attach to button in
               step-1).

    Bugzilla:
        1427430, 1450473, 1454910
    """
    # Manual test case: the steps are tracked in the Polarion docstring above,
    # no automation is executed here.
    pass
@pytest.mark.manual
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
def test_custom_button_role_access_service(context):
    """Test custom button for role access of SSUI

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Create role by copying EvmRole-user_self_service
            2. Create Group and respective user for role
            3. Create custom button group
            4. Create custom button with role
            5. Check use able to access custom button or not
    """
    # Manual test case: specification only, no automation is executed here.
    pass
@pytest.mark.manual
def test_custom_button_on_catalog_item():
    """Test addition of a custom button directly from a catalog item detail page.

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/8h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Add catalog_item
            2. Goto catalog detail page and select `add button` from toolbar
            3. Fill info and save button
    """
    # Manual test case: specification only, no automation is executed here.
    pass
@pytest.mark.manual
def test_custom_button_dialog_service_archived():
    """ From Service OPS check dialog invocation via custom button for an archived VM. ref: BZ1439883

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/8h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Create a button at the service level with InspectMe method
            2. Create a service that contains 1 VM
            3. Remove this VM from the provider, resulting in a VM state of 'Archived'
            4. Go to the service and try to execute the button

    Bugzilla:
        1439883
    """
    # Manual test case: specification only, no automation is executed here.
    pass
@pytest.mark.parametrize("context", [ViaUI, ViaSSUI])
@pytest.mark.uncollectif(
    lambda context, button_group: context == ViaSSUI and "GENERIC" in button_group,
    reason="Generic object custom button not supported by SSUI",
)
def test_custom_button_dialog_service_obj(
    appliance, dialog, request, context, objects, button_group
):
    """ Test custom button with dialog and InspectMe method

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/4h
        caseimportance: medium
        caseposneg: positive
        testtype: functional
        startsin: 5.9
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Create custom button group with the Object type
            2. Create a custom button with service dialog
            3. Navigate to object Details page
            4. Check for button group and button
            5. Select/execute button from group dropdown for selected entities
            6. Fill dialog and submit
            7. Check for the proper flash message related to button execution

    Bugzilla:
        1574774
    """
    group, obj_type = button_group

    # Button creation is only possible in the classic UI
    with appliance.context.use(ViaUI):
        button = group.buttons.create(
            text="btn_{}".format(fauxfactory.gen_alphanumeric(3)),
            hover="btn_hover_{}".format(fauxfactory.gen_alphanumeric(3)),
            dialog=dialog,
            system="Request",
            request="InspectMe",
        )
        request.addfinalizer(button.delete_if_exists)

    with appliance.context.use(context):
        navigate_to = ssui_nav if context is ViaSSUI else ui_nav

        obj = objects[obj_type]["Details"][0]
        dest_name = objects[obj_type]["Details"][1]
        view = navigate_to(obj, dest_name)
        custom_button_group = Dropdown(view, group.text)
        assert custom_button_group.has_item(button.text)

        # Clear the automation log so that only requests from this execution are counted
        assert appliance.ssh_client.run_command(
            'echo -n "" > /var/www/miq/vmdb/log/automation.log'
        )

        custom_button_group.item_select(button.text)
        _dialog_view = TextInputDialogView if context is ViaUI else TextInputDialogSSUIView
        dialog_view = view.browser.create_view(_dialog_view, wait="10s")
        assert dialog_view.service_name.fill("Custom Button Execute")
        dialog_view.submit.click()

        # SSUI does not support flash messages
        if context is ViaUI:
            view.flash.assert_message("Order Request was Submitted")

        # check request in log; one button execution should produce exactly one request
        expected_count = 1
        try:
            wait_for(
                log_request_check,
                [appliance, expected_count],
                timeout=600,
                message="Check for expected request count",
                delay=20,
            )
        except TimedOutError:
            # pytest.fail instead of `assert False` (flake8 B011; survives `-O`)
            pytest.fail(
                "Expected {count} requests not found in automation log".format(
                    count=expected_count
                )
            )
@pytest.mark.manual
def test_custom_button_open_url_service_obj(objects, button_group):
    """ Test Open url functionality of custom button.

    Polarion:
        assignee: ndhandre
        initialEstimate: 1/2h
        caseimportance: high
        caseposneg: positive
        testtype: functional
        startsin: 5.11
        casecomponent: CustomButton
        tags: custom_button
        testSteps:
            1. Create ruby method for url functionality
            2. Create custom button group with the Object type
            3. Create a custom button with open_url option and respective method
            4. Navigate to object Detail page
            5. Execute custom button
        expectedResults:
            1.
            2.
            3.
            4.
            5. New tab should open with respective url

    Bugzilla:
        1550002
    """
    # Manual test case: the fixtures are declared for the planned automation,
    # but no steps are executed yet.
    pass
| gpl-2.0 |
vnsofthe/odoo-dev | addons/report_webkit/convert.py | 322 | 2581 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.tools import convert

# Keep a reference to the stock importer class before it is monkey-patched
# further down, so the subclass can delegate to the original behaviour.
original_xml_import = convert.xml_import
class WebkitXMLImport(original_xml_import):
    """XML importer that additionally handles the webkit_header attribute of
    report tags.

    As discussed with the R&D Team, the current XML processing API does not
    offer enough flexibility to do this in a cleaner way, so the original
    _tag_report is wrapped (this allows chaining of several overrides and
    avoids a copy/paste of the original code). Not meant as a long term
    solution.
    """

    def _tag_report(self, cr, rec, data_node=None, mode=None):
        # let the stock importer create/update the report record first
        report_id = super(WebkitXMLImport, self)._tag_report(cr, rec, data_node)
        if rec.get('report_type') != 'webkit':
            return report_id

        header = rec.get('webkit_header')
        if header:
            # a false-like literal clears the header, anything else is an
            # xml id that must be resolved to a database id
            if header in ('False', '0', 'None'):
                webkit_header_id = False
            else:
                webkit_header_id = self.id_get(cr, header)
            self.pool.get('ir.actions.report.xml').write(
                cr, self.uid, report_id, {'webkit_header': webkit_header_id})
        return report_id
# Monkey-patch the converter so every subsequent xml import uses the
# webkit-aware importer class.
convert.xml_import = WebkitXMLImport
| agpl-3.0 |
anisyonk/pilot | RunJobUtilities.py | 2 | 36818 | import commands
import json
import os
import socket
import time
import re
import sys
from pUtil import timeStamp, debugInfo, tolog, readpar, verifyReleaseString,\
isAnalysisJob, dumpOrderedItems, grep, getExperiment, getGUID,\
getCmtconfig, timedCommand, getProperTimeout, removePattern, encode_string
from PilotErrors import PilotErrors
from FileStateClient import dumpFileStates, hasOnlyCopyToScratch
from SiteInformation import SiteInformation
# global variables
#siteroot = ""
def filterTCPString(TCPMessage):
    """ Remove any unwanted characters from the TCP message string and truncate if necessary """

    # a failed command can return html which ends up in (e.g.) pilotErrorDiag; reset it
    # (the error diag will be set in pilot.updatePandaServer())
    if "<HTML>" in TCPMessage.upper():
        tolog("Found html in TCP message string (will be reset): %s" % (TCPMessage))
        TCPMessage = ""

    # ;-signs corrupt the TCP message (key=value pairs are ;-separated) and would
    # crash the TCP server (job would fail with no subprocesses error); =-signs
    # are interpreted as pair separators and "-signs are stripped as well.
    # Order matters: "!=" must be handled before the plain "=" replacement.
    for unwanted, replacement in ((";", " "), ("!=", "ne"), ("=", " "), ('"', '')):
        TCPMessage = TCPMessage.replace(unwanted, replacement)

    # truncate if necessary
    if len(TCPMessage) > 250:
        tolog("TCP message string will be truncated to size 250")
        tolog("Original TCP message string: %s" % (TCPMessage))
        TCPMessage = TCPMessage[:250]

    return TCPMessage
def updateJobInfo(job, server, port, logfile=None, final=False, latereg=False):
    """ send job status updates to local pilot TCP server, in the format
    of status=running;pid=2343; logfile is the file that contains
    some debug information, usually used in failure case

    Arguments:
      job     -- job object whose state and metrics are reported
      server  -- host name of the local pilot TCP server
      port    -- port of the local pilot TCP server
      logfile -- optional path to a debug file to report
      final   -- True for the last update of the job (adds final state info)
      latereg -- True if the output files will be registered later
    Returns the server reply ("OK"/"NOTOK"), or "NOTOK" on socket errors.
    """

    # open a connection to the local pilot TCP server (2 minute timeout)
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(120)
        s.connect((server, port))
    except Exception, e:
        tolog("!!WARNING!!2999!! updateJobInfo caught a socket/connect exception: %s" % str(e))
        return "NOTOK"

    # collect the job info as key/value pairs; serialized below as k=v;k=v;...
    msgdic = {}
    msgdic["pid"] = os.getpid()
    msgdic["pgrp"] = os.getpgrp()
    msgdic["jobid"] = job.jobId
    msgdic["status"] = job.result[0]
    msgdic["jobState"] = job.jobState
    msgdic["transecode"] = job.result[1]
    msgdic["pilotecode"] = job.result[2]
    msgdic["timeStageIn"] = job.timeStageIn
    msgdic["timeStageOut"] = job.timeStageOut
    msgdic["timeSetup"] = job.timeSetup
    msgdic["timeExe"] = job.timeExe
    msgdic["cpuTime"] = job.cpuConsumptionTime
    msgdic["cpuUnit"] = job.cpuConsumptionUnit
    msgdic["cpuConversionFactor"] = job.cpuConversionFactor
    msgdic["nEvents"] = job.nEvents
    msgdic["nEventsW"] = job.nEventsW
    msgdic["vmPeakMax"] = job.vmPeakMax
    msgdic["vmPeakMean"] = job.vmPeakMean
    msgdic["RSSMean"] = job.RSSMean
    msgdic["JEM"] = job.JEM
    msgdic["cmtconfig"] = getCmtconfig(job.cmtconfig)
    msgdic["dbTime"] = job.dbTime
    msgdic["dbData"] = job.dbData

    # optional fields: only reported when set on the job object
    if job.external_stageout_time:
        msgdic['external_stageout_time'] = job.external_stageout_time

    if job.outputZipName and job.outputZipBucketID:
        msgdic['outputZipName'] = job.outputZipName
        msgdic['outputZipBucketID'] = job.outputZipBucketID

    # hpc job status
    if job.mode:
        msgdic["mode"] = job.mode
    if job.hpcStatus:
        msgdic['hpcStatus'] = job.hpcStatus
    if job.yodaJobMetrics:
        msgdic["yodaJobMetrics"] = json.dumps(job.yodaJobMetrics)
    if job.HPCJobId:
        msgdic['HPCJobId'] = job.HPCJobId
    # NOTE(review): duplicate of the outputZip block above - harmless (same
    # values are re-assigned) but presumably a copy/paste leftover
    if job.outputZipName and job.outputZipBucketID:
        msgdic['outputZipName'] = job.outputZipName
        msgdic['outputZipBucketID'] = job.outputZipBucketID
    if job.refreshNow:
        msgdic['refreshNow'] = job.refreshNow
    # a zero core count is a valid value and must also be reported
    if job.coreCount or job.coreCount == 0:
        msgdic['coreCount'] = job.coreCount

    # report FAX usage if at least one successful FAX transfer
    if job.filesWithFAX > 0:
        msgdic["filesWithFAX"] = job.filesWithFAX
    if job.filesWithoutFAX > 0:
        msgdic["filesWithoutFAX"] = job.filesWithoutFAX
    if job.bytesWithFAX > 0:
        msgdic["bytesWithFAX"] = job.bytesWithFAX
    if job.bytesWithoutFAX > 0:
        msgdic["bytesWithoutFAX"] = job.bytesWithoutFAX

    # report alternative stage-out usage if at least one successful stage-out to an alternative SE
    if job.filesAltStageOut > 0:
        msgdic["filesAltStageOut"] = job.filesAltStageOut
        tolog("filesAltStageOut=%d" % (job.filesAltStageOut))
    else:
        tolog("filesAltStageOut not set")
    if job.filesNormalStageOut > 0:
        msgdic["filesNormalStageOut"] = job.filesNormalStageOut
        tolog("filesNormalStageOut=%d" % (job.filesNormalStageOut))
    else:
        tolog("filesNormalStageOut not set")

    # truncate already now if necesary so not too much junk is sent back to the local pilot TCP server
    if job.pilotErrorDiag != None and job.pilotErrorDiag != "None" and len(job.pilotErrorDiag.strip()) != 0:
        # remove any unwanted characters from the string
        job.pilotErrorDiag = encode_string(job.pilotErrorDiag)
        msgdic["pilotErrorDiag"] = job.pilotErrorDiag

    # report trf error message if set
    if job.exeErrorDiag != "":
        # remove any unwanted characters from the string
        msgdic["exeErrorDiag"] = encode_string(job.exeErrorDiag)
        msgdic["exeErrorCode"] = job.exeErrorCode

    if logfile:
        msgdic["logfile"] = logfile

    # send the special setup string for the log transfer (on xrdcp systems)
    if job.spsetup:
        # temporarily remove = and ;-signs not to disrupt the TCP message (see ;-handling below)
        msgdic["spsetup"] = job.spsetup.replace(";", "^").replace("=", "!")
        tolog("Updated spsetup: %s" % (msgdic["spsetup"]))

    # set final job state (will be propagated to the job state file)
    if final:
        if job.subStatus:
            msgdic['subStatus'] = job.subStatus
        job.finalstate = getFinalState(job.result)
        tolog("Final payload state set to: %s" % (job.finalstate))
        msgdic["finalstate"] = job.finalstate
        if job.result[0] == "holding" and job.finalstate == "finished":
            if readpar('retry').upper() == "TRUE":
                tolog("This job is recoverable")
            else:
                tolog("This job is not recoverable since job recovery is switched off")

    # variables needed for later registration of output files
    # (log will be handled by the pilot)
    if latereg:
        latereg_str = "True"
    else:
        latereg_str = "False"
    msgdic["output_latereg"] = latereg_str

    # serialize all key/value pairs as k=v;k=v;...
    msg = ''
    for k in msgdic.keys():
        msg += "%s=%s;" % (k, msgdic[k])
    tolog("About to send TCP message to main pilot thread of length = %d" % len(msg))
    if len(msg) > 4096:
        tolog("!!WARNING!!1234!! TCP message too long (cannot truncate easily without harming encoded message)")

    try:
        s.send(msg)
        tolog("(Sent)")
        tm = s.recv(1024)
        tolog("(Received)")
    except Exception, e:
        tolog("!!WARNING!!2999!! updateJobInfo caught a send/receive exception: %s" % str(e))
        return "NOTOK"
    else:
        s.settimeout(None)
        s.close()
        tolog("Successfully sent and received TCP message")

    return tm # =OK or NOTOK
def getFinalState(result):
    """
    Figure out the final job state (finished or failed)
    Simplies job recovery

    result -- [state, transExitCode, pilotErrorCode]
    """

    # a non-zero transformation exit code always means a failed job
    if result[1] != 0:
        return "failed"

    error = PilotErrors()
    recoverable = error.isRecoverableErrorCode(result[2])
    killed_while_holding = (result[2] == error.ERR_KILLSIGNAL and result[0] == "holding")

    # the job has finished if the pilot error code is recoverable, or if the
    # job was killed while in the holding state
    if recoverable or killed_while_holding:
        return "finished"
    return "failed"
def updatePilotServer(job, server, port, logfile=None, final=False, latereg=False):
    """ handle the local pilot TCP server updates

    Retries the update a few times (more persistently for the final update)
    and returns True on success, False otherwise.
    """

    # the final update is more important, so allow more attempts
    max_trials = 10 if final else 2

    for trial in range(1, max_trials + 1):
        answer = updateJobInfo(job, server, port, logfile=logfile, final=final, latereg=latereg)
        if answer == "OK":
            tolog("Successfully updated local pilot TCP server at %s (Trial %d/%d)" % (timeStamp(), trial, max_trials))
            return True
        tolog("[Trial %d/%d] Failed to communicate with local pilot TCP server: %s" % (trial, max_trials, answer))
        time.sleep(5)

    # all attempts failed
    tolog("updatePilotServer failed")
    if final:
        tolog("!!FAILED!!3000!! Local pilot TCP server down, expecting pilot to fail job (cannot communicate final update back to main pilot thread)")
    return False
def getFileNamesFromString(s=""):
    """
    Return a list of file names (base names only) from the input string
    Ex. s = "a+b+c+", return: [a,b,c]

    :param s: "+"-separated list of file paths (an optional trailing "+" is ignored)
    :return: list of base file names (empty list for an empty string)
    """

    names = []
    if len(s) > 0:
        # remove any trailing + sign
        if s[-1] == "+":
            s = s[:-1]

        # split into individual entries and strip the directory part
        # (local renamed from "list" to avoid shadowing the builtin)
        names = [os.path.basename(path) for path in s.split("+")]

    return names
def getRemainingFiles(movedFileList, allFiles):
    """ Make a diff between the moved files and all files

    Removes (in place) the first occurrence of each moved file from allFiles,
    mirroring the original index-scan + del + break behaviour.

    :param movedFileList: files that have already been moved
    :param allFiles: full file list; mutated in place
    :return: the (mutated) allFiles list
    """

    for moved in movedFileList:
        # list.remove() drops the first matching entry only; files that are
        # not present are silently ignored (local renamed from the shadowed
        # builtin "file")
        if moved in allFiles:
            allFiles.remove(moved)

    return allFiles
def dumpOutput(filename):
    """ dump an extract from an ascii file

    Logs the whole file if it is small (<= 16k), otherwise the first and last
    8k. Returns the last extract that was read ("" if the file is missing).

    :param filename: path to the file to dump
    :return: last command output read (string)
    """

    def _safe_tolog(text):
        # protect against corrupted files containing illegal chars
        try:
            tolog(str(text))
        except Exception as e:
            tolog("!!WARNING!!3000!! Could not dump file: %s" % str(e))

    def _dump(extract):
        if extract == "":
            tolog("[no output?]")
        else:
            _safe_tolog(extract)

    ret = ""
    if not os.path.exists(filename):
        tolog("!!WARNING!!3000!! file %s does not exist" % (filename))
        return ret

    fsize = os.path.getsize(filename)
    tolog("Filename : %s" % (filename))
    tolog("File size: %s" % str(fsize))
    if fsize > 2**14: # 16k
        tolog("Begin output (first and last 8k)............................................")
        ret = commands.getoutput("head --bytes=8192 %s" % filename)
        _dump(ret)
        tolog("\n.... [snip] ....\n")
        ret = commands.getoutput("tail --bytes=8192 %s" % filename)
        _dump(ret)
    else:
        tolog("Begin output (all)..........................................................")
        ret = commands.getoutput("cat %s" % filename)
        _dump(ret)
    tolog("End output..................................................................")

    return ret
def isCommandOk(cmd):
    """ return True if the command is returning exit code 0 """

    tolog("Executing command: %s" % (cmd))
    exitcode, output = commands.getstatusoutput(cmd)
    if exitcode == 0:
        return True

    tolog("!!WARNING!!3000!! Command test failed with exit code %d: %s" % (exitcode, output))
    return False
def prepareInFiles(inFiles, filesizeIn, checksumIn):
    """ prepare the input files (remove non-valid names)

    If the file name list is modified, the file size and checksum lists are
    modified accordingly (index positions stay matched).

    :param inFiles: list of input file names (may be None, [] or [''])
    :param filesizeIn: file sizes, parallel to inFiles
    :param checksumIn: checksums, parallel to inFiles
    :return: (ins, fIn, cIn) filtered parallel lists
    """

    ins = []
    fIn = []
    cIn = []
    file_nr = -1
    if inFiles: # non empty list
        for inf in inFiles:
            file_nr += 1
            if inf and inf != 'NULL' and inf not in ins: # non-empty string and not NULL
                ins.append(inf)
                fIn.append(filesizeIn[file_nr])
                cIn.append(checksumIn[file_nr])

    # guard against inFiles being None or [] (the unguarded inFiles[0] used
    # to raise IndexError/TypeError for jobs without any input file entry)
    if inFiles and inFiles[0] != '':
        tolog("Input file(s): (%d in total)" % (len(inFiles)))
        dumpOrderedItems(inFiles)
    else:
        tolog("No input files for this job")

    return ins, fIn, cIn
def prepareOutFiles(outFiles, logFile, workdir, fullpath=False):
    """ verify and prepare the output files for transfer

    Verifies that every expected output file exists and collects its
    modification time (needed by NG).
    :param outFiles: list of output file names (or full paths if fullpath=True)
    :param logFile: log file name, added to the returned dictionary with an
                    empty date since it has not been created yet
    :param workdir: job work directory (ignored per-file when fullpath=True)
    :param fullpath: True means the entries in outFiles already carry a full path
    :return: (ec, pilotErrorDiag, outs, outsDict); ec is non-zero and outsDict
             empty when an expected output file is missing
    """
    # fullpath = True means that the file in outFiles already has a full path, adding it to workdir is then not needed

    ec = 0
    pilotErrorDiag = ""
    outs = []
    modt = []

    from SiteMover import SiteMover
    for outf in outFiles:
        if outf and outf != 'NULL': # non-empty string and not NULL
            path = os.path.join(workdir, outf)
            # the missing-file check uses the joined path or the raw entry,
            # depending on whether outf already carries the full path
            if (not os.path.isfile(path) and not fullpath) or (not os.path.isfile(outf) and fullpath):
                pilotErrorDiag = "Expected output file %s does not exist" % (path)
                tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
                error = PilotErrors()
                ec = error.ERR_MISSINGOUTPUTFILE
                break
            else:
                tolog("outf = %s" % (outf))
                if fullpath:
                    # remove the full path here from outf
                    workdir = os.path.dirname(outf)
                    outf = os.path.basename(outf)
                outs.append(outf)
                # get the modification time for the file (needed by NG)
                modt.append(SiteMover.getModTime(workdir, outf))

                # dump the file listing for debugging purposes
                tolog("Output file(s):")
                try:
                    _ec, _rs = commands.getstatusoutput("ls -l %s/%s" % (workdir, outf))
                except Exception, e:
                    tolog(str(e))
                else:
                    tolog(_rs)

    if ec == 0:
        # create a dictionary of the output files with matched modification times (needed to create the NG OutputFiles.xml)
        outsDict = dict(zip(outs, modt))
        # add the log file with a fictious date since it has not been created yet
        outsDict[logFile] = ''
    else:
        outsDict = {}

    return ec, pilotErrorDiag, outs, outsDict
def convertMetadata4NG(filenameOUT, filenameIN, outsDict, dataset, datasetDict):
    """ convert the metadata-<jobId>.xml to NG format

    Reads the PanDA-style metadata from filenameIN and writes the NG/CERNVM
    style OutputFiles.xml to filenameOUT.
    :param filenameOUT: path of the NG format xml to create
    :param filenameIN: path of the metadata xml to read
    :param outsDict: { lfn : modification time } for the output files
    :param dataset: default dataset name
    :param datasetDict: optional { lfn : dataset } mapping
    :return: True on success, False if either file could not be read/written
    """
    # note: 'dataset' will only be used if datasetDict is None

    status = True

    # xml tags and conversion dictionaries
    _header = '<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n<!-- ATLAS file meta-data catalog -->\n<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">\n'
    _tagsBEGIN_END = '<%s>%s</%s>\n'
    _tagBEGIN = '<%s>\n'
    _tagEND = '</%s>\n'
    # maps the PanDA metadata attribute names to the NG tag names
    _tagDICT = { 'ID':'guid', 'fsize':'size', 'md5sum':'md5sum', 'adler32':'ad32', 'name':'lfn', 'csumtypetobeset':'ad32', 'surl':'surl' }
    dic = {}
    dic['md5sum'] = '' # to avoid KeyError's with older python
    dic['adler32'] = ''
    dic['fsize'] = ''

    if os.path.exists(filenameIN):
        try:
            f = open(filenameIN, 'r')
        except Exception, e:
            tolog("!!WARNING!!1999!! Could not open file: %s, %s" % (filenameIN, str(e)))
            status = False
        else:
            # get the metadata
            xmlIN = f.read()
            f.close()

            xmlOUT = _header
            xmlOUT += _tagBEGIN % 'outputfiles'

            from xml.dom import minidom
            xmldoc = minidom.parseString(xmlIN)
            fileList = xmldoc.getElementsByTagName("File")

            # convert the metadata to NG format
            for _file in fileList:
                xmlOUT += _tagBEGIN % 'file'
                lfn = str(_file.getElementsByTagName("lfn")[0].getAttribute("name"))
                guid = str(_file.getAttribute("ID"))
                lrc_metadata_dom = _file.getElementsByTagName("metadata")
                for i in range(len(lrc_metadata_dom)):
                    _key = str(_file.getElementsByTagName("metadata")[i].getAttribute("att_name"))
                    _value = str(_file.getElementsByTagName("metadata")[i].getAttribute("att_value"))
                    # e.g. key = 'fsize', get the corresponding NG name ('size')
                    _keyNG = _tagDICT[_key]
                    xmlOUT += ' ' + _tagsBEGIN_END % (_keyNG, _value, _keyNG)

                xmlOUT += ' ' + _tagsBEGIN_END % ('guid', guid, 'guid')
                xmlOUT += ' ' + _tagsBEGIN_END % ('lfn', lfn, 'lfn')
                if datasetDict:
                    # per-lfn dataset when available, otherwise the default
                    try:
                        xmlOUT += ' ' + _tagsBEGIN_END % ('dataset', datasetDict[lfn], 'dataset')
                    except Exception, e:
                        tolog("!!WARNING!!2999!! datasetDict could not be used: %s (using default dataset instead)" % str(e))
                        xmlOUT += ' ' + _tagsBEGIN_END % ('dataset', dataset, 'dataset')
                else:
                    xmlOUT += ' ' + _tagsBEGIN_END % ('dataset', dataset, 'dataset')
                xmlOUT += ' ' + _tagsBEGIN_END % ('date', outsDict[lfn], 'date')
                xmlOUT += _tagEND % 'file'

            xmlOUT += _tagEND % 'outputfiles'

            tolog("Converted xml for NorduGrid / CERNVM")

            # write the new metadata to the OutputFiles.xml
            try:
                f = open(filenameOUT, 'w')
            except Exception, e:
                tolog("!!WARNING!!1999!! Could not create output file: %s, %s" % (filenameOUT, str(e)))
                status = False
            else:
                f.write(xmlOUT)
                f.close()
    else:
        status = False

    return status
def getOutFilesGuids(outFiles, workdir, experiment, TURL=False):
    """ get the outFilesGuids from the PFC

    Either generates the GUIDs (when the experiment declares no GUID source
    file) or extracts them from the PoolFileCatalog in the work directory.
    Returns (ec, pilotErrorDiag, outFilesGuids).
    """

    ec = 0
    pilotErrorDiag = ""

    # Get the experiment object and the GUID source filename
    thisExperiment = getExperiment(experiment)
    filename = thisExperiment.getGUIDSourceFilename()

    # If a source file should not be used (ie empty filename string), then generate the GUIDs here
    if filename == "":
        tolog("Pilot will generate GUIDs for the output files")
        outFilesGuids = []
        for _ in outFiles:
            guid = getGUID()
            outFilesGuids.append(guid if guid != "" else "- GUID generation failed -")
        return ec, pilotErrorDiag, outFilesGuids

    tolog("Pilot will get GUIDs for the output files from source: %s" % (filename))
    pfcFile = os.path.join(workdir, filename)

    # The PFC used for Event Service will be TURL based, use the corresponding file
    if TURL:
        pfcFile = pfcFile.replace(".xml", "TURL.xml")

    # Initialization: the guid list must have the same length as the file list
    outFilesGuids = [None] * len(outFiles)

    # make sure the PFC exists
    if not os.path.isfile(pfcFile):
        pilotErrorDiag = "PFC file does not exist: %s" % (pfcFile)
        tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
        ec = PilotErrors().ERR_MISSINGPFC
        return ec, pilotErrorDiag, outFilesGuids

    from xml.dom import minidom
    xmldoc = minidom.parse(pfcFile)
    for node in xmldoc.getElementsByTagName("File"):
        gpfn = str(node.getElementsByTagName("pfn")[0].getAttribute("name"))
        guid = str(node.getAttribute("ID"))
        # match the pfn against the expected output file names
        for index in range(len(outFiles)):
            if outFiles[index] == gpfn:
                outFilesGuids[index] = guid

    return ec, pilotErrorDiag, outFilesGuids
def verifyMultiTrf(jobParameterList, jobHomePackageList, jobTrfList, jobAtlasRelease):
    """ make sure that a multi-trf (or single trf) job is properly setup

    All four lists must end up with the same length; jobAtlasRelease may be
    padded when a single release is given for several trfs.
    Returns (ec, pilotErrorDiag, jobAtlasRelease).
    """

    error = PilotErrors()
    ec = 0
    pilotErrorDiag = ""

    n_pars = len(jobParameterList)
    n_home = len(jobHomePackageList)
    n_trf = len(jobTrfList)
    n_rel = len(jobAtlasRelease)

    # test jobs have multiple atlas releases defined, but not real tasks:
    # pad a single-entry release list to match the number of trfs,
    # e.g. jobAtlasRelease = ['14.0.0'] -> ['14.0.0', '14.0.0']
    if n_trf > n_rel and n_rel == 1:
        jobAtlasRelease = jobAtlasRelease * n_trf
        n_rel = len(jobAtlasRelease)

    if n_pars == n_home == n_trf == n_rel:
        if n_rel == 1:
            tolog("Multi-trf verification succeeded (single job)")
        else:
            tolog("Multi-trf verification succeeded")
    else:
        pilotErrorDiag = "Multi-trf verification failed: N(jobPars) eq %d, but N(homepackage,transformation,AtlasRelease) eq (%d,%d,%d)" %\
                         (n_pars, n_home, n_trf, n_rel)
        tolog("!!FAILED!!2999!! %s" % (pilotErrorDiag))
        ec = error.ERR_SETUPFAILURE

    return ec, pilotErrorDiag, jobAtlasRelease
def updateCopysetups(cmd3, transferType=None, useCT=None, directIn=None, useFileStager=None):
    """ Update the relevant copysetup fields for remote I/O or file stager """

    si = SiteInformation()

    _copysetupin = readpar('copysetupin')
    _copysetup = readpar('copysetup')

    # prefer the dedicated stage-in setup when one is defined
    if _copysetupin != "":
        field, value = 'copysetupin', _copysetupin
    else:
        field, value = 'copysetup', _copysetup

    si.updateCopysetup(cmd3, field, value, transferType=transferType, useCT=useCT,
                       directIn=directIn, useFileStager=useFileStager)
def addSPSetupToCmd(special_setup_cmd, cmd):
    """ Add the special command setup if it exists to the main run command

    The special command is squeezed in before the trf but after the setup
    scripts (i.e. just after the last ; of the command).
    """

    if special_setup_cmd == "":
        return cmd

    if not special_setup_cmd.endswith(";"):
        special_setup_cmd += ";"

    # a "-f <file>" option can itself contain extra ;-signs, so split that
    # tail off before searching for the last ;
    pos_f = cmd.rfind("-f ")
    if pos_f == -1:
        head, tail = cmd, ""
    else:
        head, tail = cmd[:pos_f], cmd[pos_f:]

    # insert the special setup after the last ; of the remaining command
    cut = head.rfind(";")
    cmd = head[:cut] + ";" + special_setup_cmd[:-1] + head[cut:] + tail
    tolog("Special setup command added to main run command")

    return cmd
def removeSkippedFromJobPars(fname, jobPars):
    """ remove skipped input files from jobPars

    :param fname: path to the skipped-files xml fragment
    :param jobPars: job parameter string to clean up
    :return: jobPars with any skipped LFNs removed
    """

    # get the skipped file names from the xml
    skipped = getLFNsFromSkippedXML(fname)

    if skipped == []:
        tolog("Did not find any skipped LFNs in: %s" % (fname))
    else:
        tolog("Removing skipped input files from jobPars")
        tolog("..skipped: %s" % str(skipped))
        tolog("..jobPars:\n%s" % (jobPars))
        for skip in skipped:
            tolog("..Removing: %s" % (skip))

            # try different styles: quoted with trailing comma, quoted,
            # unquoted with trailing comma, then the bare LFN
            _skip = "\'%s\'," % (skip)
            if _skip in jobPars:
                jobPars = jobPars.replace(_skip, '')
                tolog('..Removed %s from jobPars' % (_skip))
            else:
                _skip = "\'%s\'" % (skip)
                if _skip in jobPars:
                    jobPars = jobPars.replace(_skip, '')
                    tolog('..Removed %s from jobPars' % (_skip))
                else:
                    _skip = "%s," % (skip)
                    if _skip in jobPars:
                        # NOTE(review): only the bare LFN is replaced here, which leaves
                        # the trailing comma behind - kept as-is, but verify intent
                        jobPars = jobPars.replace(skip, '')
                        tolog('..Removed %s from jobPars' % (skip))
                    else:
                        if skip in jobPars:
                            jobPars = jobPars.replace(skip, '')
                            # was a bare print statement; use tolog like the
                            # rest of the module so the message reaches the pilot log
                            tolog('..Removed %s from jobPars' % (skip))
                        else:
                            # nothing to remove
                            tolog("..Found nothing to remove from jobPars: %s" % (jobPars))

    return jobPars
def getLFNsFromSkippedXML(fname):
    """ extract the list of skipped files from the skipped xml

    The skipped xml is only an XML fragment (a sequence of <File> elements),
    so an XML header/footer is added before parsing.

    :param fname: path of the skipped xml fragment
    :return: list of LFNs (empty if the file could not be opened)
    """

    lfns = []
    try:
        f = open(fname, "r")
    except Exception as e:  # "as" form works on python 2.6+ as well as 3
        tolog("Warning: could not open skipped xml file: %s" % str(e))
    else:
        pre_xml = f.read()
        f.close()

        # add an XML header etc since the skipped xml is just an XML fragment
        # so that it can be processed
        xmlstr = '<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n'
        xmlstr += "<!-- Edited By POOL -->\n"
        xmlstr += '<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">\n'
        xmlstr += "<POOLFILECATALOG>\n"
        xmlstr += pre_xml
        xmlstr += "</POOLFILECATALOG>\n"

        from xml.dom import minidom
        xmldoc = minidom.parseString(xmlstr)
        fileList = xmldoc.getElementsByTagName("File")
        for thisfile in fileList:
            lfns.append(str(thisfile.getElementsByTagName("lfn")[0].getAttribute("name")))

    return lfns
def setEnvVars(sitename):
    """ Set ATLAS_CONDDB if necessary, plus a few specially requested env vars """

    # the wrapper normally sets ATLAS_CONDDB; fall back to schedconfig if it did not
    if 'ATLAS_CONDDB' not in os.environ:
        atlas_conddb = readpar('gatekeeper')
        if atlas_conddb != "to.be.set":
            os.environ["ATLAS_CONDDB"] = atlas_conddb
            tolog("Note: ATLAS_CONDDB was not set by the pilot wrapper script")
            tolog("The pilot has set ATLAS_CONDDB to: %s" % (atlas_conddb))
        else:
            tolog("Warning: ATLAS_CONDDB was not set by the pilot wrapper and schedconfig.gatekeeper value is to.be.set (pilot will take no action)")

    # set specially requested env vars
    os.environ["PANDA_SITE_NAME"] = sitename
    tolog("Set PANDA_SITE_NAME = %s" % (sitename))

    copytool = readpar("copytoolin")
    if copytool == "":
        copytool = readpar("copytool")
    # strip any setup part appended after a ^ separator
    if "^" in copytool:
        copytool = copytool.split("^")[0]
    os.environ["COPY_TOOL"] = copytool
    tolog("Set COPY_TOOL = %s" % (copytool))
def addFullPathsAsInput(jobPars, full_paths_dictionary):
    """ Replace LFNs with full root paths

    jobPars = .. --inputEVNTFile=EVNT.01416937._000003.pool.root,.. ->
    jobPars = .. --inputEVNTFile=root://../EVNT.01416937._000003.pool.root,..
    FORMAT: full_paths_dictionary = { 'LFN1': {'pfn': 'protocol://fullpath/LFN1'}, .. }
    """

    if "--inputEVNTFile" not in jobPars:
        return jobPars

    pattern = r"\'?\-\-inputEVNTFile\=(.+)\'?"
    for token in re.findall(r'\S+', jobPars):
        matches = re.findall(pattern, token)
        if not matches:
            continue

        input_files = matches[0]
        # the greedy match can pick up a trailing quote; drop it
        if input_files.endswith("\'"):
            input_files = input_files[:-1]

        if len(input_files) > 0:
            for lfn in input_files.split(','):
                if lfn in full_paths_dictionary:
                    full_path = full_paths_dictionary[lfn]['pfn']
                    # only substitute once per LFN
                    if full_path not in jobPars:
                        jobPars = jobPars.replace(lfn, full_path)
                else:
                    tolog("!!WARNING!!3435!! Did not find LFN=%s" % lfn)
        else:
            tolog(
                "!!WARNING!!3434!! Zero length list, cannot update LFN:s with full paths (remote I/O will not work)")

    return jobPars
def updateRunCommandList(runCommandList, pworkdir, jobId, statusPFCTurl, analysisJob, usedFAXandDirectIO, hasInput, prodDBlockToken, full_paths_dictionary=None):
    """ update the run command list if --directIn is no longer needed

    The payload run commands are rewritten depending on how the input files
    were (or were not) transferred:
    - strip --createTAGFileForES unconditionally (event service leftover)
    - strip --directIn/--useFileStager when all files were copied to scratch
    - strip --usePFCTurl when the TURL based PFC could not be created
    - strip --oldPrefix/--newPrefix and add --usePFCTurl for FAX + direct I/O
    - for the new site movers ('newmover*'), same prefix cleanup, plus
      --directIn in direct-access mode
    - finally, when full_paths_dictionary is given, replace LFNs in the
      command with full paths (remote I/O)

    Returns the updated list of run commands.
    """
    # the method is using the file state dictionary

    # remove later
    dumpFileStates(pworkdir, jobId, ftype="input")

    # remove any instruction regarding tag file creation for event service jobs
    _runCommandList = []
    for cmd in runCommandList:
        if "--createTAGFileForES" in cmd:
            cmd = cmd.replace("--createTAGFileForES","")
        _runCommandList.append(cmd)
    runCommandList = _runCommandList

    # no need to continue if no input files
    if not hasInput:
        return runCommandList

    # are there only copy_to_scratch transfer modes in the file state dictionary?
    # if so, remove any lingering --directIn instruction
    only_copy_to_scratch = hasOnlyCopyToScratch(pworkdir, jobId)
    if only_copy_to_scratch or 'local' in prodDBlockToken:
        # if hasOnlyCopyToScratch(pworkdir, jobId): # python bug? does not work, have to use previous two lines?
        _runCommandList = []

        if only_copy_to_scratch:
            tolog("There are only copy_to_scratch transfer modes in file state dictionary")

        for cmd in runCommandList:
            # remove the --directIn string if present
            if "--directIn" in cmd:
                tolog("(Removing --directIn instruction from run command since it is not needed)")
                cmd = cmd.replace("--directIn", "")
            # remove the --useFileStager string if present
            if "--useFileStager" in cmd:
                tolog("(Removing --useFileStager instruction from run command since it is not needed)")
                cmd = cmd.replace("--useFileStager", "")
            # remove additional run options if creation of TURL based PFC failed
            if statusPFCTurl == False: # (note: can also be None, so do not use 'if not statusPFCTurl')
                if "--usePFCTurl" in cmd:
                    tolog("(Removing --usePFCTurl instruction from run command since it is not needed)")
                    cmd = cmd.replace(" --usePFCTurl", "")

            tolog("Updated run command: %s" % (cmd))
            _runCommandList.append(cmd)
    else:
        tolog("Nothing to update in run command list related to copy-to-scratch")
        _runCommandList = runCommandList

    # was FAX used as primary site mover in combination with direct I/O?
    if usedFAXandDirectIO == True:
        tolog("Since FAX was used as primary site mover in combination with direct I/O, the run command list need to be updated")
        _runCommandList2 = []

        for cmd in _runCommandList:
            # remove the --oldPrefix
            if "--oldPrefix" in cmd:
                pattern = "(\-\-oldPrefix\ \S+)"
                cmd = removePattern(cmd, pattern)
                tolog("(Removed --oldPrefix pattern)")
            # remove the --newPrefix
            if "--newPrefix" in cmd:
                pattern = "(\-\-newPrefix\ \S+)"
                cmd = removePattern(cmd, pattern)
                tolog("(Removed --newPrefix pattern)")
            # add the --usePFCTurl if not there already
            if not "--usePFCTurl" in cmd and analysisJob:
                cmd += " --usePFCTurl"
                tolog("(Added --usePFCTurl)")

            tolog("Updated run command: %s" % (cmd))
            _runCommandList2.append(cmd)
        _runCommandList = _runCommandList2

    ### new movers quick integration: reuse usedFAXandDirectIO variable with special meaning
    ### to avoid any LFC and prefixes lookups in transformation scripts
    ### since new movers already form proper pfn values
    ### proper workflow is required: to be reimplemented later
    if usedFAXandDirectIO == 'newmover' or usedFAXandDirectIO == 'newmover-directaccess':
        _runCommandList2 = []

        for cmd in _runCommandList:
            # remove --oldPrefix, --newPrefix
            # add --usePFCTurl
            if "--oldPrefix" in cmd:
                pattern = "(\-\-oldPrefix\ \S+)"
                cmd = removePattern(cmd, pattern)
                tolog("(Removed --oldPrefix pattern)")
            if "--newPrefix" in cmd:
                pattern = "(\-\-newPrefix\ \S+)"
                cmd = removePattern(cmd, pattern)
                tolog("(Removed --newPrefix pattern)")
            if "--usePFCTurl" not in cmd and analysisJob:
                cmd += " --usePFCTurl"
                tolog("(Added --usePFCTurl)")

            # add --directIn if need
            if usedFAXandDirectIO == 'newmover-directaccess':
                if "--directIn" not in cmd and analysisJob:
                    cmd += " --directIn"
                    tolog("(Added --directIn)")

            tolog("Updated run command: %s" % cmd)

            _runCommandList2.append(cmd)
        _runCommandList = _runCommandList2

    # for remote i/o in production jobs, we need to update the jobPars
    if full_paths_dictionary:
        _runCommandList3 = []
        for cmd in _runCommandList:
            # NOTE: inner check is redundant given the outer truthiness test,
            # kept as in the original control flow
            if full_paths_dictionary and full_paths_dictionary != {}:
                cmd = addFullPathsAsInput(cmd, full_paths_dictionary)
                tolog("Updated input file list with full paths: %s" % cmd)
            else:
                tolog("!!WARNING!!5555!! Empty full paths dictionary (direct I/O will not work)")
            _runCommandList3.append(cmd)
        _runCommandList = _runCommandList3

    tolog("Dumping final input file states")
    dumpFileStates(pworkdir, jobId, ftype="input")

    return _runCommandList
def getStdoutFilename(workdir, _stdout, current_job_number, number_of_jobs):
    """ Return a proper stdout filename """

    # for multi-job payloads, tag the file name with the current job number
    name = _stdout if number_of_jobs <= 1 else _stdout.replace(".txt", "_%d.txt" % current_job_number)
    return os.path.join(workdir, name)
def findVmPeaks(setup):
    """ Find the VmPeak values

    Runs the external VmPeak.py script (with the given setup prefix) and
    parses its output file. Returns (vmPeakMax, vmPeakMean, RSSMean);
    all three are 0 when the script fails or its output cannot be parsed.
    """

    vmPeakMax = 0
    vmPeakMean = 0
    RSSMean = 0

    # matched_lines = grep(["Py\:PerfMonSvc\s*INFO\s*VmPeak:\s*[0-9]"], stdout_filename)
    # pattern = "([0-9]+)"
    # # now extract the digits from the found lines
    # N = 0
    # vmPeaks = 0
    # for line in matched_lines:
    # _vmPeak = re.search(pattern, line)
    # if _vmPeak:
    # N += 1
    # vmPeak = _vmPeak.group(1)
    # if vmPeak > vmPeakMax:
    # vmPeakMax = vmPeak
    # vmPeaks += vmPeak

    # use the VmPeak script to get all values
    cmd = "%s python VmPeak.py >Pilot_VmPeak.txt" % (setup)
    try:
        ec, output = timedCommand(cmd, timeout=getProperTimeout(setup))
    except Exception, e:
        tolog("!!WARNING!!1111!! Failed to execute VmPeak script: %s" % (e))
    else:
        # now get the values from the default file written by the script
        file_name = os.path.join(os.getcwd(), "VmPeak_values.txt")
        if ec == 0:
            if os.path.exists(file_name):
                try:
                    f = open(file_name, "r")
                except Exception, e:
                    tolog("!!WARNING!!1111!! Failed to open VmPeak values file: %s" % (e))
                else:
                    _values = f.read()
                    f.close()
                    # expected file format: "<vmPeakMax>,<vmPeakMean>,<RSSMean>"
                    values = _values.split(",")
                    try:
                        vmPeakMax = int(values[0])
                        vmPeakMean = int(values[1])
                        RSSMean = int(values[2])
                    except Exception, e:
                        tolog("!!WARNING!!1111!! VmPeak exception: %s" % (e))
            else:
                tolog("Note: File %s does not exist" % (file_name))
        else:
            tolog("!!WARNING!!1111!! VmPeak script returned: %d, %s" % (ec, output))

    tolog("[VmPeak] vmPeakMax=%d" % (vmPeakMax))
    tolog("[VmPeak] vmPeakMean=%d" % (vmPeakMean))
    tolog("[VmPeak] RSSMean=%d" % (RSSMean))

    return vmPeakMax, vmPeakMean, RSSMean
def getSourceSetup(runCommand):
    """ Extract the source setup command from the run command

    runCommand -- either a command string or a dictionary with
    'environment', 'interpreter', 'payload' and 'parameters' keys.
    Returns the first "source /...;" fragment, or "" when none is found.
    """

    # a run command can also be a dictionary (new style); flatten it first.
    # isinstance is preferred over "type(x) is dict" (also accepts subclasses)
    if isinstance(runCommand, dict):
        to_str = " ".join(runCommand['environment'])
        to_str = "%s %s %s %s" % (to_str, runCommand["interpreter"], runCommand["payload"], runCommand["parameters"])
        runCommand = to_str

    setup = ""
    # non-greedy match up to (and including) the first semicolon
    pattern = re.compile(r"(source /.+?;)")
    s = re.findall(pattern, runCommand)
    if s != []:
        setup = s[0]

    return setup
| apache-2.0 |
matejc/searx | searx/engines/currency_convert.py | 3 | 2530 | from datetime import datetime
import re
import os
import json
import unicodedata
categories = []
url = 'https://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=sl1d1t1&s={query}=X'
weight = 100
parser_re = re.compile(u'.*?(\\d+(?:\\.\\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I) # noqa
db = 1
def normalize_name(name):
    # lower-case, turn hyphens into spaces, drop trailing plural 's' chars,
    # collapse repeated spaces, then apply NFKD unicode normalization
    cleaned = name.lower().replace('-', ' ').rstrip('s')
    cleaned = re.sub(' +', ' ', cleaned)
    return unicodedata.normalize('NFKD', cleaned).lower()
def name_to_iso4217(name):
    # map a human currency name to its ISO 4217 code via the loaded db;
    # fall back to the normalized name itself when unknown
    global db
    normalized = normalize_name(name)
    return db['names'].get(normalized, [normalized])[0]
def iso4217_to_name(iso4217, language):
    # look up the localized currency name; fall back to the code itself
    global db
    names_by_language = db['iso4217'].get(iso4217, {})
    return names_by_language.get(language, iso4217)
def request(query, params):
    """Parse an '<amount> <from> in|to <to>' query and fill in the request
    parameters for the Yahoo finance CSV endpoint.

    Python 2 code: query arrives as a byte string and is decoded here.
    Returns params unchanged when the query does not match.
    """
    m = parser_re.match(unicode(query, 'utf8'))
    if not m:
        # wrong query
        return params
    ammount, from_currency, to_currency = m.groups()
    ammount = float(ammount)
    from_currency = name_to_iso4217(from_currency.strip())
    to_currency = name_to_iso4217(to_currency.strip())
    # Yahoo expects the concatenated currency pair, e.g. "USDEUR"
    q = (from_currency + to_currency).upper()
    params['url'] = url.format(query=q)
    params['ammount'] = ammount
    params['from'] = from_currency
    params['to'] = to_currency
    params['from_name'] = iso4217_to_name(from_currency, 'en')
    params['to_name'] = iso4217_to_name(to_currency, 'en')
    return params
def response(resp):
    """Build the answer and result URL from the Yahoo CSV response.

    resp -- response object carrying .text (CSV "pair,rate,date") and
    .search_params as filled in by request(). Returns a one-element
    results list, or [] when the CSV cannot be parsed.
    """
    results = []
    try:
        _, conversion_rate, _ = resp.text.split(',', 2)
        conversion_rate = float(conversion_rate)
    except ValueError:
        # malformed CSV (wrong field count or non-numeric rate); a bare
        # except here would also have hidden programming errors
        return results

    answer = '{0} {1} = {2} {3}, 1 {1} ({5}) = {4} {3} ({6})'.format(
        resp.search_params['ammount'],
        resp.search_params['from'],
        resp.search_params['ammount'] * conversion_rate,
        resp.search_params['to'],
        conversion_rate,
        resp.search_params['from_name'],
        resp.search_params['to_name'],
    )

    now_date = datetime.now().strftime('%Y%m%d')
    url = 'https://finance.yahoo.com/currency/converter-results/{0}/{1}-{2}-to-{3}.html'  # noqa
    url = url.format(
        now_date,
        resp.search_params['ammount'],
        resp.search_params['from'].lower(),
        resp.search_params['to'].lower()
    )
    results.append({'answer': answer, 'url': url})
    return results
def load():
    """Load the currency database (names and ISO 4217 codes) into the
    module-level `db` dictionary."""
    global db
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # context manager ensures the file handle is closed (the original
    # open(...).read() leaked the handle)
    with open(current_dir + "/../data/currencies.json") as json_file:
        db = json.loads(json_file.read())
load()
| agpl-3.0 |
cefn/firmware-codesign-readinglog | ui/index.py | 1 | 2487 | #!/usr/bin/python
import sys,os,glob,urlparse,urllib,subprocess
def setcwd():
    # resolve the script's real location and make it the working directory,
    # so relative paths (ui files, query files) load correctly
    script_path = os.path.realpath(sys.argv[0])
    os.chdir(os.path.dirname(script_path))
# sets working directory based on path to index.py
setcwd()
# loads local python modules, relative to index.py
sys.path.append(os.path.realpath('py'))
from logx import Viewer,Editor,debug_trace
'''
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication
'''
from PyQt4 import uic
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QObject,pyqtSlot
notesdir = "../notes"
pdfdir = "../papers"
startquery = "./xq/index.xq"
class PdfAdaptor(QObject):
    """Qt bridge object exposed to in-page JavaScript for opening PDFs."""

    @pyqtSlot(str)
    def loadid(self, pdfid):
        # resolve a bare paper id to "<pdfdir>/<id>.pdf" and open it
        pdfid = str(pdfid)
        pdfpath = pdfdir + os.sep + pdfid + '.pdf'
        self.loadpdf(pdfpath)

    @pyqtSlot(str)
    def loadpdf(self, pdfpath):
        # hand the absolute path to the desktop's default PDF viewer
        # (xdg-open: Linux-only — NOTE(review): not portable to macOS/Windows)
        pdfpath = str(pdfpath)
        pdfpath = os.path.realpath(pdfpath)
        subprocess.Popen(['xdg-open', pdfpath])
def path2url(path):
    # convert a local filesystem path to a file:// URL
    # (Python 2 API: urlparse/urllib.pathname2url; used by the PyQt5 variant)
    return urlparse.urljoin(
        'file:', urllib.pathname2url(path))
def main(argv):
    """Launch the Qt application: an XQuery-driven viewer over the HTML
    notes in `notesdir`, an editor view, and a PDF-opening bridge.

    argv -- optional; argv[0] is a notes file to open in the editor.
    """
    querypath = os.path.realpath(startquery)
    sourcedir = os.path.realpath(notesdir)
    sourcepaths = glob.glob(sourcedir + "/*.html")
    # for PyQt4
    sourceurls = ",".join([("file://" + path) for path in sourcepaths])
    # for PyQt5
    #sourceurls = ",".join([path2url(path) for path in sourcepaths])
    # names injected into the XQuery context: [name, value, namespace]
    xquerynames = [
        ['sourceurls', sourceurls,'http://cefn.com/logx']
    ]
    javascriptnames = dict()
    # create application context
    app = QApplication(sys.argv)
    ui = uic.loadUi('index.ui')
    editor = Editor(focuspath=None,view=ui.editView,javascriptnames=javascriptnames,xquerynames=xquerynames)
    viewer = Viewer(querypath=querypath,view=ui.navView,javascriptnames=javascriptnames,xquerynames=xquerynames)
    pdf = PdfAdaptor()
    # objects exposed to in-page JavaScript by these names
    javascriptnames['editor']=editor
    javascriptnames['viewer']=viewer
    javascriptnames['pdf']=pdf
    # subscribe viewer to refresh whenever source files refresh
    # implicitly bound through 'sourcepaths' xquery name
    for sourcepath in sourcepaths:
        viewer.registersource(sourcepath)
    ui.show()
    # edit a notes file, if specified
    if len(argv) > 0:
        editor.focuspath = os.path.realpath(argv[0])
    # load the view
    viewer.render()
    sys.exit(app.exec_())
if __name__ == "__main__":
main(sys.argv[1:]) | mit |
bradh/samba | third_party/waf/wafadmin/Environment.py | 32 | 5044 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""Environment representation
There is one gotcha: getitem returns [] if the contents evals to False
This means env['foo'] = {}; print env['foo'] will print [] not {}
"""
import os, copy, re
import Logs, Options, Utils
from Constants import *
re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)
class Environment(object):
	"""A safe-to-use dictionary, but do not attach functions to it please (break cPickle)
	An environment instance can be stored into a file and loaded easily

	Lookups fall through to an optional 'parent' environment (see copy()),
	so a child environment overlays its own 'table' on the parent chain.
	"""
	__slots__ = ("table", "parent")
	def __init__(self, filename=None):
		# 'table' holds this instance's own variables
		self.table = {}
		#self.parent = None

		if filename:
			self.load(filename)

	def __contains__(self, key):
		# membership check walks the parent chain
		if key in self.table: return True
		try: return self.parent.__contains__(key)
		except AttributeError: return False # parent may not exist

	def __str__(self):
		# collect keys from the whole parent chain for a sorted dump
		keys = set()
		cur = self
		while cur:
			keys.update(cur.table.keys())
			cur = getattr(cur, 'parent', None)
		keys = list(keys)
		keys.sort()
		return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in keys])

	def __getitem__(self, key):
		# gotcha (see module docstring): returns [] when the key is missing
		# anywhere in the chain, so {} and other falsy values print as []
		try:
			while 1:
				x = self.table.get(key, None)
				if not x is None:
					return x
				self = self.parent
		except AttributeError:
			return []

	def __setitem__(self, key, value):
		self.table[key] = value

	def __delitem__(self, key):
		# only deletes from this instance's own table, never the parents
		del self.table[key]

	def pop(self, key, *args):
		if len(args):
			return self.table.pop(key, *args)
		return self.table.pop(key)

	def set_variant(self, name):
		self.table[VARIANT] = name

	def variant(self):
		# walk the parent chain for the variant name, defaulting to DEFAULT
		try:
			while 1:
				x = self.table.get(VARIANT, None)
				if not x is None:
					return x
				self = self.parent
		except AttributeError:
			return DEFAULT

	def copy(self):
		# TODO waf 1.6 rename this method derive, #368
		# cheap copy: the child shares data through the parent pointer
		newenv = Environment()
		newenv.parent = self
		return newenv

	def detach(self):
		"""TODO try it
		modifying the original env will not change the copy"""
		tbl = self.get_merged_dict()
		try:
			delattr(self, 'parent')
		except AttributeError:
			pass
		else:
			keys = tbl.keys()
			for x in keys:
				tbl[x] = copy.deepcopy(tbl[x])
			self.table = tbl

	def get_flat(self, key):
		# flatten a list value into a space-separated string
		s = self[key]
		if isinstance(s, str): return s
		return ' '.join(s)

	def _get_list_value_for_modification(self, key):
		"""Gets a value that must be a list for further modification. The
		list may be modified inplace and there is no need to
		"self.table[var] = value" afterwards.
		"""
		try:
			value = self.table[key]
		except KeyError:
			# value inherited from a parent is copied so the parent
			# environment is never mutated through the child
			try: value = self.parent[key]
			except AttributeError: value = []
			if isinstance(value, list):
				value = value[:]
			else:
				value = [value]
		else:
			if not isinstance(value, list):
				value = [value]
		self.table[key] = value
		return value

	def append_value(self, var, value):
		current_value = self._get_list_value_for_modification(var)

		if isinstance(value, list):
			current_value.extend(value)
		else:
			current_value.append(value)

	def prepend_value(self, var, value):
		current_value = self._get_list_value_for_modification(var)

		if isinstance(value, list):
			current_value = value + current_value
			# a new list: update the dictionary entry
			self.table[var] = current_value
		else:
			current_value.insert(0, value)

	# prepend unique would be ambiguous
	def append_unique(self, var, value):
		current_value = self._get_list_value_for_modification(var)

		if isinstance(value, list):
			for value_item in value:
				if value_item not in current_value:
					current_value.append(value_item)
		else:
			if value not in current_value:
				current_value.append(value)

	def get_merged_dict(self):
		"""compute a merged table"""
		table_list = []
		env = self
		while 1:
			table_list.insert(0, env.table)
			try: env = env.parent
			except AttributeError: break
		merged_table = {}
		# later (child) tables override earlier (parent) ones
		for table in table_list:
			merged_table.update(table)
		return merged_table

	def store(self, filename):
		"Write the variables into a file"
		file = open(filename, 'w')
		merged_table = self.get_merged_dict()
		keys = list(merged_table.keys())
		keys.sort()
		for k in keys: file.write('%s = %r\n' % (k, merged_table[k]))
		file.close()

	def load(self, filename):
		"Retrieve the variables from a file"
		tbl = self.table
		code = Utils.readf(filename)
		# each "name = repr(value)" line is eval'd back into a python value
		for m in re_imp.finditer(code):
			g = m.group
			tbl[g(2)] = eval(g(3))
		Logs.debug('env: %s', self.table)

	def get_destdir(self):
		"return the destdir, useful for installing"
		if self.__getitem__('NOINSTALL'): return ''
		return Options.options.destdir

	def update(self, d):
		for k, v in d.iteritems():
			self[k] = v

	def __getattr__(self, name):
		# attribute access outside __slots__ is routed to the dictionary,
		# so env.FOO is equivalent to env['FOO']
		if name in self.__slots__:
			return object.__getattr__(self, name)
		else:
			return self[name]

	def __setattr__(self, name, value):
		if name in self.__slots__:
			object.__setattr__(self, name, value)
		else:
			self[name] = value

	def __delattr__(self, name):
		if name in self.__slots__:
			object.__delattr__(self, name)
		else:
			del self[name]
| gpl-3.0 |
jason-neal/spectrum_overload | spectrum_overload/differential.py | 1 | 1536 | # -*- coding: utf-8 -*-
"""Differential Class which takes the difference between two spectra."""
from typing import Any, Dict, Optional
from spectrum_overload.spectrum import Spectrum
# TODO: Add in s-profile from
# Ferluga 1997: Separating the spectra of binary stars-I. A simple method: Secondary reconstruction
class DifferentialSpectrum(object):
    """Represent the difference between two calibrated spectra."""

    def __init__(self, Spectrum1: Spectrum, Spectrum2: Spectrum) -> None:
        """Initialise class with both spectra."""
        if not Spectrum1.calibrated or not Spectrum2.calibrated:
            raise ValueError("Input spectra are not calibrated.")
        self.spec1 = Spectrum1
        self.spec2 = Spectrum2
        self.params = None  # type: Optional[Dict[str, Any]]

    def barycentric_correct(self):
        """Barycentric correct each spectra."""
        pass

    def rest_frame(self, frame):
        """Change rest frame to one of the spectra."""
        pass

    def diff(self):
        """Calculate difference between the two spectra."""
        # TODO: Access interpolations
        return self.spec1 - self.spec2

    def sort(self, method: str = "time"):
        """Sort spectra in specific order. e.g. time, reversed."""
        pass

    def swap(self):
        """Swap order of the two spectra."""
        self.spec2, self.spec1 = self.spec1, self.spec2

    def add_orbital_params(self, params: Dict[str, Any]):
        """A dictionary of orbital parameters to use for shifting frames."""
        self.params = params
| mit |
domzsalvador/Final-project | appengine_config.py | 36 | 3078 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Edit the code below to add you own hooks and modify tailbone's behavior
## Base Tailbone overrides and hooks
## Set the global default namespace
# def namespace_manager_default_namespace_for_request():
# return "my_custom_namespace"
## Use JSONP for all apis
# tailbone_JSONP = False
# Use CORS for all apis
tailbone_CORS = True
tailbone_CORS_RESTRICTED_DOMAINS = ["http://localhost"]
## modify the below functions to change how users are identified
# tailbone_is_current_user_admin =
# tailbone_get_current_user =
# tailbone_create_login_url =
# tailbone_create_logout_url =
## Use cloud store instead of blobstore
# tailboneFiles_CLOUDSTORE = False
## Store counts for restful models accessible in HEAD query
# tailboneRestful_METADATA = False
## If specified is a list of tailbone.restful.ScopedModel objects these will be the only ones allowed.
## This is a next level step of model restriction to your db, this replaces validation.json
# from google.appengine.ext import ndb
# from tailbone.restful import ScopedModel
# class MyModel(ScopedModel):
# stuff = ndb.IntegerProperty()
# tailboneRestful_DEFINED_MODELS = {"mymodel": MyModel}
# tailboneRestful_RESTRICT_TO_DEFINED_MODELS = False
## Protected model names gets overridden by RESTRICTED_MODELS
# tailboneRestful_PROTECTED_MODEL_NAMES = ["(?i)tailbone.*", "custom", "(?i)users"]
## Proxy can only be used for the restricted domains if specified
# tailboneProxy_RESTRICTED_DOMAINS = ["google.com"]
## Cloud store bucket to use default is your application id
# tailboneCloudstore_BUCKET = "mybucketname"
# tailboneTurn_RESTIRCTED_DOMAINS = ["localhost"]
# tailboneTurn_SECRET = "notasecret"
# tailboneMesh_ENABLE_TURN = True
# tailboneMesh_ENABLE_WEBSOCKET = True
## Seconds until room expires
# tailboneMesh_ROOM_EXPIRATION = 86400
## Protected site
# tailboneStaticProtected_PASSWORD = "mypassword"
## the base path for the protected site can change to deploy or something else defaults to app
# tailboneStaticProtected_BASE_PATH = "app"
## Custom load balanced compute engine instance
# tailboneCustomCE_STARTUP_SCRIPT = """
# apt-get install build-essential
# curl -O http://nodejs.org/dist/v0.10.15/node-v0.10.15.tar.gz
# tar xvfz node-v0.10.15.tar.gz
# cd node-v0.10.15
# ./configure
# make
# make install
# cd ..
# rm -rf node-v0.10.15
# rm -f node-v0.10.15.tar.gz
# cat >server.js <<EOL
# %s
# EOL
# npm install ws
# node server.js
# """ % (open("client/mywebsocketserver.js").read(),) | apache-2.0 |
guorendong/iridium-browser-ubuntu | tools/swarming_client/third_party/requests/packages/urllib3/packages/six.py | 2375 | 11628 | """Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3

if PY3:
    # native text/binary types on Python 3
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    # Python 2 equivalents (separate str/unicode, long, old-style classes)
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot."""
    __import__(name)
    # __import__ returns the top-level package for dotted names,
    # so fetch the leaf module from sys.modules instead
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first access, caches it on the
    instance, then removes itself from the class (one-shot lazy loading)."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()  # _resolve() is provided by subclasses
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # on Python 3 the new name defaults to the attribute name
            self.mod = name if new is None else new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            # module name defaults to the attribute name on Python 3
            self.mod = name if new_mod is None else new_mod
            if new_attr is None:
                # fall back first to the old attribute name, then to name
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""
    # the moved attributes are attached afterwards via setattr (see the
    # _moved_attributes loop below)
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
    """Add an item to six.moves."""
    # attach the lazy descriptor to the moves pseudo-module's class
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # a resolved move may live directly on the module instance instead
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# attribute names that differ between Python 2 and 3; the accessor
# helpers and iteration wrappers below are built from these strings
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_code = "__code__"
    _func_defaults = "__defaults__"

    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_code = "func_code"
    _func_defaults = "func_defaults"

    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
try:
    advance_iterator = next
except NameError:
    # Python < 2.6 has no builtin next(); emulate it via the .next() method
    def advance_iterator(it):
        return it.next()
next = advance_iterator
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; the function is returned as-is
        return unbound

    Iterator = object

    def callable(obj):
        # callable() was removed in Python 3.0-3.1; emulate it by looking
        # for __call__ anywhere in the type's MRO
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    class Iterator(object):
        # bridge base class: subclasses define __next__ (Python 3 style)
        # and still iterate correctly under Python 2's .next() protocol
        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")

# accessors built from the version-specific attribute names defined above
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
    """Return an iterator over the keys of a dictionary."""
    method = getattr(d, _iterkeys)
    return iter(method())
def itervalues(d):
    """Return an iterator over the values of a dictionary."""
    method = getattr(d, _itervalues)
    return iter(method())
def iteritems(d):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    method = getattr(d, _iteritems)
    return iter(method())
if PY3:
    def b(s):
        # byte literal: source text is encoded as latin-1
        return s.encode("latin-1")
    def u(s):
        return s
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    def u(s):
        # interpret escape sequences so u("\\u00e9") works like a u-literal
        return unicode(s, "unicode_escape")
    int2byte = chr
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    import builtins
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    print_ = getattr(builtins, "print")
    del builtins

else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # default to the caller's globals/locals
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")

    # the Python 2 three-argument raise syntax is a SyntaxError on Python 3,
    # so it must be hidden inside an exec'd string
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")

    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        # if any argument, sep or end is unicode, write everything as unicode
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)

_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Create a base class with a metaclass."""
    # build the intermediate class explicitly: meta(name, bases, namespace)
    name, bases, namespace = "NewBase", (base,), {}
    return meta(name, bases, namespace)
| bsd-3-clause |
sidrakesh93/grpc-tools | benchmarking/performance_db/performance_db_frontend/app/views.py | 1 | 3289 | #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""View for the front-end."""
from django import shortcuts
from user_data import UserData
user_data = UserData()
def display_performance_database(request):
    """Render the table page listing every user's performance data."""
    context = {'all_users_data': user_data.get_all_users_data()}
    return shortcuts.render(request, 'data_table.html', context)
def display_configs(request):
    """Render the static configuration page (no template context needed)."""
    empty_context = {}
    return shortcuts.render(request, 'configs.html', empty_context)
def display_general_statistic(request, metric):
    """Render the general statistic page for a single metric."""
    # Delegate to the shared renderer used for all general metric plots.
    response = general_statistic_renderer(request, metric)
    return response
def general_statistic_renderer(request, metric):
    """Render a plot page comparing all users on one metric."""
    all_users_data = user_data.get_all_users_single_metric_data(metric)
    context = {'metric': get_metric_full_desc(metric),
               'all_users_data': all_users_data}
    return shortcuts.render(request, 'general_plots.html', context)
def display_user_metrics(request, username):
    """Render the metric plots page for a single user."""
    result = user_data.get_single_user_data(username)
    # result[0] feeds the 'username' slot, result[1] the 'user_data' slot.
    context = {'username': result[0], 'user_data': result[1]}
    return shortcuts.render(request, 'user_plots.html', context)
def get_metric_full_desc(metric):
    """Map a short metric identifier to its human-readable display name.

    Raises ``KeyError`` for an unknown identifier.
    """
    descriptions = {
        'qps': 'Queries Per Second',
        'qpspercore': 'QPS Per Core',
        'perc50': '50th Percentile Latency',
        'perc90': '90th Percentile Latency',
        'perc95': '95th Percentile Latency',
        'perc99': '99th Percentile Latency',
        'perc99point9': '99.9th Percentile Latency',
        'serversystime': 'Server System Time',
        'serverusertime': 'Server User Time',
        'clientsystime': 'Client System Time',
        'clientusertime': 'Client User Time',
    }
    return descriptions[metric]
| bsd-3-clause |
DSLituiev/scikit-learn | sklearn/cluster/k_means_.py | 19 | 55794 | """K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Choose ``n_clusters`` seed points for k-means via k-means++.
    Parameters
    -----------
    X: array or sparse matrix, shape (n_samples, n_features)
        Data to draw the seeds from. Double precision input avoids a
        memory copy downstream.
    n_clusters: integer
        How many seeds to pick.
    x_squared_norms: array, shape (n_samples,)
        Precomputed squared Euclidean norm of every sample.
    random_state: numpy.RandomState
        Source of randomness for the seeding.
    n_local_trials: integer, optional
        Candidate seeds drawn per new center (after the first); the one
        reducing the potential the most is kept. ``None`` selects
        ``2 + log(k)`` trials, the default.
    Notes
    -----
    Implements Arthur, D. and Vassilvitskii, S. "k-means++: the
    advantages of careful seeding", ACM-SIAM symposium on Discrete
    algorithms, 2007, ported from the authors' kMeansppTest.zip
    (http://www.stanford.edu/~darthur/kMeansppTest.zip).
    """
    n_samples, n_features = X.shape
    centers = np.empty((n_clusters, n_features))
    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
    if n_local_trials is None:
        # Heuristic used (but not formally reported) by the original
        # authors: grow the trial count logarithmically with k.
        n_local_trials = 2 + int(np.log(n_clusters))
    # The very first center is a uniformly random sample.
    first_id = random_state.randint(n_samples)
    centers[0] = X[first_id].toarray() if sp.issparse(X) else X[first_id]
    # Squared distance from each sample to its nearest chosen center, and
    # the total potential those distances sum to.
    closest_dist_sq = euclidean_distances(
        centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
        squared=True)
    current_pot = closest_dist_sq.sum()
    # Greedily add the remaining n_clusters - 1 centers.
    for c in range(1, n_clusters):
        # Sample candidates with probability proportional to the squared
        # distance to the nearest already-chosen center.
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
        # Keep whichever candidate lowers the potential the most.
        best_candidate, best_pot, best_dist_sq = None, None, None
        for trial in range(n_local_trials):
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()
            if best_candidate is None or new_pot < best_pot:
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq
        # Commit the winning candidate and its distance bookkeeping.
        if sp.issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq
    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
            return_n_iter=False):
    """K-means clustering algorithm.
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    return_n_iter : bool, optional
        Whether or not to return the number of iterations.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    best_n_iter: int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.
    """
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)
    if max_iter <= 0:
        raise ValueError('Number of iterations should be a positive number,'
                         ' got %d instead' % max_iter)
    # NOTE(review): dead assignment -- best_inertia is reset to None further
    # down before its first use.
    best_inertia = np.infty
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)
    # If the distances are precomputed every job will create a matrix of shape
    # (n_clusters, n_samples). To stop KMeans from eating up memory we only
    # activate this if the created matrix is guaranteed to be under 100MB. 12
    # million entries consume a little under 100MB if they are of type double.
    if precompute_distances == 'auto':
        n_samples = X.shape[0]
        precompute_distances = (n_clusters * n_samples) < 12e6
    elif isinstance(precompute_distances, bool):
        pass
    else:
        raise ValueError("precompute_distances should be 'auto' or True/False"
                         ", but a value of %r was passed" %
                         precompute_distances)
    # subtract of mean of x for more accurate distance computations
    if not sp.issparse(X) or hasattr(init, '__array__'):
        X_mean = X.mean(axis=0)
        if not sp.issparse(X):
            # The copy was already done above
            X -= X_mean
        if hasattr(init, '__array__'):
            # NOTE(review): when X is sparse, X itself is not centered but an
            # explicit array init still has X_mean subtracted here -- looks
            # suspect; verify against upstream behaviour.
            init = check_array(init, dtype=np.float64, copy=True)
            _validate_center_shape(X, n_clusters, init)
            init -= X_mean
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in k-means instead of n_init=%d'
                    % n_init, RuntimeWarning, stacklevel=2)
                n_init = 1
    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)
    best_labels, best_inertia, best_centers = None, None, None
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = _kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
                                    init=init, verbose=verbose, tol=tol,
                                    precompute_distances=precompute_distances,
                                    x_squared_norms=x_squared_norms,
                                    # Change seed to ensure variety
                                    random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]
    # Undo the centering: restore the data (if it was modified in place) and
    # shift the centers back into the original coordinate frame.
    if not sp.issparse(X):
        if not copy_x:
            X += X_mean
        best_centers += X_mean
    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
                   init='k-means++', verbose=False, random_state=None,
                   tol=1e-4, precompute_distances=True):
    """A single run of k-means, assumes preparation completed prior.
    Parameters
    ----------
    X: array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters: int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter: int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
        Assumed to be >= 1 (callers validate this).
    init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
    tol: float, optional
        The relative increment in the results before declaring convergence.
    verbose: boolean, optional
        Verbosity mode
    x_squared_norms: array
        Precomputed x_squared_norms.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    Returns
    -------
    centroid: float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label: integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia: float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    n_iter : int
        Number of iterations run.
    """
    random_state = check_random_state(random_state)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
    # iterations
    # NOTE(review): with max_iter == 0 both 'shift' and 'i' below would be
    # unbound; k_means() rejects max_iter <= 0 before calling here.
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # Track the best (lowest-inertia) iterate seen so far.
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Convergence test: squared movement of the centers this iteration.
        shift = squared_norm(centers_old - centers)
        if shift <= tol:
            if verbose:
                print("Converged at iteration %d" % i)
            break
    if shift > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = \
            _labels_inertia(X, x_squared_norms, best_centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
    return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
    """Assign labels and compute inertia from a full distance matrix.
    Overwrites the 'distances' array in-place when its length matches X.
    Parameters
    ----------
    X : numpy array, shape (n_sample, n_features)
        Input data.
    x_squared_norms : numpy array, shape (n_samples,)
        Precomputed squared norms of X.
    centers : numpy array, shape (n_clusters, n_features)
        Cluster centers which data is assigned to.
    distances : numpy array, shape (n_samples,)
        Pre-allocated output buffer for the per-sample distances.
    Returns
    -------
    labels : numpy array, dtype=np.int, shape (n_samples,)
        Indices of clusters that samples are assigned to.
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    # Materialise the full (n_clusters, n_samples) squared-distance matrix.
    all_distances = euclidean_distances(centers, X, x_squared_norms,
                                        squared=True)
    # argmin keeps the first minimum on ties, matching a sequential
    # "strictly smaller" update; cast keeps the int32 label dtype.
    labels = np.argmin(all_distances, axis=0).astype(np.int32)
    mindist = all_distances[labels, np.arange(n_samples)]
    if n_samples == distances.shape[0]:
        # Caller supplied a matching buffer: write the distances through.
        distances[:] = mindist
    return labels, mindist.sum()
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm.
    Assign every sample to its nearest center and accumulate the inertia
    (sum of squared sample-to-center distances). Distances are computed
    in-place.
    Parameters
    ----------
    X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The samples to label.
    x_squared_norms: array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, reused to
        avoid recomputation.
    centers: float64 array, shape (k, n_features)
        The current cluster centers.
    precompute_distances : boolean, default: True
        Trade memory for speed by materialising the full distance matrix.
    distances: float64 array, shape (n_samples,)
        Optional pre-allocated buffer receiving each sample's distance
        to its closest center.
    Returns
    -------
    labels: int array of shape(n)
        Index of the closest center for each sample.
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    # Start every label at -1 so an unassigned sample is easy to detect.
    labels = -np.ones(n_samples, np.int32)
    if distances is None:
        # Zero-length buffer: the cython helpers treat it as "don't store".
        distances = np.zeros(shape=(0,), dtype=np.float64)
    if sp.issparse(X):
        inertia = _k_means._assign_labels_csr(
            X, x_squared_norms, centers, labels, distances=distances)
        return labels, inertia
    if precompute_distances:
        return _labels_inertia_precompute_dense(X, x_squared_norms,
                                                centers, distances)
    inertia = _k_means._assign_labels_array(
        X, x_squared_norms, centers, labels, distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial centroids
    Parameters
    ----------
    X: array, shape (n_samples, n_features)
    k: int
        number of centroids
    init: {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it at
        hands already to avoid it being recomputed here. Default: None
    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.
    Returns
    -------
    centers: array, shape(k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    if init_size is not None and init_size < n_samples:
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        # randint(0, n) draws from [0, n) -- same distribution and same
        # random stream as the deprecated random_integers(0, n - 1), which
        # was removed in NumPy 1.17.
        init_indices = random_state.randint(0, n_samples, init_size)
        X = X[init_indices]
        x_squared_norms = x_squared_norms[init_indices]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))
    if isinstance(init, string_types) and init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif isinstance(init, string_types) and init == 'random':
        seeds = random_state.permutation(n_samples)[:k]
        centers = X[seeds]
    elif hasattr(init, '__array__'):
        centers = init
    elif callable(init):
        centers = init(X, k, random_state=random_state)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))
    if sp.issparse(centers):
        # The seeding and averaging code assumes dense centers.
        centers = centers.toarray()
    _validate_center_shape(X, k, centers)
    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
    """K-Means clustering
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    verbose : int, default 0
        Verbosity mode.
    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point
    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    Notes
    ------
    The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iteration.
    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
    'How slow is the k-means method?' SoCG2006)
    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls in local minima. That's why
    it can be useful to restart it several times.
    See also
    --------
    MiniBatchKMeans:
        Alternative online implementation that does incremental updates
        of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.
    """
    def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
                 tol=1e-4, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True, n_jobs=1):
        # Only store parameters here; all work happens in fit() (sklearn
        # estimator convention).
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        if X.shape[0] < self.n_clusters:
            raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
                X.shape[0], self.n_clusters))
        return X
    def _check_test_data(self, X):
        """Validate prediction/transform input against the fitted centers."""
        X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
                        warn_on_dtype=True)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError("Incorrect number of features. "
                             "Got %d features, expected %d" % (
                                 n_features, expected_n_features))
        return X
    def fit(self, X, y=None):
        """Compute k-means clustering.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        # Delegate the actual clustering to the module-level k_means().
        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
            k_means(
                X, n_clusters=self.n_clusters, init=self.init,
                n_init=self.n_init, max_iter=self.max_iter,
                verbose=self.verbose, return_n_iter=True,
                precompute_distances=self.precompute_distances,
                tol=self.tol, random_state=random_state, copy_x=self.copy_x,
                n_jobs=self.n_jobs)
        return self
    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.
        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_
    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.
        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        X = self._check_fit_data(X)
        return self.fit(X)._transform(X)
    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.
        In the new space, each dimension is the distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.
        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        return self._transform(X)
    def _transform(self, X):
        """guts of transform method; no input validation"""
        return euclidean_distances(X, self.cluster_centers_)
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
    def score(self, X, y=None):
        """Opposite of the value of X on the K-means objective.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.
        Returns
        -------
        score : float
            Opposite of the value of X on the K-means objective.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # Negated inertia: larger is better, per the sklearn score convention.
        return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The original data array.
    x_squared_norms : array, shape (n_samples,)
        Squared euclidean norm of each data point.
    centers : array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE
    counts : array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE
    distances : array, dtype float64, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
        May not be None when random_reassign is True.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    random_reassign : boolean, optional
        If True, centers with very low counts are randomly reassigned
        to observations.
    reassignment_ratio : float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose : bool, optional, default False
        Controls the verbosity.
    compute_squared_diff : bool
        If set to False, the squared diff computation is skipped.
    old_center_buffer : array, shape (n_features,)
        Scratch buffer that receives a copy of one old center at a time,
        used for monitoring convergence.
    Returns
    -------
    inertia : float
        Sum of distances of samples to their closest cluster center.
    squared_diff : numpy array, shape (n_clusters,)
        Squared distances between previous and updated cluster centers.
    """
    # Perform label assignment to nearest centers
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = counts < reassignment_ratio * counts.max()
        # pick at most .5 * batch_size samples as new centers
        if to_reassign.sum() > .5 * X.shape[0]:
            indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
            to_reassign[indices_dont_reassign] = False
        n_reassigns = to_reassign.sum()
        if n_reassigns:
            # Pick new clusters amongst observations with uniform probability
            new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
                                 random_state=random_state)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                # Copy the chosen sparse rows into the dense centers array.
                assign_rows_csr(X,
                                astype(new_centers, np.intp),
                                astype(np.where(to_reassign)[0], np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        counts[to_reassign] = np.min(counts[~to_reassign])
    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()
        if count > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)
            # update the count statistics for this center
            counts[center_idx] += count
            # inplace rescale to compute mean of all points (old and new)
            centers[center_idx] /= counts[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)
    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
    """Mini-Batch K-Means clustering.

    Variant of KMeans that updates the centers incrementally from small
    random batches of samples, trading some clustering quality for much
    lower computation time on large datasets.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    max_iter : int, optional
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.

    max_no_improvement : int, default: 10
        Control early stopping based on the consecutive number of mini
        batches that does not yield an improvement on the smoothed inertia.
        To disable convergence detection based on inertia, set
        max_no_improvement to None.

    tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized of the mean center
        squared position changes. This early stopping heuristics is
        closer to the one used for the batch variant of the algorithms
        but induces a slight computational and memory overhead over the
        inertia heuristic.
        To disable convergence detection based on normalized center
        change, set tol to 0.0 (default).

    batch_size : int, optional, default: 100
        Size of the mini batches.

    init_size : int, optional, default: 3 * batch_size
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.

    init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
        Method for initialization, defaults to 'k-means++':

        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': choose k observations (rows) at random from data for
        the initial centroids.

        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    n_init : int, default=3
        Number of random initializations that are tried.
        In contrast to KMeans, the algorithm is only run once, using the
        best of the ``n_init`` initializations as measured by inertia.

    compute_labels : boolean, default=True
        Compute label assignment and inertia for the complete dataset
        once the minibatch optimization has converged in fit.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    reassignment_ratio : float, default: 0.01
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.

    verbose : boolean, optional
        Verbosity mode.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point (if compute_labels is set to True).

    inertia_ : float
        The value of the inertia criterion associated with the chosen
        partition (if compute_labels is set to True). The inertia is
        defined as the sum of square distances of samples to their nearest
        cluster center.

    Notes
    -----
    See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
    """

    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, reassignment_ratio=0.01):
        # Shared parameters are handled by the KMeans base class; only the
        # mini-batch-specific knobs are stored here.
        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)

        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio

    def fit(self, X, y=None):
        """Compute the centroids on X by chunking it into mini-batches.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
        n_samples, n_features = X.shape
        if n_samples < self.n_clusters:
            raise ValueError("Number of samples smaller than number "
                             "of clusters.")

        n_init = self.n_init
        if hasattr(self.init, '__array__'):
            # Explicit center array given: multiple inits would all be
            # identical, so force a single init.
            self.init = np.ascontiguousarray(self.init, dtype=np.float64)
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in MiniBatchKMeans instead of '
                    'n_init=%d'
                    % self.n_init, RuntimeWarning, stacklevel=2)
                n_init = 1

        x_squared_norms = row_norms(X, squared=True)

        if self.tol > 0.0:
            tol = _tolerance(X, self.tol)

            # using tol-based early stopping needs the allocation of a
            # dedicated buffer which can be expensive for high dim data:
            # hence we allocate it outside of the main loop
            old_center_buffer = np.zeros(n_features, np.double)
        else:
            tol = 0.0
            # no need for the center buffer if tol-based early stopping is
            # disabled
            old_center_buffer = np.zeros(0, np.double)

        distances = np.zeros(self.batch_size, dtype=np.float64)
        # One "iteration" processes a single mini-batch; max_iter counts
        # passes over the full dataset.
        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_iter = int(self.max_iter * n_batches)

        init_size = self.init_size
        if init_size is None:
            init_size = 3 * self.batch_size
        if init_size > n_samples:
            init_size = n_samples
        self.init_size_ = init_size

        # Common validation subset used to compare independent inits.
        # NOTE(review): random_integers is deprecated in newer NumPy;
        # randint(0, n_samples, init_size) is the modern equivalent --
        # confirm supported NumPy versions before changing.
        validation_indices = random_state.random_integers(
            0, n_samples - 1, init_size)
        X_valid = X[validation_indices]
        x_squared_norms_valid = x_squared_norms[validation_indices]

        # perform several inits with random sub-sets
        best_inertia = None
        for init_idx in range(n_init):
            if self.verbose:
                print("Init %d/%d with method: %s"
                      % (init_idx + 1, n_init, self.init))
            counts = np.zeros(self.n_clusters, dtype=np.int32)

            # TODO: once the `k_means` function works with sparse input we
            # should refactor the following init to use it instead.

            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans
            cluster_centers = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                init_size=init_size)

            # Compute the label assignment on the init dataset
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X_valid, x_squared_norms[validation_indices],
                cluster_centers, counts, old_center_buffer, False,
                distances=None, verbose=self.verbose)

            # Keep only the best cluster centers across independent inits on
            # the common validation set
            _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                         cluster_centers)
            if self.verbose:
                print("Inertia for init %d/%d: %f"
                      % (init_idx + 1, n_init, inertia))
            if best_inertia is None or inertia < best_inertia:
                self.cluster_centers_ = cluster_centers
                self.counts_ = counts
                best_inertia = inertia

        # Empty context to be used inplace by the convergence check routine
        convergence_context = {}

        # Perform the iterative optimization until the final convergence
        # criterion
        for iteration_idx in range(n_iter):
            # Sample a minibatch from the full dataset
            minibatch_indices = random_state.random_integers(
                0, n_samples - 1, self.batch_size)

            # Perform the actual update step on the minibatch data
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X[minibatch_indices], x_squared_norms[minibatch_indices],
                self.cluster_centers_, self.counts_,
                old_center_buffer, tol > 0.0, distances=distances,
                # Here we randomly choose whether to perform
                # random reassignment: the choice is done as a function
                # of the iteration index, and the minimum number of
                # counts, in order to force this reassignment to happen
                # every once in a while
                random_reassign=((iteration_idx + 1)
                                 % (10 + self.counts_.min()) == 0),
                random_state=random_state,
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose)

            # Monitor convergence and do early stopping if necessary
            if _mini_batch_convergence(
                    self, iteration_idx, n_iter, tol, n_samples,
                    centers_squared_diff, batch_inertia, convergence_context,
                    verbose=self.verbose):
                break

        self.n_iter_ = iteration_idx + 1

        if self.compute_labels:
            self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)

        return self

    def _labels_inertia_minibatch(self, X):
        """Compute labels and inertia using mini batches.

        This is slightly slower than doing everything at once but prevents
        memory errors / segfaults.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels : array, shape (n_samples,)
            Cluster labels for each point.

        inertia : float
            Sum of squared distances of points to nearest cluster.
        """
        if self.verbose:
            print('Computing label assignment and total inertia')
        x_squared_norms = row_norms(X, squared=True)
        # Label each batch independently, then stitch the results together.
        slices = gen_batches(X.shape[0], self.batch_size)
        results = [_labels_inertia(X[s], x_squared_norms[s],
                                   self.cluster_centers_) for s in slices]
        labels, inertia = zip(*results)
        return np.hstack(labels), np.sum(inertia)

    def partial_fit(self, X, y=None):
        """Update k means estimate on a single mini-batch X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster.
        """
        X = check_array(X, accept_sparse="csr")
        n_samples, n_features = X.shape
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=np.float64)

        if n_samples == 0:
            return self

        x_squared_norms = row_norms(X, squared=True)
        # Keep a persistent RNG across successive partial_fit calls.
        self.random_state_ = getattr(self, "random_state_",
                                     check_random_state(self.random_state))
        if (not hasattr(self, 'counts_')
                or not hasattr(self, 'cluster_centers_')):
            # this is the first call to partial_fit on this object:
            # initialize the cluster centers
            self.cluster_centers_ = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=self.random_state_,
                x_squared_norms=x_squared_norms, init_size=self.init_size)

            self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
            random_reassign = False
            distances = None
        else:
            # The lower the minimum count is, the more we do random
            # reassignment, however, we don't want to do random
            # reassignment too often, to allow for building up counts
            random_reassign = self.random_state_.randint(
                10 * (1 + self.counts_.min())) == 0
            distances = np.zeros(X.shape[0], dtype=np.float64)

        _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                         self.counts_, np.zeros(0, np.double), 0,
                         random_reassign=random_reassign, distances=distances,
                         random_state=self.random_state_,
                         reassignment_ratio=self.reassignment_ratio,
                         verbose=self.verbose)

        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia(
                X, x_squared_norms, self.cluster_centers_)

        return self

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')

        X = self._check_test_data(X)
        return self._labels_inertia_minibatch(X)[0]
| bsd-3-clause |
dieterv/gnome-python-desktop | examples/gnomeprint/example_04.py | 2 | 2504 | #! /usr/bin/env python
#
# * example_04.c: sample gnome-print code
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU Library General Public License
# * as published by the Free Software Foundation; either version 2 of
# * the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU Library General Public License for more details.
# *
# * You should have received a copy of the GNU Library General Public
# * License along with this program; if not, write to the Free Software
# * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# *
# * Authors:
# * Chema Celorio <chema@ximian.com>
# Python conversion:
# Gustavo J. A. M. Carneiro <gustavo@users.sf.net>
# *
# * Copyright (C) 2002 Ximian Inc. and authors
# *
# */
#/*
# * See README
# */
import pygtk; pygtk.require("2.0")
import gnomeprint
def my_draw(gpc):
    # Draw the sample page (accented Latin + Cyrillic text) onto the
    # gnome-print context `gpc`.

    # Make some UTF-8 strings
    # (the tuples below are raw UTF-8 byte values; under Python 2, chr()
    # yields single bytes, so join() builds UTF-8 encoded byte strings)
    acented = "".join(map(chr, (0xC3, 0xA0, 0xC3, 0xA8, 0xC3, 0xAC,
                                0xC3, 0xB2, 0xC3, 0xB9, 0x20, 0xC3,
                                0xB1, 0xC3, 0x91, 0x20, 0xC3, 0xBB,
                                0xC3, 0xB4, 0x20)))
    cyrillic = "".join(map(chr, (0xD0, 0xA1, 0xD0, 0xBE, 0xD0, 0xBC, 0xD0, 0xB5,
                                 0x20, 0xD1, 0x80, 0xD0, 0xB0, 0xD0, 0xBD,
                                 0xD0, 0xB4, 0xD0, 0xBE, 0xD0, 0xBC, 0x20, 0xD1,
                                 0x86, 0xD1, 0x8B, 0xD1, 0x80, 0xD1, 0x83,
                                 0xD0, 0xBB, 0xD0, 0xBB, 0xD0, 0xB8, 0xD1, 0x86,
                                 0x20, 0xD1, 0x87, 0xD0, 0xB0, 0xD1, 0x80,
                                 0xD1, 0x81)))

    # Get this font from:
    # http://bibliofile.mc.duke.edu/gww/fonts/Unicode.html
    # I used the TTF Caslon Roman.
    # font_find_closest falls back to a similar font when the requested
    # one is unavailable, hence the name check below.
    font = gnomeprint.font_find_closest("Caslon Roman", 12)
    font_name = font.get_name()
    print "Found:", font_name
    if font_name != "Caslon Roman":
        print "You might not see cyrillic characters because Caslon Roman was not found.\n"

    # Lay out one heading/sample pair per script, top to bottom
    # (coordinates are in points from the bottom-left page corner).
    gpc.beginpage("1")
    gpc.setfont(font)
    gpc.moveto(100, 700)
    gpc.show("Some acented characters:")
    gpc.moveto(100, 680)
    gpc.show(acented)
    gpc.moveto(100, 650)
    gpc.show("Some cyrillic:")
    gpc.moveto(100, 630)
    gpc.show(cyrillic)
    gpc.showpage()
def my_print():
    """Build a print job with the default gnome-print config, render the
    demo page into its context via my_draw(), then submit it."""
    print_job = gnomeprint.Job(gnomeprint.config_default())
    drawing_context = print_job.get_context()
    my_draw(drawing_context)
    print_job.close()
    print_job.print_()
# Script entry point: run the demo print job when executed.
my_print()
print "Done..."
| lgpl-2.1 |
abtink/openthread | tools/harness-automation/cases/router_9_2_8.py | 18 | 1877 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Router_9_2_8(HarnessCase):
    # Thread certification test case "9 2 8", executed with the DUT in the
    # Router role through the harness automation framework.
    role = HarnessCase.ROLE_ROUTER
    case = '9 2 8'
    # Number of reference (golden) devices the test topology requires.
    golden_devices_required = 2

    def on_dialog(self, dialog, title):
        # No harness dialogs need special handling for this case.
        pass
if __name__ == '__main__':
    # Allow running this single case directly with the unittest runner.
    unittest.main()
| bsd-3-clause |
mbauskar/omnitech-demo-erpnext | erpnext/setup/doctype/item_group/test_item_group.py | 90 | 6962 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils.nestedset import NestedSetRecursionError, NestedSetMultipleRootsError, \
NestedSetChildExistsError, NestedSetInvalidMergeError, rebuild_tree, get_ancestors_of
# Fixture records for the "Item Group" doctype, loaded from the app's
# bundled test data; indices into this list are relied on by the tests below.
test_records = frappe.get_test_records('Item Group')
class TestItem(unittest.TestCase):
    """Exercise NestedSet (lft/rgt modified-preorder tree) maintenance
    through the Item Group doctype: moves, deletes, merges and rebuilds."""

    def test_basic_tree(self, records=None):
        # Assert the lft/rgt invariants for every record: each node's
        # interval nests strictly inside its parent's and is exactly wide
        # enough for its subtree.
        min_lft = 1
        max_rgt = frappe.db.sql("select max(rgt) from `tabItem Group`")[0][0]

        if not records:
            # skip the two root-level fixture records
            records = test_records[2:]

        for item_group in records:
            lft, rgt, parent_item_group = frappe.db.get_value("Item Group", item_group["item_group_name"],
                ["lft", "rgt", "parent_item_group"])

            if parent_item_group:
                parent_lft, parent_rgt = frappe.db.get_value("Item Group", parent_item_group,
                    ["lft", "rgt"])
            else:
                # root: treat the whole table range as the enclosing interval
                parent_lft = min_lft - 1
                parent_rgt = max_rgt + 1

            self.assertTrue(lft)
            self.assertTrue(rgt)
            self.assertTrue(lft < rgt)
            self.assertTrue(parent_lft < parent_rgt)
            self.assertTrue(lft > parent_lft)
            self.assertTrue(rgt < parent_rgt)
            self.assertTrue(lft >= min_lft)
            self.assertTrue(rgt <= max_rgt)

            # interval width must equal 2 * (number of descendants) + 1
            no_of_children = self.get_no_of_children(item_group["item_group_name"])
            self.assertTrue(rgt == (lft + 1 + (2 * no_of_children)))

            no_of_children = self.get_no_of_children(parent_item_group)
            self.assertTrue(parent_rgt == (parent_lft + 1 + (2 * no_of_children)))

    def get_no_of_children(self, item_group):
        # Return the total number of descendants of *item_group* by walking
        # the parent_item_group links level by level.
        def get_no_of_children(item_groups, no_of_children):
            children = []
            for ig in item_groups:
                children += frappe.db.sql_list("""select name from `tabItem Group`
                    where ifnull(parent_item_group, '')=%s""", ig or '')

            if len(children):
                return get_no_of_children(children, no_of_children + len(children))
            else:
                return no_of_children

        return get_no_of_children([item_group], 0)

    def test_recursion(self):
        # a node must not be movable under one of its own descendants
        group_b = frappe.get_doc("Item Group", "_Test Item Group B")
        group_b.parent_item_group = "_Test Item Group B - 3"
        self.assertRaises(NestedSetRecursionError, group_b.save)

        # cleanup
        group_b.parent_item_group = "All Item Groups"
        group_b.save()

    def test_rebuild_tree(self):
        # recomputing lft/rgt from the parent links must restore a valid tree
        rebuild_tree("Item Group", "parent_item_group")
        self.test_basic_tree()

    def move_it_back(self):
        # helper (not a test): restore Group B under the root and re-validate
        group_b = frappe.get_doc("Item Group", "_Test Item Group B")
        group_b.parent_item_group = "All Item Groups"
        group_b.save()
        self.test_basic_tree()

    def test_move_group_into_another(self):
        # before move
        old_lft, old_rgt = frappe.db.get_value("Item Group", "_Test Item Group C", ["lft", "rgt"])

        # put B under C
        group_b = frappe.get_doc("Item Group", "_Test Item Group B")
        lft, rgt = group_b.lft, group_b.rgt

        group_b.parent_item_group = "_Test Item Group C"
        group_b.save()
        self.test_basic_tree()

        # after move
        new_lft, new_rgt = frappe.db.get_value("Item Group", "_Test Item Group C", ["lft", "rgt"])

        # lft should reduce by the width of the moved subtree
        self.assertEquals(old_lft - new_lft, rgt - lft + 1)

        # adjacent siblings, hence rgt diff will be 0
        self.assertEquals(new_rgt - old_rgt, 0)

        self.move_it_back()

    def test_move_group_into_root(self):
        # only one root is allowed per NestedSet tree
        group_b = frappe.get_doc("Item Group", "_Test Item Group B")
        group_b.parent_item_group = ""
        self.assertRaises(NestedSetMultipleRootsError, group_b.save)

        # trick! works because it hasn't been rolled back :D
        self.test_basic_tree()
        self.move_it_back()

    def print_tree(self):
        # debugging helper: dump the whole tree ordered by lft
        import json
        print json.dumps(frappe.db.sql("select name, lft, rgt from `tabItem Group` order by lft"), indent=1)

    def test_move_leaf_into_another_group(self):
        # before move
        old_lft, old_rgt = frappe.db.get_value("Item Group", "_Test Item Group C", ["lft", "rgt"])

        group_b_3 = frappe.get_doc("Item Group", "_Test Item Group B - 3")
        lft, rgt = group_b_3.lft, group_b_3.rgt

        # child of right sibling is moved into it
        group_b_3.parent_item_group = "_Test Item Group C"
        group_b_3.save()
        self.test_basic_tree()

        new_lft, new_rgt = frappe.db.get_value("Item Group", "_Test Item Group C", ["lft", "rgt"])

        # lft should remain the same
        self.assertEquals(old_lft - new_lft, 0)

        # rgt should increase by the width of the moved leaf
        self.assertEquals(new_rgt - old_rgt, rgt - lft + 1)

        # move it back
        group_b_3 = frappe.get_doc("Item Group", "_Test Item Group B - 3")
        group_b_3.parent_item_group = "_Test Item Group B"
        group_b_3.save()
        self.test_basic_tree()

    def test_delete_leaf(self):
        # for checking later: snapshot the ancestors' rgt values
        parent_item_group = frappe.db.get_value("Item Group", "_Test Item Group B - 3", "parent_item_group")
        rgt = frappe.db.get_value("Item Group", parent_item_group, "rgt")

        ancestors = get_ancestors_of("Item Group", "_Test Item Group B - 3")
        ancestors = frappe.db.sql("""select name, rgt from `tabItem Group`
            where name in ({})""".format(", ".join(["%s"]*len(ancestors))), tuple(ancestors), as_dict=True)

        frappe.delete_doc("Item Group", "_Test Item Group B - 3")
        records_to_test = test_records[2:]
        del records_to_test[4]
        self.test_basic_tree(records=records_to_test)

        # rgt of each ancestor would reduce by 2 (one leaf = one lft/rgt pair)
        for item_group in ancestors:
            new_lft, new_rgt = frappe.db.get_value("Item Group", item_group.name, ["lft", "rgt"])
            self.assertEquals(new_rgt, item_group.rgt - 2)

        # insert it back
        frappe.copy_doc(test_records[6]).insert()

        self.test_basic_tree()

    def test_delete_group(self):
        # cannot delete group with child, but can delete leaf
        self.assertRaises(NestedSetChildExistsError, frappe.delete_doc, "Item Group", "_Test Item Group B")

    def test_merge_groups(self):
        # merging group B into group C moves B's children under C
        frappe.rename_doc("Item Group", "_Test Item Group B", "_Test Item Group C", merge=True)
        records_to_test = test_records[2:]
        del records_to_test[1]
        self.test_basic_tree(records=records_to_test)

        # insert Group B back
        frappe.copy_doc(test_records[3]).insert()
        self.test_basic_tree()

        # move its children back
        for name in frappe.db.sql_list("""select name from `tabItem Group`
            where parent_item_group='_Test Item Group C'"""):

            doc = frappe.get_doc("Item Group", name)
            doc.parent_item_group = "_Test Item Group B"
            doc.save()

        self.test_basic_tree()

    def test_merge_leaves(self):
        frappe.rename_doc("Item Group", "_Test Item Group B - 2", "_Test Item Group B - 1", merge=True)
        records_to_test = test_records[2:]
        del records_to_test[3]
        self.test_basic_tree(records=records_to_test)

        # insert Group B - 2 back
        frappe.copy_doc(test_records[5]).insert()
        self.test_basic_tree()

    def test_merge_leaf_into_group(self):
        # merging between a leaf and a group (either direction) is invalid
        self.assertRaises(NestedSetInvalidMergeError, frappe.rename_doc, "Item Group", "_Test Item Group B - 3",
            "_Test Item Group B", merge=True)

    def test_merge_group_into_leaf(self):
        self.assertRaises(NestedSetInvalidMergeError, frappe.rename_doc, "Item Group", "_Test Item Group B",
            "_Test Item Group B - 3", merge=True)
| agpl-3.0 |
great-expectations/great_expectations | great_expectations/expectations/core/expect_column_values_to_be_in_type_list.py | 1 | 17690 | import logging
from typing import Dict, Optional
import numpy as np
import pandas as pd
from great_expectations.core import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.core.expect_column_values_to_be_of_type import (
_get_dialect_type_module,
_native_type_type_map,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.registry import get_metric_kwargs
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from great_expectations.validator.validation_graph import MetricConfiguration
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)

try:
    import pyspark.sql.types as sparktypes
except ImportError as e:
    # pyspark is an optional dependency: when absent, Spark-backed type
    # validation is unavailable and we only log at debug level.
    logger.debug(str(e))
    logger.debug(
        "Unable to load spark context; install optional spark dependency for support."
    )
class ExpectColumnValuesToBeInTypeList(ColumnMapExpectation):
"""
Expect a column to contain values from a specified type list.
expect_column_values_to_be_in_type_list is a \
:func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
.column_map_expectation>` for typed-column backends,
and also for PandasDataset where the column dtype provides an unambiguous constraints (any dtype except
'object'). For PandasDataset columns with dtype of 'object' expect_column_values_to_be_of_type is a
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>` and will
independently check each row's type.
Args:
column (str): \
The column name.
type_list (str): \
A list of strings representing the data type that each column should have as entries. Valid types are
defined by the current backend implementation and are dynamically loaded. For example, valid types for
PandasDataset include any numpy dtype values (such as 'int64') or native python types (such as 'int'),
whereas valid types for a SqlAlchemyDataset include types named by the current driver such as 'INTEGER'
in most SQL dialects and 'TEXT' in dialects such as postgresql. Valid types for SparkDFDataset include
'StringType', 'BooleanType' and other pyspark-defined type names.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See also:
:func:`expect_column_values_to_be_of_type \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_of_type>`
"""
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
}
map_metric = "column_values.in_type_list"
success_keys = (
"type_list",
"mostly",
)
default_kwarg_values = {
"type_list": None,
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
super().validate_configuration(configuration)
try:
assert "type_list" in configuration.kwargs, "type_list is required"
assert (
isinstance(configuration.kwargs["type_list"], (list, dict))
or configuration.kwargs["type_list"] is None
), "type_list must be a list or None"
if isinstance(configuration.kwargs["type_list"], dict):
assert (
"$PARAMETER" in configuration.kwargs["type_list"]
), 'Evaluation Parameter dict for type_list kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "type_list", "mostly", "row_condition", "condition_parser"],
)
if params["type_list"] is not None:
for i, v in enumerate(params["type_list"]):
params["v__" + str(i)] = v
values_string = " ".join(
["$v__" + str(i) for i, v in enumerate(params["type_list"])]
)
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
if include_column_name:
template_str = (
"$column value types must belong to this set: "
+ values_string
+ ", at least $mostly_pct % of the time."
)
else:
template_str = (
"value types must belong to this set: "
+ values_string
+ ", at least $mostly_pct % of the time."
)
else:
if include_column_name:
template_str = (
"$column value types must belong to this set: "
+ values_string
+ "."
)
else:
template_str = (
"value types must belong to this set: " + values_string + "."
)
else:
if include_column_name:
template_str = "$column value types may be any value, but observed value will be reported"
else:
template_str = (
"value types may be any value, but observed value will be reported"
)
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _validate_pandas(
self,
actual_column_type,
expected_types_list,
):
if expected_types_list is None:
success = True
else:
comp_types = []
for type_ in expected_types_list:
try:
comp_types.append(np.dtype(type_).type)
comp_types.append(np.dtype(type_))
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = _native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
success = actual_column_type in comp_types
return {
"success": success,
"result": {"observed_value": actual_column_type.type.__name__},
}
def _validate_sqlalchemy(
self, actual_column_type, expected_types_list, execution_engine
):
# Our goal is to be as explicit as possible. We will match the dialect
# if that is possible. If there is no dialect available, we *will*
# match against a top-level SqlAlchemy type.
#
# This is intended to be a conservative approach.
#
# In particular, we *exclude* types that would be valid under an ORM
# such as "float" for postgresql with this approach
if expected_types_list is None:
success = True
else:
types = []
type_module = _get_dialect_type_module(execution_engine=execution_engine)
for type_ in expected_types_list:
try:
type_class = getattr(type_module, type_)
types.append(type_class)
except AttributeError:
logger.debug("Unrecognized type: %s" % type_)
if len(types) == 0:
logger.warning(
"No recognized sqlalchemy types in type_list for current dialect."
)
types = tuple(types)
success = isinstance(actual_column_type, types)
return {
"success": success,
"result": {"observed_value": type(actual_column_type).__name__},
}
def _validate_spark(
self,
actual_column_type,
expected_types_list,
):
if expected_types_list is None:
success = True
else:
types = []
for type_ in expected_types_list:
try:
type_class = getattr(sparktypes, type_)
types.append(type_class)
except AttributeError:
logger.debug("Unrecognized type: %s" % type_)
if len(types) == 0:
raise ValueError("No recognized spark types in expected_types_list")
types = tuple(types)
success = isinstance(actual_column_type, types)
return {
"success": success,
"result": {"observed_value": type(actual_column_type).__name__},
}
    def get_validation_dependencies(
        self,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[dict] = None,
    ):
        """Declare the metric dependencies needed to validate this expectation.

        Starts from the aggregate (TableExpectation) dependencies.  For the
        PandasExecutionEngine only, if the target column's dtype is numpy
        ``object_`` and a ``type_list`` was provided, the dependencies are
        reset to the column-map version, and a ``table.column_types`` metric
        dependency is added so the validator can inspect the actual dtype.
        """
        # This calls TableExpectation.get_validation_dependencies to set baseline dependencies for the aggregate version
        # of the expectation.
        # We need to keep this as super(ColumnMapExpectation, self), which calls
        # TableExpectation.get_validation_dependencies instead of ColumnMapExpectation.get_validation_dependencies.
        # This is because the map version of this expectation is only supported for Pandas, so we want the aggregate
        # version for the other backends.
        dependencies = super(ColumnMapExpectation, self).get_validation_dependencies(
            configuration, execution_engine, runtime_configuration
        )
        # Only PandasExecutionEngine supports the column map version of the expectation.
        if isinstance(execution_engine, PandasExecutionEngine):
            column_name = configuration.kwargs.get("column")
            expected_types_list = configuration.kwargs.get("type_list")
            metric_kwargs = get_metric_kwargs(
                configuration=configuration,
                metric_name="table.column_types",
                runtime_configuration=runtime_configuration,
            )
            metric_domain_kwargs = metric_kwargs.get("metric_domain_kwargs")
            metric_value_kwargs = metric_kwargs.get("metric_value_kwargs")
            table_column_types_configuration = MetricConfiguration(
                "table.column_types",
                metric_domain_kwargs=metric_domain_kwargs,
                metric_value_kwargs=metric_value_kwargs,
            )
            # Resolve the column-types metric eagerly so we can branch on the
            # actual dtype of the requested column before declaring deps.
            actual_column_types_list = execution_engine.resolve_metrics(
                [table_column_types_configuration]
            )[table_column_types_configuration.id]
            # NOTE(review): assumes the named column is present exactly once;
            # raises IndexError otherwise — confirm upstream validation
            # guarantees the column exists.
            actual_column_type = [
                type_dict["type"]
                for type_dict in actual_column_types_list
                if type_dict["name"] == column_name
            ][0]
            # only use column map version if column dtype is object
            if (
                actual_column_type.type.__name__ == "object_"
                and expected_types_list is not None
            ):
                # this resets dependencies using ColumnMapExpectation.get_validation_dependencies
                dependencies = super().get_validation_dependencies(
                    configuration, execution_engine, runtime_configuration
                )
                # this adds table.column_types dependency for both aggregate and map versions of expectation
                column_types_metric_kwargs = get_metric_kwargs(
                    metric_name="table.column_types",
                    configuration=configuration,
                    runtime_configuration=runtime_configuration,
                )
                dependencies["metrics"]["table.column_types"] = MetricConfiguration(
                    metric_name="table.column_types",
                    metric_domain_kwargs=column_types_metric_kwargs["metric_domain_kwargs"],
                    metric_value_kwargs=column_types_metric_kwargs["metric_value_kwargs"],
                )
        return dependencies
    def _validate(
        self,
        configuration: ExpectationConfiguration,
        metrics: Dict,
        runtime_configuration: dict = None,
        execution_engine: ExecutionEngine = None,
    ):
        """Validate the column's actual type against ``type_list``.

        Dispatches to the pandas / sqlalchemy / spark helper according to the
        execution engine.  For Pandas with an ``object_`` dtype column and a
        non-None ``type_list``, delegates to the column-map implementation
        via ``super()._validate`` instead.

        NOTE(review): an execution engine that is none of the three handled
        classes falls through and returns ``None`` implicitly — confirm this
        is intended.
        """
        column_name = configuration.kwargs.get("column")
        expected_types_list = configuration.kwargs.get("type_list")
        actual_column_types_list = metrics.get("table.column_types")
        # Pick out the dtype entry for the requested column (assumes present).
        actual_column_type = [
            type_dict["type"]
            for type_dict in actual_column_types_list
            if type_dict["name"] == column_name
        ][0]
        if isinstance(execution_engine, PandasExecutionEngine):
            # only PandasExecutionEngine supports map version of expectation and
            # only when column type is object
            if (
                actual_column_type.type.__name__ == "object_"
                and expected_types_list is not None
            ):
                # this calls ColumnMapMetric._validate
                return super()._validate(
                    configuration, metrics, runtime_configuration, execution_engine
                )
            return self._validate_pandas(
                actual_column_type=actual_column_type,
                expected_types_list=expected_types_list,
            )
        elif isinstance(execution_engine, SqlAlchemyExecutionEngine):
            return self._validate_sqlalchemy(
                actual_column_type=actual_column_type,
                expected_types_list=expected_types_list,
                execution_engine=execution_engine,
            )
        elif isinstance(execution_engine, SparkDFExecutionEngine):
            return self._validate_spark(
                actual_column_type=actual_column_type,
                expected_types_list=expected_types_list,
            )
| apache-2.0 |
GitHublong/hue | desktop/core/ext-py/boto-2.38.0/boto/services/bs.py | 153 | 8144 | #!/usr/bin/env python
# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from optparse import OptionParser
from boto.services.servicedef import ServiceDef
from boto.services.submit import Submitter
from boto.services.result import ResultProcessor
import boto
import sys, os
from boto.compat import StringIO
class BS(object):
    """Command-line front end ("batch service") for a boto service.

    Parses options, loads a ``ServiceDef`` config file and dispatches to the
    matching ``do_<command>`` method.  Every command operates on remote AWS
    resources (SQS queues, S3 buckets, SDB domains, EC2 instances), so all
    side effects happen against live services.
    """
    # optparse usage banner; %prog is substituted with the script name.
    Usage = "usage: %prog [options] config_file command"
    # Command name -> help text.  A command is valid iff a matching
    # do_<name> method exists on this class (see main()).
    Commands = {'reset' : 'Clear input queue and output bucket',
                'submit' : 'Submit local files to the service',
                'start' : 'Start the service',
                'status' : 'Report on the status of the service buckets and queues',
                'retrieve' : 'Retrieve output generated by a batch',
                'batches' : 'List all batches stored in current output_domain'}
    def __init__(self):
        # Build the option parser up front; options are read in main().
        self.service_name = None
        self.parser = OptionParser(usage=self.Usage)
        self.parser.add_option("--help-commands", action="store_true", dest="help_commands",
                               help="provides help on the available commands")
        self.parser.add_option("-a", "--access-key", action="store", type="string",
                               help="your AWS Access Key")
        self.parser.add_option("-s", "--secret-key", action="store", type="string",
                               help="your AWS Secret Access Key")
        self.parser.add_option("-p", "--path", action="store", type="string", dest="path",
                               help="the path to local directory for submit and retrieve")
        self.parser.add_option("-k", "--keypair", action="store", type="string", dest="keypair",
                               help="the SSH keypair used with launched instance(s)")
        self.parser.add_option("-l", "--leave", action="store_true", dest="leave",
                               help="leave the files (don't retrieve) files during retrieve command")
        self.parser.set_defaults(leave=False)
        self.parser.add_option("-n", "--num-instances", action="store", type="string", dest="num_instances",
                               help="the number of launched instance(s)")
        self.parser.set_defaults(num_instances=1)
        self.parser.add_option("-i", "--ignore-dirs", action="append", type="string", dest="ignore",
                               help="directories that should be ignored by submit command")
        self.parser.add_option("-b", "--batch-id", action="store", type="string", dest="batch",
                               help="batch identifier required by the retrieve command")
    def print_command_help(self):
        """Print the name and help text of every supported command."""
        print('\nCommands:')
        for key in self.Commands.keys():
            print(' %s\t\t%s' % (key, self.Commands[key]))
    def do_reset(self):
        """Drain the input queue and delete generated keys from the output bucket."""
        iq = self.sd.get_obj('input_queue')
        if iq:
            print('clearing out input queue')
            i = 0
            m = iq.read()
            while m:
                i += 1
                iq.delete_message(m)
                m = iq.read()
            print('deleted %d messages' % i)
        ob = self.sd.get_obj('output_bucket')
        ib = self.sd.get_obj('input_bucket')
        if ob:
            # When input and output share one bucket there is nothing safe
            # to delete, so bail out before touching any keys.
            if ib and ob.name == ib.name:
                return
            print('delete generated files in output bucket')
            i = 0
            for k in ob:
                i += 1
                k.delete()
            print('deleted %d keys' % i)
    def do_submit(self):
        """Upload the files under --path to the service and print the batch id."""
        if not self.options.path:
            self.parser.error('No path provided')
        if not os.path.exists(self.options.path):
            self.parser.error('Invalid path (%s)' % self.options.path)
        s = Submitter(self.sd)
        t = s.submit_path(self.options.path, None, self.options.ignore, None,
                          None, True, self.options.path)
        print('A total of %d files were submitted' % t[1])
        print('Batch Identifier: %s' % t[0])
    def do_start(self):
        """Launch EC2 instance(s) of the configured AMI.

        The full service config — including the AWS credentials — is passed
        to the instance(s) as EC2 user data.
        """
        ami_id = self.sd.get('ami_id')
        instance_type = self.sd.get('instance_type', 'm1.small')
        security_group = self.sd.get('security_group', 'default')
        if not ami_id:
            self.parser.error('ami_id option is required when starting the service')
        ec2 = boto.connect_ec2()
        if not self.sd.has_section('Credentials'):
            self.sd.add_section('Credentials')
            self.sd.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id)
            self.sd.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key)
        # Serialize the config to a string so it can ride along as user data.
        s = StringIO()
        self.sd.write(s)
        rs = ec2.get_all_images([ami_id])
        img = rs[0]
        r = img.run(user_data=s.getvalue(), key_name=self.options.keypair,
                    max_count=self.options.num_instances,
                    instance_type=instance_type,
                    security_groups=[security_group])
        print('Starting AMI: %s' % ami_id)
        print('Reservation %s contains the following instances:' % r.id)
        for i in r.instances:
            print('\t%s' % i.id)
    def do_status(self):
        """Report approximate input-queue depth and output-bucket key count."""
        iq = self.sd.get_obj('input_queue')
        if iq:
            print('The input_queue (%s) contains approximately %s messages' % (iq.id, iq.count()))
        ob = self.sd.get_obj('output_bucket')
        ib = self.sd.get_obj('input_bucket')
        if ob:
            # Shared input/output bucket: the key count would be misleading.
            if ib and ob.name == ib.name:
                return
            total = 0
            for k in ob:
                total += 1
            print('The output_bucket (%s) contains %d keys' % (ob.name, total))
    def do_retrieve(self):
        """Download the results of --batch-id into --path.

        With --leave, remote copies are kept (files are listed, not fetched).
        """
        if not self.options.path:
            self.parser.error('No path provided')
        if not os.path.exists(self.options.path):
            self.parser.error('Invalid path (%s)' % self.options.path)
        if not self.options.batch:
            self.parser.error('batch identifier is required for retrieve command')
        s = ResultProcessor(self.options.batch, self.sd)
        s.get_results(self.options.path, get_file=(not self.options.leave))
    def do_batches(self):
        """List every batch identifier recorded in the output domain."""
        d = self.sd.get_obj('output_domain')
        if d:
            print('Available Batches:')
            rs = d.query("['type'='Batch']")
            for item in rs:
                print(' %s' % item.name)
        else:
            self.parser.error('No output_domain specified for service')
    def main(self):
        """Parse argv, load the service definition and dispatch the command."""
        self.options, self.args = self.parser.parse_args()
        if self.options.help_commands:
            self.print_command_help()
            sys.exit(0)
        if len(self.args) != 2:
            self.parser.error("config_file and command are required")
        self.config_file = self.args[0]
        self.sd = ServiceDef(self.config_file)
        self.command = self.args[1]
        if hasattr(self, 'do_%s' % self.command):
            method = getattr(self, 'do_%s' % self.command)
            method()
        else:
            self.parser.error('command (%s) not recognized' % self.command)
if __name__ == "__main__":
    # Script entry point: build the command dispatcher and run it.
    BS().main()
| apache-2.0 |
tanayseven/Voix | flask/lib/python2.7/site-packages/sqlalchemy/ext/orderinglist.py | 6 | 12647 | # ext/orderinglist.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for its children.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a relationship collection and
automatically synchronize changes in list position with an attribute on the
related objects. (See :ref:`advdatamapping_entitycollections` for more
information on the general pattern.)
Example: Two tables that store slides in a presentation. Each slide
has a number of bullet points, displayed in order by the 'position'
column on the bullets table. These bullets can be inserted and re-ordered
by your end users, and you need to update the 'position' column of all
affected rows when changes are made.
.. sourcecode:: python+sql
slides_table = Table('Slides', metadata,
Column('id', Integer, primary_key=True),
Column('name', String))
bullets_table = Table('Bullets', metadata,
Column('id', Integer, primary_key=True),
Column('slide_id', Integer, ForeignKey('Slides.id')),
Column('position', Integer),
Column('text', String))
class Slide(object):
pass
class Bullet(object):
pass
mapper(Slide, slides_table, properties={
'bullets': relationship(Bullet, order_by=[bullets_table.c.position])
})
mapper(Bullet, bullets_table)
The standard relationship mapping will produce a list-like attribute on each
Slide containing all related Bullets, but coping with changes in ordering is
totally your responsibility. If you insert a Bullet into that list, there is
no magic - it won't have a position attribute unless you assign it one, and
you'll need to manually renumber all the subsequent Bullets in the list to
accommodate the insert.
An ``orderinglist`` can automate this and manage the 'position' attribute on
all related bullets for you.
.. sourcecode:: python+sql
mapper(Slide, slides_table, properties={
'bullets': relationship(Bullet,
collection_class=ordering_list('position'),
order_by=[bullets_table.c.position])
})
mapper(Bullet, bullets_table)
s = Slide()
s.bullets.append(Bullet())
s.bullets.append(Bullet())
s.bullets[1].position
>>> 1
s.bullets.insert(1, Bullet())
s.bullets[2].position
>>> 2
Use the ``ordering_list`` function to set up the ``collection_class`` on
relationships (as in the mapper example above). This implementation depends
on the list starting in the proper order, so be SURE to put an order_by on
your relationship.
.. warning::
``ordering_list`` only provides limited functionality when a primary
key column or unique column is the target of the sort. Since changing the
order of entries often means that two rows must trade values, this is not
possible when the value is constrained by a primary key or unique
constraint, since one of the rows would temporarily have to point to a
third available value so that the other row could take its old
value. ``ordering_list`` doesn't do any of this for you,
nor does SQLAlchemy itself.
``ordering_list`` takes the name of the related object's ordering attribute as
an argument. By default, the zero-based integer index of the object's
position in the ``ordering_list`` is synchronized with the ordering attribute:
index 0 will get position 0, index 1 position 1, etc. To start numbering at 1
or some other integer, provide ``count_from=1``.
Ordering values are not limited to incrementing integers. Almost any scheme
can be implemented by supplying a custom ``ordering_func`` that maps a Python list
index to any value you require.
"""
from ..orm.collections import collection
from .. import util
__all__ = ['ordering_list']
def ordering_list(attr, count_from=None, **kw):
    """Build an ``OrderingList`` factory for use in mapper definitions.

    The returned callable is suitable as the ``collection_class`` argument
    of a mapper relationship.

    attr
      Name of the mapped attribute used for storage and retrieval of
      ordering information.

    count_from (optional)
      Set up an integer-based ordering starting at ``count_from``.  For
      example, ``ordering_list('pos', count_from=1)`` creates a 1-based
      list in SQL, storing the value in the 'pos' column.  Ignored if an
      ``ordering_func`` keyword is supplied.

    Any remaining keyword arguments are forwarded to the ``OrderingList``
    constructor.
    """
    factory_kw = _unsugar_count_from(count_from=count_from, **kw)
    def _factory():
        return OrderingList(attr, **factory_kw)
    return _factory
# Ordering utility functions
def count_from_0(index, collection):
    """Numbering function yielding consecutive integers that start at 0.

    The ``collection`` argument is accepted for interface uniformity with
    other ordering functions and is not used.
    """
    return index
def count_from_1(index, collection):
    """Numbering function yielding consecutive integers that start at 1.

    The ``collection`` argument is accepted for interface uniformity with
    other ordering functions and is not used.
    """
    return 1 + index
def count_from_n_factory(start):
    """Return a numbering function: consecutive integers from ``start``."""
    def f(index, collection):
        return start + index
    try:
        # Give the closure a descriptive name for debugging purposes.
        f.__name__ = 'count_from_%i' % start
    except TypeError:
        # ``start`` cannot be %i-formatted (e.g. a string); keep the
        # generic name.
        pass
    return f
def _unsugar_count_from(**kw):
"""Builds counting functions from keyword arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
"""
count_from = kw.pop('count_from', None)
if kw.get('ordering_func', None) is None and count_from is not None:
if count_from == 0:
kw['ordering_func'] = count_from_0
elif count_from == 1:
kw['ordering_func'] = count_from_1
else:
kw['ordering_func'] = count_from_n_factory(count_from)
return kw
class OrderingList(list):
    """A custom list that manages position information for its children.
    See the module and __init__ documentation for more details. The
    ``ordering_list`` factory function is used to configure ``OrderingList``
    collections in ``mapper`` relationship definitions.
    """
    def __init__(self, ordering_attr=None, ordering_func=None,
                 reorder_on_append=False):
        """A custom list that manages position information for its children.
        ``OrderingList`` is a ``collection_class`` list implementation that
        syncs position in a Python list with a position attribute on the
        mapped objects.
        This implementation relies on the list starting in the proper order,
        so be **sure** to put an ``order_by`` on your relationship.
        :param ordering_attr:
          Name of the attribute that stores the object's order in the
          relationship.
        :param ordering_func: Optional.  A function that maps the position in
          the Python list to a value to store in the
          ``ordering_attr``.  Values returned are usually (but need not be!)
          integers.
          An ``ordering_func`` is called with two positional parameters: the
          index of the element in the list, and the list itself.
          If omitted, Python list indexes are used for the attribute values.
          Two basic pre-built numbering functions are provided in this module:
          ``count_from_0`` and ``count_from_1``.  For more exotic examples
          like stepped numbering, alphabetical and Fibonacci numbering, see
          the unit tests.
        :param reorder_on_append:
          Default False.  When appending an object with an existing (non-None)
          ordering value, that value will be left untouched unless
          ``reorder_on_append`` is true.  This is an optimization to avoid a
          variety of dangerous unexpected database writes.
          SQLAlchemy will add instances to the list via append() when your
          object loads.  If for some reason the result set from the database
          skips a step in the ordering (say, row '1' is missing but you get
          '2', '3', and '4'), reorder_on_append=True would immediately
          renumber the items to '1', '2', '3'.  If you have multiple sessions
          making changes, any of whom happen to load this collection even in
          passing, all of the sessions would try to "clean up" the numbering
          in their commits, possibly causing all but one to fail with a
          concurrent modification error.  Spooky action at a distance.
          Recommend leaving this with the default of False, and just call
          ``reorder()`` if you're doing ``append()`` operations with
          previously ordered instances or when doing some housekeeping after
          manual sql operations.
        """
        self.ordering_attr = ordering_attr
        if ordering_func is None:
            ordering_func = count_from_0
        self.ordering_func = ordering_func
        self.reorder_on_append = reorder_on_append
    # More complex serialization schemes (multi column, e.g.) are possible by
    # subclassing and reimplementing these two methods.
    def _get_order_value(self, entity):
        # Read the position attribute from a child entity.
        return getattr(entity, self.ordering_attr)
    def _set_order_value(self, entity, value):
        # Write the position attribute on a child entity.
        setattr(entity, self.ordering_attr, value)
    def reorder(self):
        """Synchronize ordering for the entire collection.
        Sweeps through the list and ensures that each object has accurate
        ordering information set.
        """
        for index, entity in enumerate(self):
            self._order_entity(index, entity, True)
    # As of 0.5, _reorder is no longer semi-private
    _reorder = reorder
    def _order_entity(self, index, entity, reorder=True):
        # Assign the entity's ordering attribute if it is missing, stale, or
        # if a forced reorder was requested.
        have = self._get_order_value(entity)
        # Don't disturb existing ordering if reorder is False
        if have is not None and not reorder:
            return
        should_be = self.ordering_func(index, self)
        if have != should_be:
            self._set_order_value(entity, should_be)
    def append(self, entity):
        super(OrderingList, self).append(entity)
        self._order_entity(len(self) - 1, entity, self.reorder_on_append)
    def _raw_append(self, entity):
        """Append without any ordering behavior."""
        super(OrderingList, self).append(entity)
    _raw_append = collection.adds(1)(_raw_append)
    def insert(self, index, entity):
        super(OrderingList, self).insert(index, entity)
        self._reorder()
    def remove(self, entity):
        super(OrderingList, self).remove(entity)
        self._reorder()
    def pop(self, index=-1):
        entity = super(OrderingList, self).pop(index)
        self._reorder()
        return entity
    def __setitem__(self, index, entity):
        if isinstance(index, slice):
            # NOTE(review): the ``or`` defaults below map an explicit
            # ``step``/``start`` of 0 and a ``stop`` of 0 to their fallback
            # values (e.g. stop==0 becomes len(self)) — confirm this is the
            # intended edge-case behavior for extended slices.
            step = index.step or 1
            start = index.start or 0
            if start < 0:
                start += len(self)
            stop = index.stop or len(self)
            if stop < 0:
                stop += len(self)
            for i in xrange(start, stop, step):
                self.__setitem__(i, entity[i])
        else:
            self._order_entity(index, entity, True)
            super(OrderingList, self).__setitem__(index, entity)
    def __delitem__(self, index):
        super(OrderingList, self).__delitem__(index)
        self._reorder()
    # Py2K
    def __setslice__(self, start, end, values):
        # Python 2 only: simple slice assignment bypasses __setitem__.
        super(OrderingList, self).__setslice__(start, end, values)
        self._reorder()
    def __delslice__(self, start, end):
        # Python 2 only: simple slice deletion bypasses __delitem__.
        super(OrderingList, self).__delslice__(start, end)
        self._reorder()
    # end Py2K
    def __reduce__(self):
        # Pickle support; _reconstitute (module level) is the inverse.
        return _reconstitute, (self.__class__, self.__dict__, list(self))
    # Copy docstrings from the corresponding built-in list methods onto the
    # undocumented overrides above (Python 2: functions expose func_name).
    for func_name, func in locals().items():
        if (util.callable(func) and func.func_name == func_name and
            not func.__doc__ and hasattr(list, func_name)):
            func.__doc__ = getattr(list, func_name).__doc__
    del func_name, func
def _reconstitute(cls, dict_, items):
""" Reconstitute an ``OrderingList``.
This is the adjoint to ``OrderingList.__reduce__()``. It is used for
unpickling ``OrderingList``\\s
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj
| gpl-3.0 |
burzillibus/RobHome | venv/lib/python2.7/site-packages/django/conf/locale/mk/formats.py | 504 | 1742 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Locale-specific display formats (Django date-format syntax, see link above).
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
# First day of the week for this locale, as an index used by Django's
# calendar-related utilities.
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Accepted input formats are tried in order until one parses.
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    '%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y', # '25.10.06'
    '%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
    '%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
    '%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
    '%d. %m. %Y', # '25. 10. 2006'
    '%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
    '%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
    '%d. %m. %y %H:%M', # '25. 10. 06 14:30'
    '%d. %m. %y', # '25. 10. 06'
]
# Number formatting: comma decimal mark, dot thousands separator, grouped
# in threes (e.g. 1.234.567,89).
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit |
kdwink/intellij-community | python/helpers/py2only/docutils/parsers/rst/languages/cs.py | 57 | 4679 | # $Id: cs.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Marek Blaha <mb@dat.cz>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Czech-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Maps localized (Czech) directive names to the canonical directive names
# registered in directives/__init__.py.  Entries marked "(translation
# required)" still await a Czech translation.
directives = {
      # language-dependent: fixed
      u'pozor': 'attention',
      u'caution (translation required)': 'caution', # jak rozlisit caution a warning?
      u'nebezpe\u010D\u00ED': 'danger',
      u'chyba': 'error',
      u'rada': 'hint',
      u'd\u016Fle\u017Eit\u00E9': 'important',
      u'pozn\u00E1mka': 'note',
      u'tip (translation required)': 'tip',
      u'varov\u00E1n\u00ED': 'warning',
      u'admonition (translation required)': 'admonition',
      u'sidebar (translation required)': 'sidebar',
      u't\u00E9ma': 'topic',
      u'line-block (translation required)': 'line-block',
      u'parsed-literal (translation required)': 'parsed-literal',
      u'odd\u00EDl': 'rubric',
      u'moto': 'epigraph',
      u'highlights (translation required)': 'highlights',
      u'pull-quote (translation required)': 'pull-quote',
      u'compound (translation required)': 'compound',
      u'container (translation required)': 'container',
      #'questions': 'questions',
      #'qa': 'questions',
      #'faq': 'questions',
      u'table (translation required)': 'table',
      u'csv-table (translation required)': 'csv-table',
      u'list-table (translation required)': 'list-table',
      u'meta (translation required)': 'meta',
      #'imagemap': 'imagemap',
      u'image (translation required)': 'image', # obrazek
      u'figure (translation required)': 'figure', # a tady?
      u'include (translation required)': 'include',
      u'raw (translation required)': 'raw',
      u'replace (translation required)': 'replace',
      u'unicode (translation required)': 'unicode',
      u'datum': 'date',
      u't\u0159\u00EDda': 'class',
      u'role (translation required)': 'role',
      u'default-role (translation required)': 'default-role',
      u'title (translation required)': 'title',
      u'obsah': 'contents',
      u'sectnum (translation required)': 'sectnum',
      u'section-numbering (translation required)': 'sectnum',
      u'header (translation required)': 'header',
      u'footer (translation required)': 'footer',
      #'footnotes': 'footnotes',
      #'citations': 'citations',
      u'target-notes (translation required)': 'target-notes',
      u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Czech name to registered (in directives/__init__.py) directive name
mapping."""
# Maps localized (Czech) interpreted-text role names to canonical role
# names.  Entries marked "(translation required)" still await translation.
roles = {
    # language-dependent: fixed
    u'abbreviation (translation required)': 'abbreviation',
    u'ab (translation required)': 'abbreviation',
    u'acronym (translation required)': 'acronym',
    u'ac (translation required)': 'acronym',
    u'index (translation required)': 'index',
    u'i (translation required)': 'index',
    u'subscript (translation required)': 'subscript',
    u'sub (translation required)': 'subscript',
    u'superscript (translation required)': 'superscript',
    u'sup (translation required)': 'superscript',
    u'title-reference (translation required)': 'title-reference',
    u'title (translation required)': 'title-reference',
    u't (translation required)': 'title-reference',
    u'pep-reference (translation required)': 'pep-reference',
    u'pep (translation required)': 'pep-reference',
    u'rfc-reference (translation required)': 'rfc-reference',
    u'rfc (translation required)': 'rfc-reference',
    u'emphasis (translation required)': 'emphasis',
    u'strong (translation required)': 'strong',
    u'literal (translation required)': 'literal',
    u'named-reference (translation required)': 'named-reference',
    u'anonymous-reference (translation required)': 'anonymous-reference',
    u'footnote-reference (translation required)': 'footnote-reference',
    u'citation-reference (translation required)': 'citation-reference',
    u'substitution-reference (translation required)': 'substitution-reference',
    u'target (translation required)': 'target',
    u'uri-reference (translation required)': 'uri-reference',
    u'uri (translation required)': 'uri-reference',
    u'url (translation required)': 'uri-reference',
    u'raw (translation required)': 'raw',}
"""Mapping of Czech role names to canonical role names for interpreted text.
"""
| apache-2.0 |
bongtrop/peach | tutorial/neural-networks/linear-prediction.py | 6 | 3386 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/linear-prediction.py
# Using neural networks to predict number sequences
################################################################################
# A neural network can be used to predict future values of a sequence of
# numbers. Wold's Decomposition Theorem stablishes that any sequence can be
# split in a regular and predictable part and an innovation process (which is
# discrete white noise, and thus impredictable). The goal of this tutorial is
# to show how to use the neural network implementation of Peach to do this.
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import random
import peach as p
# First, we create the network, with only one layer with only one neuron in it.
# The neuron has many inputs and only one output. The activation function is the
# identity. This kind of neuron is usually known as ADALINE (Adaptive Linear
# Neuron, later Adaptive Linear Element). We use as learning algorithm the LMS
# algorithm.
N = 32                                     # Number of past samples used for prediction.
nn = p.FeedForward((N, 1), phi=p.Identity, lrule=p.LMS(0.05))
# The lists below will track the values of the sequence being predicted and of
# the error for plotting.
xlog = [ ]
ylog = [ ]
elog = [ ]
error = 1.
i = 0
x = zeros((N, 1), dtype=float)             # Input is a column-vector.
# Train until convergence (error below threshold) or 2000 iterations.
while i < 2000 and error > 1.e-10:
    # The sequence we will predict is the one generated by a cossinus. The next
    # value of the function is the desired output of the neuron. The neuron will
    # use past values to predict the unknown value. To spice things, we add some
    # gaussian noise (actually, it might help the convergence).
    d = cos(2.*pi/128. * i) + random.gauss(0., 0.01)
    # Here, we activate the network to calculate the prediction.
    y = nn(x)[0, 0]            # Notice that we need to access the output
    error = abs(d - y)         # as a vector, since that's how the NN work.
    nn.learn(x, d)
    # We store the results to plot later.
    xlog.append(d)
    ylog.append(y)
    elog.append(error)
    # Here, we apply a delay in the sequence by shifting every value one
    # position back. We are using N (=32) samples to make the prediction, but
    # the code here makes no distinction and could be used with any number of
    # coefficients in the prediction. The last value of the sequence is put in
    # the [0] position of the vector.
    x[1:] = x[:-1]
    x[0] = d
    i = i + 1
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``linear-prediction.png``.
# NOTE(review): pylab.hold() was removed from recent matplotlib releases;
# this script appears to target an older matplotlib — confirm before reuse.
try:
    import pylab
    pylab.subplot(211)
    pylab.hold(True)
    pylab.grid(True)
    pylab.plot(array(xlog), 'b--')
    pylab.plot(array(ylog), 'g')
    pylab.plot(array(elog), 'r:')
    pylab.legend([ "$x$", "$y$", "$error$" ])
    pylab.subplot(212)
    pylab.grid(True)
    pylab.stem(arange(0, N), reshape(nn[0].weights, (N,)), "k-", "ko", "k-")
    pylab.xlim([0, N-1])
    pylab.savefig("linear-prediction.png")
except ImportError:
print "After %d iterations:" % (len(elog),)
print nn[0].weights | lgpl-2.1 |
SU-ECE-17-7/ibeis | ibeis/algo/hots/word_index.py | 1 | 9442 | # -*- coding: utf-8 -*-
"""
TODO: DEPRICATE OR REFACTOR INTO SMK
python -c "import doctest, ibeis; print(doctest.testmod(ibeis.algo.hots.word_index))"
python -m doctest -v ibeis/algo/hots/word_index.py
python -m doctest ibeis/algo/hots/word_index.py
"""
from __future__ import absolute_import, division, print_function
# Standard
import six
#from itertools import chain
# Science
import numpy as np
# UTool
import vtool
import utool
# VTool
import vtool.nearest_neighbors as nntool
(print, print_, printDBG, rrr_, profile) = utool.inject(__name__, '[entroid_index]')
NOCACHE_WORD = utool.get_argflag('--nocache-word')
# TODO:
class NeighborAssignment():
    """Stub for a future neighbor-assignment result object (not implemented)."""
    def __init__(asgn):
        # `asgn` is this file's convention for the instance name
        # (cf. `windex` in WordIndex below).
        pass
def test_windex():
    """Build a small WordIndex over the testdb1 database for doctests."""
    import ibeis
    from ibeis.algo.hots.query_request import new_ibeis_query_request
    database = ibeis.opendb(db='testdb1')
    annot_ids = [7, 8, 9, 10, 11]
    qreq_ = new_ibeis_query_request(database, annot_ids, annot_ids)
    windex = new_ibeis_windex(database, qreq_.get_internal_daids())
    return windex, qreq_, database
def new_word_index(aid_list=[], vecs_list=[], flann_params={},
                   flann_cachedir=None, indexer_cfgstr='', hash_rowids=True,
                   use_cache=not NOCACHE_WORD, use_params_hash=True):
    """Build a WordIndex over the stacked descriptors of ``aid_list``.

    Args:
        aid_list (list): annotation rowids to index
        vecs_list (list): one descriptor array per annotation
        flann_params (dict): parameters forwarded to the flann builder
        flann_cachedir (str): directory used by the flann cache
        indexer_cfgstr (str): config string identifying this index
        hash_rowids (bool): if True, mix a hash of ``aid_list`` into the cfgstr
        use_cache (bool): if True, reuse a cached flann structure
        use_params_hash (bool): if True, include flann params in the cache key

    Returns:
        WordIndex: inverted index wrapping the (possibly cached) flann tree

    NOTE(review): the mutable defaults (``[]``/``{}``) are never mutated here,
    and ``_check_input`` rejects the empty defaults anyway -- consider ``None``.
    """
    print('[windex] building WordIndex object')
    _check_input(aid_list, vecs_list)
    # Create indexes into the input aids
    ax_list = np.arange(len(aid_list))
    idx2_vec, idx2_ax, idx2_fx = invert_index(vecs_list, ax_list)
    if hash_rowids:
        # Fingerprint
        aids_hashstr = utool.hashstr_arr(aid_list, '_AIDS')
        cfgstr = aids_hashstr + indexer_cfgstr
    else:
        # Dont hash rowids when given enough info in indexer_cfgstr
        cfgstr = indexer_cfgstr
    # Build/Load the flann index
    flann = nntool.flann_cache(idx2_vec, **{
        'cache_dir': flann_cachedir,
        'cfgstr': cfgstr,
        'flann_params': flann_params,
        'use_cache': use_cache,
        'use_params_hash': use_params_hash})
    ax2_aid = np.array(aid_list)
    windex = WordIndex(ax2_aid, idx2_vec, idx2_ax, idx2_fx, flann)
    return windex
def new_ibeis_windex(ibs, daid_list):
    """
    IBEIS interface into word_index

    Derives a cache string from the database annotations and the current
    query config, then delegates to :func:`new_word_index`.

    Args:
        ibs: IBEIS controller instance
        daid_list (list): database annotation rowids to index

    Returns:
        WordIndex: index over the descriptors of ``daid_list``

    >>> from ibeis.algo.hots.word_index import *  # NOQA
    >>> windex, qreq_, ibs = test_windex()
    """
    daids_hashid = ibs.get_annot_hashid_visual_uuid(daid_list, 'D')
    flann_cfgstr = ibs.cfg.query_cfg.flann_cfg.get_cfgstr()
    feat_cfgstr = ibs.cfg.query_cfg._feat_cfg.get_cfgstr()
    # cfgstr uniquely identifies (data, flann params, feature params)
    indexer_cfgstr = daids_hashid + flann_cfgstr + feat_cfgstr
    try:
        # Grab the keypoints names and image ids before query time
        flann_params = ibs.cfg.query_cfg.flann_cfg.get_flann_params()
        # Get annotation descriptors that will be searched
        # FIXME; qreq_
        vecs_list = ibs.get_annot_vecs(daid_list)
        flann_cachedir = ibs.get_flann_cachedir()
        windex = new_word_index(
            daid_list, vecs_list, flann_params, flann_cachedir,
            indexer_cfgstr, hash_rowids=False, use_params_hash=False)
        return windex
    except Exception as ex:
        # Log a rich diagnostic before propagating the failure.
        utool.printex(ex, True, msg_='cannot build inverted index', key_list=['ibs.get_infostr()'])
        raise
def _check_input(aid_list, vecs_list):
assert len(aid_list) == len(vecs_list), 'invalid input'
assert len(aid_list) > 0, ('len(aid_list) == 0.'
'Cannot invert index without features!')
@six.add_metaclass(utool.ReloadingMetaclass)
class WordIndex(object):
    """
    Abstract wrapper around flann

    Maintains three aligned index spaces:
      * ``ax``  -- position of an annotation within the indexed aid_list
      * ``idx`` -- row of a stacked descriptor inside the flann structure
      * ``fx``  -- feature index within a single annotation

    Example:
        >>> from ibeis.algo.hots.word_index import *  # NOQA
        >>> windex, qreq_, ibs = test_windex()
    """

    def __init__(windex, ax2_aid, idx2_vec, idx2_ax, idx2_fx, flann):
        """Store the inverted-index arrays and the flann search structure."""
        windex.ax2_aid = ax2_aid    # (A x 1) Mapping to original annot ids
        windex.idx2_vec = idx2_vec  # (M x D) Descriptors to index
        windex.idx2_ax = idx2_ax    # (M x 1) Index into the aid_list
        windex.idx2_fx = idx2_fx    # (M x 1) Index into the annot's features
        windex.flann = flann        # Approximate search structure

    def knn(windex, qfx2_vec, K, checks=1028):
        """
        Args:
            qfx2_vec (ndarray): (N x D) array of N, D-dimensional query vectors
            K (int): number of approximate nearest words to find
            checks (int): flann search effort parameter

        Returns:
            tuple of (qfx2_idx, qfx2_dist)
                qfx2_idx (ndarray): (N x K) qfx2_idx[n][k] is the index of the kth
                    approximate nearest data vector w.r.t qfx2_vec[n]
                qfx2_dist (ndarray): (N x K) qfx2_dist[n][k] is the distance to the kth
                    approximate nearest data vector w.r.t. qfx2_vec[n]

        Example:
            >>> from ibeis.algo.hots.word_index import *  # NOQA
            >>> windex, qreq_, ibs = test_windex()
            >>> new_aid_list = [2, 3, 4]
            >>> qfx2_vec = ibs.get_annot_vecs(1, config2_=qreq_.get_internal_query_config2())
            >>> K = 2
            >>> checks = 1028
            >>> (qfx2_idx, qfx2_dist) = windex.knn(qfx2_vec, K, checks=checks)
        """
        (qfx2_idx, qfx2_dist) = windex.flann.nn_index(qfx2_vec, K, checks=checks)
        return (qfx2_idx, qfx2_dist)

    @staticmethod
    def empty_words(K):
        """Return an empty ``(qfx2_idx, qfx2_dist)`` pair of shape (0 x K).

        BUGFIX: the original declaration omitted the instance argument, so
        calling ``windex.empty_words(K)`` bound the instance to ``K``.
        Declaring it a staticmethod makes both instance and class calls work.
        """
        qfx2_idx = np.empty((0, K), dtype=np.int32)
        qfx2_dist = np.empty((0, K), dtype=np.float64)
        return (qfx2_idx, qfx2_dist)

    def add_points(windex, new_aid_list, new_vecs_list):
        """
        Add new annotations (and their descriptors) to the index in place.

        Example:
            >>> from ibeis.algo.hots.word_index import *  # NOQA
            >>> windex, qreq_, ibs = test_windex()
            >>> new_aid_list = [2, 3, 4]
            >>> qfx2_vec = ibs.get_annot_vecs(1, config2_=qreq_.get_internal_query_config2())
            >>> new_vecs_list = ibs.get_annot_vecs(new_aid_list, config2_=qreq_.get_internal_data_config2())
            >>> K = 2
            >>> checks = 1028
            >>> (qfx2_idx1, qfx2_dist1) = windex.knn(qfx2_vec, K, checks=checks)
            >>> windex.add_points(new_aid_list, new_vecs_list)
            >>> (qfx2_idx2, qfx2_dist2) = windex.knn(qfx2_vec, K, checks=checks)
            >>> assert qfx2_idx2.max() > qfx2_idx1.max()
        """
        nAnnots = windex.num_indexed_annots()
        nNew = len(new_aid_list)
        # New annotations occupy the next nNew ax slots.
        new_ax_list = np.arange(nAnnots, nAnnots + nNew)
        new_idx2_vec, new_idx2_ax, new_idx2_fx = \
            invert_index(new_vecs_list, new_ax_list)
        # Stack inverted information
        _ax2_aid = np.hstack((windex.ax2_aid, new_aid_list))
        _idx2_ax = np.hstack((windex.idx2_ax, new_idx2_ax))
        _idx2_fx = np.hstack((windex.idx2_fx, new_idx2_fx))
        _idx2_vec = np.vstack((windex.idx2_vec, new_idx2_vec))
        windex.ax2_aid = _ax2_aid
        windex.idx2_ax = _idx2_ax
        windex.idx2_vec = _idx2_vec
        windex.idx2_fx = _idx2_fx
        #windex.idx2_kpts = None
        #windex.idx2_oris = None
        # Add new points to flann structure
        windex.flann.add_points(new_idx2_vec)

    def num_indexed_vecs(windex):
        """Number of stacked descriptor rows in the index."""
        return len(windex.idx2_vec)

    def num_indexed_annots(windex):
        """Number of annotations covered by the index."""
        return len(windex.ax2_aid)

    def get_nn_axs(windex, qfx2_nnidx):
        """Map neighbor indices to annotation-index (ax) space."""
        #return windex.idx2_ax[qfx2_nnidx]
        return windex.idx2_ax.take(qfx2_nnidx)

    def get_nn_aids(windex, qfx2_nnidx):
        """
        Args:
            qfx2_nnidx (ndarray): (N x K) qfx2_idx[n][k] is the index of the kth
                approximate nearest data vector

        Returns:
            ndarray: qfx2_aid - (N x K) qfx2_fx[n][k] is the annotation id index
                of the kth approximate nearest data vector
        """
        #qfx2_ax = windex.idx2_ax[qfx2_nnidx]
        #qfx2_aid = windex.ax2_aid[qfx2_ax]
        qfx2_ax = windex.idx2_ax.take(qfx2_nnidx)
        qfx2_aid = windex.ax2_aid.take(qfx2_ax)
        return qfx2_aid

    def get_nn_featxs(windex, qfx2_nnidx):
        """
        Args:
            qfx2_nnidx (ndarray): (N x K) qfx2_idx[n][k] is the index of the kth
                approximate nearest data vector

        Returns:
            ndarray: qfx2_fx - (N x K) qfx2_fx[n][k] is the feature index (w.r.t
                the source annotation) of the kth approximate nearest data vector
        """
        #return windex.idx2_fx[qfx2_nnidx]
        return windex.idx2_fx.take(qfx2_nnidx)
def invert_index(vecs_list, ax_list):
    """
    Aggregates descriptors of input annotations and returns inverted information

    Args:
        vecs_list (list): one descriptor array per annotation
        ax_list (ndarray): annotation index assigned to each entry of vecs_list

    Returns:
        tuple: (idx2_vec, idx2_ax, idx2_fx) -- the stacked descriptors plus,
        for each stacked row, the annotation index and feature index it
        originated from
    """
    if utool.NOT_QUIET:
        print('[hsnbrx] stacking descriptors from %d annotations'
              % len(ax_list))
    try:
        idx2_vec, idx2_ax, idx2_fx = nntool.invertible_stack(vecs_list, ax_list)
        # The three output arrays must stay row-aligned.
        assert idx2_vec.shape[0] == idx2_ax.shape[0]
        assert idx2_vec.shape[0] == idx2_fx.shape[0]
    except MemoryError as ex:
        utool.printex(ex, 'cannot build inverted index', '[!memerror]')
        raise
    if utool.NOT_QUIET:
        print('stacked nVecs={nVecs} from nAnnots={nAnnots}'.format(
            nVecs=len(idx2_vec), nAnnots=len(ax_list)))
    return idx2_vec, idx2_ax, idx2_fx
def vlad(qfx2_vec, qfx2_cvec):
    """Aggregate residuals of vectors against their assigned centroids (VLAD)
    and L2-normalize the aggregate."""
    residuals = qfx2_cvec - qfx2_vec
    return vtool.l2normalize(residuals.sum(axis=0))
#if __name__ == '__main__':
# #python -m doctest -v ibeis/algo/hots/word_index.py
# import doctest
# doctest.testmod()
| apache-2.0 |
ChristopherHogan/pip | pip/_vendor/html5lib/treewalkers/genshistream.py | 1730 | 2278 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
    """Walks a genshi event stream and emits html5lib treewalker tokens."""

    def __iter__(self):
        # Buffer the events so we can pass in the following one
        # (tokens() needs one event of lookahead to detect empty elements).
        previous = None
        for event in self.tree:
            if previous is not None:
                for token in self.tokens(previous, event):
                    yield token
            previous = event
        # Don't forget the final event!
        if previous is not None:
            for token in self.tokens(previous, None):
                yield token

    def tokens(self, event, next):
        """Translate one genshi ``(kind, data, pos)`` event into html5lib tokens.

        :param event: the genshi event to translate
        :param next: the following event, or None at end of stream
        """
        kind, data, pos = event
        if kind == START:
            tag, attribs = data
            name = tag.localname
            namespace = tag.namespace
            # Re-key attributes as (namespace, localname) tuples.
            converted_attribs = {}
            for k, v in attribs:
                if isinstance(k, QName):
                    converted_attribs[(k.namespace, k.localname)] = v
                else:
                    converted_attribs[(None, k)] = v
            if namespace == namespaces["html"] and name in voidElements:
                # Void HTML elements become empty tags; the boolean argument
                # flags an error when the element unexpectedly has content
                # (i.e. the next event is not its matching END).
                for token in self.emptyTag(namespace, name, converted_attribs,
                                           not next or next[0] != END
                                           or next[1] != tag):
                    yield token
            else:
                yield self.startTag(namespace, name, converted_attribs)
        elif kind == END:
            name = data.localname
            namespace = data.namespace
            if name not in voidElements:
                yield self.endTag(namespace, name)
        elif kind == COMMENT:
            yield self.comment(data)
        elif kind == TEXT:
            for token in self.text(data):
                yield token
        elif kind == DOCTYPE:
            yield self.doctype(*data)
        elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
                      START_CDATA, END_CDATA, PI):
            # Namespace/CDATA/PI bookkeeping events have no HTML token
            # equivalent and are dropped.
            # NOTE(review): DOCTYPE in this tuple is unreachable -- it is
            # already handled by the branch above.
            pass
        else:
            yield self.unknown(kind)
| mit |
faneshion/MatchZoo | matchzoo/engine/base_preprocessor.py | 1 | 4116 | """:class:`BasePreprocessor` define input and ouutput for processors."""
import abc
import functools
import typing
from pathlib import Path
import dill
import matchzoo as mz
def validate_context(func):
    """Decorator: refuse to run *func* until ``fit`` has populated ``context``."""
    @functools.wraps(func)
    def transform_wrapper(self, *args, **kwargs):
        if self.context:
            return func(self, *args, **kwargs)
        raise ValueError('Please call `fit` before calling `transform`.')
    return transform_wrapper
class BasePreprocessor(metaclass=abc.ABCMeta):
    """
    :class:`BasePreprocessor` to input handle data.

    A preprocessor should be used in two steps. First, `fit`, then,
    `transform`. `fit` collects information into `context`, which includes
    everything the preprocessor needs to `transform` together with other
    useful information for later use. `fit` will only change the
    preprocessor's inner state but not the input data. In contrast,
    `transform` returns a modified copy of the input data without changing
    the preprocessor's inner state.
    """

    # File name used when persisting a fitted preprocessor with `save`.
    DATA_FILENAME = 'preprocessor.dill'

    def __init__(self):
        """Initialization."""
        self._context = {}

    @property
    def context(self):
        """Return context."""
        return self._context

    @abc.abstractmethod
    def fit(
        self,
        data_pack: 'mz.DataPack',
        verbose: int = 1
    ) -> 'BasePreprocessor':
        """
        Fit parameters on input data.

        This method is an abstract base method, need to be
        implemented in the child class.

        This method is expected to return itself as a callable
        object.

        :param data_pack: :class:`DataPack` object to be fitted.
        :param verbose: Verbosity.
        """

    @abc.abstractmethod
    def transform(
        self,
        data_pack: 'mz.DataPack',
        verbose: int = 1
    ) -> 'mz.DataPack':
        """
        Transform input data to expected manner.

        This method is an abstract base method, need to be
        implemented in the child class.

        :param data_pack: :class:`DataPack` object to be transformed.
        :param verbose: Verbosity.
        """

    def fit_transform(
        self,
        data_pack: 'mz.DataPack',
        verbose: int = 1
    ) -> 'mz.DataPack':
        """
        Call fit-transform.

        :param data_pack: :class:`DataPack` object to be processed.
        :param verbose: Verbosity.
        """
        return self.fit(data_pack, verbose=verbose) \
            .transform(data_pack, verbose=verbose)

    def save(self, dirpath: typing.Union[str, Path]):
        """
        Save the :class:`BasePreprocessor` object.

        A saved preprocessor is represented as a directory with
        the `context` object (fitted parameters on training data), it will
        be saved by `pickle`.

        :param dirpath: directory path of the saved preprocessor.
        :raises FileExistsError: if a saved instance already exists there.
        """
        dirpath = Path(dirpath)
        data_file_path = dirpath.joinpath(self.DATA_FILENAME)
        if data_file_path.exists():
            raise FileExistsError(
                f'{data_file_path} instance exist, fail to save.')
        elif not dirpath.exists():
            dirpath.mkdir()
        # Use a context manager so the file handle is closed even when
        # pickling fails (the original leaked the open handle).
        with open(data_file_path, mode='wb') as data_file:
            dill.dump(self, data_file)

    @classmethod
    def _default_units(cls) -> list:
        """Prepare needed process units."""
        return [
            mz.preprocessors.units.tokenize.Tokenize(),
            mz.preprocessors.units.lowercase.Lowercase(),
            mz.preprocessors.units.punc_removal.PuncRemoval(),
        ]
def load_preprocessor(dirpath: typing.Union[str, Path]) -> 'BasePreprocessor':
    """
    Load the fitted `context`. The reverse function of :meth:`save`.

    :param dirpath: directory path of the saved model.
    :return: the unpickled :class:`BasePreprocessor` instance.
    """
    dirpath = Path(dirpath)
    data_file_path = dirpath.joinpath(BasePreprocessor.DATA_FILENAME)
    # Close the file handle deterministically (the original leaked it).
    with open(data_file_path, mode='rb') as data_file:
        return dill.load(data_file)
| apache-2.0 |
Russell-IO/ansible | lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py | 53 | 11516 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms.
- Metrics you wish to alarm on must already exist.
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of times in which the metric is evaluated before final calculation
required: false
unit:
description:
- The threshold's unit of measurement
required: false
choices:
- 'Seconds'
- 'Microseconds'
- 'Milliseconds'
- 'Bytes'
- 'Kilobytes'
- 'Megabytes'
- 'Gigabytes'
- 'Terabytes'
- 'Bits'
- 'Kilobits'
- 'Megabits'
- 'Gigabits'
- 'Terabits'
- 'Percent'
- 'Count'
- 'Bytes/Second'
- 'Kilobytes/Second'
- 'Megabytes/Second'
- 'Gigabytes/Second'
- 'Terabytes/Second'
- 'Bits/Second'
- 'Kilobits/Second'
- 'Megabits/Second'
- 'Gigabits/Second'
- 'Terabits/Second'
- 'Count/Second'
- 'None'
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
- A list of the names action(s) taken when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import MetricAlarm
from boto.exception import BotoServerError, NoAuthHandlerFound
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def create_metric_alarm(connection, module):
    """Create the alarm if it does not exist, otherwise reconcile the existing
    alarm with the module parameters, then exit with the resulting state.

    :param connection: boto CloudWatch connection
    :param module: AnsibleModule carrying the alarm parameters
    """
    name = module.params.get('name')
    metric = module.params.get('metric')
    namespace = module.params.get('namespace')
    statistic = module.params.get('statistic')
    comparison = module.params.get('comparison')
    threshold = module.params.get('threshold')
    period = module.params.get('period')
    evaluation_periods = module.params.get('evaluation_periods')
    unit = module.params.get('unit')
    description = module.params.get('description')
    dimensions = module.params.get('dimensions')
    alarm_actions = module.params.get('alarm_actions')
    insufficient_data_actions = module.params.get('insufficient_data_actions')
    ok_actions = module.params.get('ok_actions')
    alarms = connection.describe_alarms(alarm_names=[name])
    if not alarms:
        # No existing alarm: create one from scratch.
        alm = MetricAlarm(
            name=name,
            metric=metric,
            namespace=namespace,
            statistic=statistic,
            comparison=comparison,
            threshold=threshold,
            period=period,
            evaluation_periods=evaluation_periods,
            unit=unit,
            description=description,
            dimensions=dimensions,
            alarm_actions=alarm_actions,
            insufficient_data_actions=insufficient_data_actions,
            ok_actions=ok_actions
        )
        try:
            connection.create_alarm(alm)
            changed = True
            alarms = connection.describe_alarms(alarm_names=[name])
        except BotoServerError as e:
            module.fail_json(msg=str(e))
    else:
        # Existing alarm: diff each attribute against the requested params.
        alarm = alarms[0]
        changed = False
        # NOTE(review): boto stores `comparison` in its long form (e.g.
        # 'LessThanThreshold') while the module param is a symbol ('<='),
        # so this loop appears to always flag a change for that attribute --
        # verify against the boto version in use.
        for attr in ('comparison', 'metric', 'namespace', 'statistic', 'threshold', 'period', 'evaluation_periods', 'unit', 'description'):
            if getattr(alarm, attr) != module.params.get(attr):
                changed = True
                setattr(alarm, attr, module.params.get(attr))
        # this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
        comparison = alarm.comparison
        comparisons = {'<=': 'LessThanOrEqualToThreshold', '<': 'LessThanThreshold', '>=': 'GreaterThanOrEqualToThreshold', '>': 'GreaterThanThreshold'}
        alarm.comparison = comparisons[comparison]
        # Normalize requested dimension values to lists before comparing.
        dim1 = module.params.get('dimensions')
        dim2 = alarm.dimensions
        for keys in dim1:
            if not isinstance(dim1[keys], list):
                dim1[keys] = [dim1[keys]]
            if keys not in dim2 or dim1[keys] != dim2[keys]:
                changed = True
                setattr(alarm, 'dimensions', dim1)
        for attr in ('alarm_actions', 'insufficient_data_actions', 'ok_actions'):
            action = module.params.get(attr) or []
            # Boto and/or ansible may provide same elements in lists but in different order.
            # Compare on sets since they do not need any order.
            if set(getattr(alarm, attr)) != set(action):
                changed = True
                setattr(alarm, attr, module.params.get(attr))
        try:
            if changed:
                connection.create_alarm(alarm)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
    # `alarms[0]` is the freshly described alarm on creation, or the in-place
    # modified alarm object on update.
    result = alarms[0]
    module.exit_json(changed=changed, name=result.name,
                     actions_enabled=result.actions_enabled,
                     alarm_actions=result.alarm_actions,
                     alarm_arn=result.alarm_arn,
                     comparison=result.comparison,
                     description=result.description,
                     dimensions=result.dimensions,
                     evaluation_periods=result.evaluation_periods,
                     insufficient_data_actions=result.insufficient_data_actions,
                     last_updated=result.last_updated,
                     metric=result.metric,
                     namespace=result.namespace,
                     ok_actions=result.ok_actions,
                     period=result.period,
                     state_reason=result.state_reason,
                     state_value=result.state_value,
                     statistic=result.statistic,
                     threshold=result.threshold,
                     unit=result.unit)
def delete_metric_alarm(connection, module):
    """Delete the CloudWatch alarm named in the module params, if present,
    and exit reporting whether anything changed."""
    alarm_name = module.params.get('name')
    matching = connection.describe_alarms(alarm_names=[alarm_name])
    if not matching:
        module.exit_json(changed=False)
    else:
        try:
            connection.delete_alarms([alarm_name])
            module.exit_json(changed=True)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
def main():
    """Entry point: parse module arguments and create/delete the alarm."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            metric=dict(type='str'),
            namespace=dict(type='str'),
            statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
            comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
            threshold=dict(type='float'),
            period=dict(type='int'),
            unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes',
                                           'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second',
                                           'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
                                           'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
            evaluation_periods=dict(type='int'),
            description=dict(type='str'),
            dimensions=dict(type='dict', default={}),
            alarm_actions=dict(type='list'),
            insufficient_data_actions=dict(type='list'),
            ok_actions=dict(type='list'),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    state = module.params.get('state')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
        except (NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        # CloudWatch requires an explicit region.
        module.fail_json(msg="region must be specified")
    if state == 'present':
        create_metric_alarm(connection, module)
    elif state == 'absent':
        delete_metric_alarm(connection, module)


if __name__ == '__main__':
    main()
| gpl-3.0 |
bolkedebruin/airflow | airflow/operators/hive_stats_operator.py | 1 | 1212 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.apache.hive.operators.hive_stats`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.hive.operators.hive_stats import HiveStatsCollectionOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.hive.operators.hive_stats`.",
DeprecationWarning, stacklevel=2
)
| apache-2.0 |
domeger/SplunkTAforPuppetEnterprise | bin/SplunkTAforPuppetEnterprise_rh_puppet_enterprise_overview_enforcement.py | 1 | 1978 |
import splunktaforpuppetenterprise_declare
from splunktaucclib.rest_handler.endpoint import (
field,
validator,
RestModel,
DataInputModel,
)
from splunktaucclib.rest_handler import admin_external, util
from splunk_aoblib.rest_migration import ConfigMigrationHandler
util.remove_http_proxy_env_vars()
# Input stanza definition for the "puppet_enterprise_overview_enforcement"
# modular input. Field names ending in '_' mirror the add-on's setup UI.
fields = [
    field.RestField(
        'interval',
        required=True,
        encrypted=False,
        default=None,
        # Polling interval: a non-negative integer, or a negative integer
        # (e.g. -1 to disable scheduled runs).
        validator=validator.Pattern(
            regex=r"""^\-[1-9]\d*$|^\d*$""",
        )
    ),
    field.RestField(
        'index',
        required=True,
        encrypted=False,
        default='default',
        validator=validator.String(
            min_len=1,
            max_len=80,
        )
    ),
    field.RestField(
        'puppet_enterprise_server_',
        required=True,
        encrypted=False,
        default=None,
        validator=validator.String(
            min_len=0,
            max_len=8192,
        )
    ),
    field.RestField(
        'server_',
        required=True,
        encrypted=False,
        default=None,
        validator=validator.String(
            min_len=0,
            max_len=8192,
        )
    ),
    field.RestField(
        'token_',
        required=True,
        # API token is stored encrypted by the credential store.
        encrypted=True,
        default=None,
        validator=validator.String(
            min_len=0,
            max_len=8192,
        )
    ),
    field.RestField(
        'port_',
        required=True,
        encrypted=False,
        default=None,
        validator=validator.String(
            min_len=0,
            max_len=8192,
        )
    ),
    field.RestField(
        'disabled',
        required=False,
        validator=None
    )
]
model = RestModel(fields, name=None)
endpoint = DataInputModel(
    'puppet_enterprise_overview_enforcement',
    model,
)
if __name__ == '__main__':
    admin_external.handle(
        endpoint,
        handler=ConfigMigrationHandler,
    )
| apache-2.0 |
ubc/edx-platform | openedx/core/djangoapps/user_api/preferences/tests/test_api.py | 39 | 17811 | # -*- coding: utf-8 -*-
"""
Unit tests for preference APIs.
"""
import datetime
import ddt
import unittest
from mock import patch
from pytz import UTC
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from dateutil.parser import parse as parse_datetime
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ...accounts.api import create_account
from ...errors import UserNotFound, UserNotAuthorized, PreferenceValidationError, PreferenceUpdateError
from ...models import UserProfile, UserOrgTag
from ...preferences.api import (
get_user_preference, get_user_preferences, set_user_preference, update_user_preferences, delete_user_preference,
update_email_opt_in
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Account APIs are only supported in LMS')
class TestPreferenceAPI(TestCase):
"""
These tests specifically cover the parts of the API methods that are not covered by test_views.py.
This includes the specific types of error raised, and default behavior when optional arguments
are not specified.
"""
password = "test"
    def setUp(self):
        """Create the users and the seed preference shared by all tests."""
        super(TestPreferenceAPI, self).setUp()
        self.user = UserFactory.create(password=self.password)
        self.different_user = UserFactory.create(password=self.password)
        self.staff_user = UserFactory(is_staff=True, password=self.password)
        # A user object whose username does not exist in the database.
        self.no_such_user = UserFactory.create(password=self.password)
        self.no_such_user.username = "no_such_user"
        self.test_preference_key = "test_key"
        self.test_preference_value = "test_value"
        set_user_preference(self.user, self.test_preference_key, self.test_preference_value)
    def test_get_user_preference(self):
        """
        Verifies the basic behavior of get_user_preference.
        """
        # The owner can read their own preference.
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            self.test_preference_value
        )
        # Staff can read another user's preference by username.
        self.assertEqual(
            get_user_preference(self.staff_user, self.test_preference_key, username=self.user.username),
            self.test_preference_value
        )
    def test_get_user_preference_errors(self):
        """
        Verifies that get_user_preference returns appropriate errors.
        """
        # Unknown target username.
        with self.assertRaises(UserNotFound):
            get_user_preference(self.user, self.test_preference_key, username="no_such_user")
        # Requesting user does not exist in the database.
        with self.assertRaises(UserNotFound):
            get_user_preference(self.no_such_user, self.test_preference_key)
        # A non-staff user may not read someone else's preference.
        with self.assertRaises(UserNotAuthorized):
            get_user_preference(self.different_user, self.test_preference_key, username=self.user.username)
    def test_get_user_preferences(self):
        """
        Verifies the basic behavior of get_user_preferences.
        """
        expected_user_preferences = {
            self.test_preference_key: self.test_preference_value,
        }
        # Owner access and staff-on-behalf-of access return the same mapping.
        self.assertEqual(get_user_preferences(self.user), expected_user_preferences)
        self.assertEqual(get_user_preferences(self.staff_user, username=self.user.username), expected_user_preferences)
    def test_get_user_preferences_errors(self):
        """
        Verifies that get_user_preferences returns appropriate errors.
        """
        # Unknown target username.
        with self.assertRaises(UserNotFound):
            get_user_preferences(self.user, username="no_such_user")
        # Requesting user does not exist in the database.
        with self.assertRaises(UserNotFound):
            get_user_preferences(self.no_such_user)
        # A non-staff user may not read someone else's preferences.
        with self.assertRaises(UserNotAuthorized):
            get_user_preferences(self.different_user, username=self.user.username)
    def test_set_user_preference(self):
        """
        Verifies the basic behavior of set_user_preference.
        """
        # Unicode keys and values are supported.
        test_key = u'ⓟⓡⓔⓕⓔⓡⓔⓝⓒⓔ_ⓚⓔⓨ'
        test_value = u'ǝnןɐʌ_ǝɔuǝɹǝɟǝɹd'
        set_user_preference(self.user, test_key, test_value)
        self.assertEqual(get_user_preference(self.user, test_key), test_value)
        # Setting again (with explicit username) overwrites the value.
        set_user_preference(self.user, test_key, "new_value", username=self.user.username)
        self.assertEqual(get_user_preference(self.user, test_key), "new_value")
    @patch('openedx.core.djangoapps.user_api.models.UserPreference.save')
    def test_set_user_preference_errors(self, user_preference_save):
        """
        Verifies that set_user_preference returns appropriate errors.
        """
        # Not-found and not-authorized cases mirror the getter tests.
        with self.assertRaises(UserNotFound):
            set_user_preference(self.user, self.test_preference_key, "new_value", username="no_such_user")
        with self.assertRaises(UserNotFound):
            set_user_preference(self.no_such_user, self.test_preference_key, "new_value")
        with self.assertRaises(UserNotAuthorized):
            set_user_preference(self.staff_user, self.test_preference_key, "new_value", username=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            set_user_preference(self.different_user, self.test_preference_key, "new_value", username=self.user.username)
        # Keys longer than 255 characters fail validation.
        too_long_key = "x" * 256
        with self.assertRaises(PreferenceValidationError) as context_manager:
            set_user_preference(self.user, too_long_key, "new_value")
        errors = context_manager.exception.preference_errors
        self.assertEqual(len(errors.keys()), 1)
        self.assertEqual(
            errors[too_long_key],
            {
                "developer_message": get_expected_validation_developer_message(too_long_key, "new_value"),
                "user_message": get_expected_key_error_user_message(too_long_key, "new_value"),
            }
        )
        # Empty or whitespace-only values are rejected.
        for empty_value in (None, "", "   "):
            with self.assertRaises(PreferenceValidationError) as context_manager:
                set_user_preference(self.user, self.test_preference_key, empty_value)
            errors = context_manager.exception.preference_errors
            self.assertEqual(len(errors.keys()), 1)
            self.assertEqual(
                errors[self.test_preference_key],
                {
                    "developer_message": get_empty_preference_message(self.test_preference_key),
                    "user_message": get_empty_preference_message(self.test_preference_key),
                }
            )
        # A failing model save surfaces as PreferenceUpdateError.
        user_preference_save.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            set_user_preference(self.user, u"new_key_ȻħȺɍłɇs", u"new_value_ȻħȺɍłɇs")
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Save failed for user preference 'new_key_ȻħȺɍłɇs' with value 'new_value_ȻħȺɍłɇs': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Save failed for user preference 'new_key_ȻħȺɍłɇs' with value 'new_value_ȻħȺɍłɇs'."
        )
def test_update_user_preferences(self):
"""
Verifies the basic behavior of update_user_preferences.
"""
expected_user_preferences = {
self.test_preference_key: "new_value",
}
set_user_preference(self.user, self.test_preference_key, "new_value")
self.assertEqual(
get_user_preference(self.user, self.test_preference_key),
"new_value"
)
set_user_preference(self.user, self.test_preference_key, "new_value", username=self.user.username)
self.assertEqual(
get_user_preference(self.user, self.test_preference_key),
"new_value"
)
    @patch('openedx.core.djangoapps.user_api.models.UserPreference.delete')
    @patch('openedx.core.djangoapps.user_api.models.UserPreference.save')
    def test_update_user_preferences_errors(self, user_preference_save, user_preference_delete):
        """
        Verifies that update_user_preferences raises appropriate errors
        for unknown users, unauthorized requesters, invalid keys/values,
        and underlying model save/delete failures.
        """
        update_data = {
            self.test_preference_key: "new_value"
        }
        # Unknown target username / unknown requesting user.
        with self.assertRaises(UserNotFound):
            update_user_preferences(self.user, update_data, username="no_such_user")
        with self.assertRaises(UserNotFound):
            update_user_preferences(self.no_such_user, update_data)
        # Neither staff nor an unrelated user may update another user's preferences.
        with self.assertRaises(UserNotAuthorized):
            update_user_preferences(self.staff_user, update_data, username=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            update_user_preferences(self.different_user, update_data, username=self.user.username)
        # Preference keys are limited to 255 characters; one over triggers a
        # per-key validation error.
        too_long_key = "x" * 256
        with self.assertRaises(PreferenceValidationError) as context_manager:
            update_user_preferences(self.user, {too_long_key: "new_value"})
        errors = context_manager.exception.preference_errors
        self.assertEqual(len(errors.keys()), 1)
        self.assertEqual(
            errors[too_long_key],
            {
                "developer_message": get_expected_validation_developer_message(too_long_key, "new_value"),
                "user_message": get_expected_key_error_user_message(too_long_key, "new_value"),
            }
        )
        # Empty / whitespace-only values are rejected (None is excluded here
        # because update_user_preferences treats None as a delete request).
        for empty_value in ("", "   "):
            with self.assertRaises(PreferenceValidationError) as context_manager:
                update_user_preferences(self.user, {self.test_preference_key: empty_value})
            errors = context_manager.exception.preference_errors
            self.assertEqual(len(errors.keys()), 1)
            self.assertEqual(
                errors[self.test_preference_key],
                {
                    "developer_message": get_empty_preference_message(self.test_preference_key),
                    "user_message": get_empty_preference_message(self.test_preference_key),
                }
            )
        # Patched save fails on the first call only; the resulting error
        # carries both developer- and user-facing messages.
        user_preference_save.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            update_user_preferences(self.user, {self.test_preference_key: "new_value"})
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Save failed for user preference 'test_key' with value 'new_value': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Save failed for user preference 'test_key' with value 'new_value'."
        )
        # A None value means delete; a failing delete surfaces the same way.
        user_preference_delete.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            update_user_preferences(self.user, {self.test_preference_key: None})
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Delete failed for user preference 'test_key': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Delete failed for user preference 'test_key'."
        )
def test_delete_user_preference(self):
"""
Verifies the basic behavior of delete_user_preference.
"""
self.assertTrue(delete_user_preference(self.user, self.test_preference_key))
set_user_preference(self.user, self.test_preference_key, self.test_preference_value)
self.assertTrue(delete_user_preference(self.user, self.test_preference_key, username=self.user.username))
self.assertFalse(delete_user_preference(self.user, "no_such_key"))
    @patch('openedx.core.djangoapps.user_api.models.UserPreference.delete')
    def test_delete_user_preference_errors(self, user_preference_delete):
        """
        Verifies that delete_user_preference raises appropriate errors for
        unknown users, unauthorized requesters, and model delete failures.
        """
        # Unknown target username / unknown requesting user.
        with self.assertRaises(UserNotFound):
            delete_user_preference(self.user, self.test_preference_key, username="no_such_user")
        with self.assertRaises(UserNotFound):
            delete_user_preference(self.no_such_user, self.test_preference_key)
        # Neither staff nor an unrelated user may delete another user's preference.
        with self.assertRaises(UserNotAuthorized):
            delete_user_preference(self.staff_user, self.test_preference_key, username=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            delete_user_preference(self.different_user, self.test_preference_key, username=self.user.username)
        # Patched delete fails on the first call only; the resulting error
        # carries both developer- and user-facing messages.
        user_preference_delete.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            delete_user_preference(self.user, self.test_preference_key)
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Delete failed for user preference 'test_key': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Delete failed for user preference 'test_key'."
        )
@ddt.ddt
class UpdateEmailOptInTests(ModuleStoreTestCase):
    """
    Tests for update_email_opt_in, which records an org-scoped
    'email-optin' tag whose value depends on a minimum-age requirement.
    """
    USERNAME = u'frank-underwood'
    PASSWORD = u'ṕáśśẃőŕd'
    EMAIL = u'frank+underwood@example.com'

    @ddt.data(
        # Check that a 27 year old can opt-in
        (27, True, u"True"),
        # Check that a 32-year old can opt-out
        (32, False, u"False"),
        # Check that someone 14 years old can opt-in
        (14, True, u"True"),
        # Check that someone 13 years old cannot opt-in (must have turned 13 before this year)
        (13, True, u"False"),
        # Check that someone 12 years old cannot opt-in
        (12, True, u"False")
    )
    @ddt.unpack
    @override_settings(EMAIL_OPTIN_MINIMUM_AGE=13)
    def test_update_email_optin(self, age, option, expected_result):
        """Opt in/out at various ages and verify the stored tag value."""
        # Create the course and account.
        course = CourseFactory.create()
        create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Set year of birth.  Fix: use the UTC year (as test_change_email_optin
        # already does) so the computed age cannot disagree with it -- a naive
        # now() can report a different year around New Year in non-UTC
        # server timezones, flipping the age-gate result.
        user = User.objects.get(username=self.USERNAME)
        profile = UserProfile.objects.get(user=user)
        year_of_birth = datetime.datetime.now(UTC).year - age
        profile.year_of_birth = year_of_birth
        profile.save()
        update_email_opt_in(user, course.id.org, option)
        result_obj = UserOrgTag.objects.get(user=user, org=course.id.org, key='email-optin')
        self.assertEqual(result_obj.value, expected_result)

    def test_update_email_optin_no_age_set(self):
        """The API still works (opts in) if no age is specified."""
        # Create the course and account.
        course = CourseFactory.create()
        create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        user = User.objects.get(username=self.USERNAME)
        update_email_opt_in(user, course.id.org, True)
        result_obj = UserOrgTag.objects.get(user=user, org=course.id.org, key='email-optin')
        self.assertEqual(result_obj.value, u"True")

    def test_update_email_optin_anonymous_user(self):
        """Verify that the API raises an exception for a user with no profile."""
        course = CourseFactory.create()
        no_profile_user, __ = User.objects.get_or_create(username="no_profile_user", password=self.PASSWORD)
        with self.assertRaises(UserNotFound):
            update_email_opt_in(no_profile_user, course.id.org, True)

    @ddt.data(
        # Check that a 27 year old can opt-in, then out.
        (27, True, False, u"False"),
        # Check that a 32-year old can opt-out, then in.
        (32, False, True, u"True"),
        # Check that someone 13 years old can opt-in, then out.
        (13, True, False, u"False"),
        # Check that someone 12 years old cannot opt-in, then explicitly out.
        (12, True, False, u"False")
    )
    @ddt.unpack
    @override_settings(EMAIL_OPTIN_MINIMUM_AGE=13)
    def test_change_email_optin(self, age, option, second_option, expected_result):
        """Opt in/out twice in a row; the second choice wins (age permitting)."""
        # Create the course and account.
        course = CourseFactory.create()
        create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Set year of birth (UTC-based, consistent with test_update_email_optin).
        user = User.objects.get(username=self.USERNAME)
        profile = UserProfile.objects.get(user=user)
        year_of_birth = datetime.datetime.now(UTC).year - age
        profile.year_of_birth = year_of_birth
        profile.save()
        update_email_opt_in(user, course.id.org, option)
        update_email_opt_in(user, course.id.org, second_option)
        result_obj = UserOrgTag.objects.get(user=user, org=course.id.org, key='email-optin')
        self.assertEqual(result_obj.value, expected_result)

    def _assert_is_datetime(self, timestamp):
        """Return True if timestamp is a non-empty, parseable datetime string."""
        if not timestamp:
            return False
        try:
            parse_datetime(timestamp)
        except ValueError:
            return False
        else:
            return True
def get_expected_validation_developer_message(preference_key, preference_value):
    """
    Return the developer message expected when a 256-character preference
    key fails validation.
    """
    # The error payload is rendered via str() of this dict inside the message.
    length_error = {
        "key": [u"Ensure this value has at most 255 characters (it has 256)."]
    }
    template = u"Value '{preference_value}' not valid for preference '{preference_key}': {error}"
    return template.format(
        preference_value=preference_value,
        preference_key=preference_key,
        error=length_error,
    )
def get_expected_key_error_user_message(preference_key, preference_value):
    """
    Return the user-facing message expected for an invalid preference key.

    preference_value is accepted for signature parity with the developer
    message helper but does not appear in the message.
    """
    return u"Invalid user preference key '{0}'.".format(preference_key)
def get_empty_preference_message(preference_key):
    """Return the validation message shown when a preference value is empty."""
    return "Preference '%s' cannot be set to an empty value." % preference_key
| agpl-3.0 |
nightauer/quickdic-dictionary.dictionarypc | googlecode_upload.py | 304 | 8912 | #!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
  """Upload a file to a Google Code project's file server.

  Python 2 only (uses httplib and str-based base64 handling).

  Args:
    file: The local path to the file.  (Name shadows the builtin; kept for
      interface compatibility.)
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
      Note that this is NOT your global Google Account password!
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.

  Returns: a tuple:
    http_status: 201 if the upload succeeded, something else if an
      error occured.
    http_reason: The human-readable string associated with http_status
    file_url: If the upload succeeded, the URL of the file on Google
      Code, None otherwise.
  """
  # The login is the user part of user@gmail.com. If the login provided
  # is in the full user@domain form, strip it down.
  if user_name.endswith('@gmail.com'):
    user_name = user_name[:user_name.index('@gmail.com')]
  # Each label becomes its own repeated 'label' form field.
  form_fields = [('summary', summary)]
  if labels is not None:
    form_fields.extend([('label', l.strip()) for l in labels])
  content_type, body = encode_upload_request(form_fields, file)
  upload_host = '%s.googlecode.com' % project_name
  upload_uri = '/files'
  # HTTP Basic auth: base64("user:password") in the Authorization header.
  auth_token = base64.b64encode('%s:%s'% (user_name, password))
  headers = {
    'Authorization': 'Basic %s' % auth_token,
    'User-Agent': 'Googlecode.com uploader v0.9.4',
    'Content-Type': content_type,
    }
  server = httplib.HTTPSConnection(upload_host)
  server.request('POST', upload_uri, body, headers)
  resp = server.getresponse()
  server.close()
  # 201 Created carries the new file's URL in the Location header.
  if resp.status == 201:
    location = resp.getheader('Location', None)
  else:
    location = None
  return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  fields is a sequence of (name, value) pairs. file is the path of
  the file to upload. The file will be uploaded to Google Code with
  the same file name.

  Python 2 only: the file content is read as bytes and joined with str
  metadata lines, which relies on Python 2's unified str/bytes type.

  Returns: (content_type, body) ready for httplib.HTTP instance
  """
  BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
  CRLF = '\r\n'
  body = []
  # Add the metadata about the upload first
  for key, value in fields:
    body.extend(
      ['--' + BOUNDARY,
       'Content-Disposition: form-data; name="%s"' % key,
       '',
       value,
       ])
  # Now add the file itself
  file_name = os.path.basename(file_path)
  f = open(file_path, 'rb')
  file_content = f.read()
  f.close()
  body.extend(
    ['--' + BOUNDARY,
     'Content-Disposition: form-data; name="filename"; filename="%s"'
     % file_name,
     # The upload server determines the mime-type, no need to set it.
     'Content-Type: application/octet-stream',
     '',
     file_content,
     ])
  # Finalize the form body
  body.extend(['--' + BOUNDARY + '--', ''])
  return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
  """Find credentials and upload a file to a Google Code project's file server.

  file_path, project_name, summary, and labels are passed as-is to upload.

  Args:
    file_path: The local path to the file.
    project_name: The name of your project on Google Code.
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
    user_name: Your Google account name, or None to prompt interactively.
    password: Your googlecode.com password, or None to prompt interactively.
    tries: How many attempts to make.

  Returns: (status, reason, url) from the last upload() attempt.
  """
  # Prompt for any missing credential, attempt the upload, and retry with
  # fresh credentials when the server rejects them.
  while tries > 0:
    if user_name is None:
      # Read username if not specified or loaded from svn config, or on
      # subsequent tries.
      sys.stdout.write('Please enter your googlecode.com username: ')
      sys.stdout.flush()
      user_name = sys.stdin.readline().rstrip()
    if password is None:
      # Read password if not loaded from svn config, or on subsequent tries.
      print 'Please enter your googlecode.com password.'
      print '** Note that this is NOT your Gmail account password! **'
      print 'It is the password you use to access Subversion repositories,'
      print 'and can be found here: http://code.google.com/hosting/settings'
      password = getpass.getpass()
    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    # Returns 403 Forbidden instead of 401 Unauthorized for bad
    # credentials as of 2007-07-17.
    if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Rest for another try.
      user_name = password = None
      tries = tries - 1
    else:
      # We're done.
      break
  return status, reason, url
def main():
  """Parse command-line options and upload a single file; return exit code."""
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT [options] FILE')
  parser.add_option('-s', '--summary', dest='summary',
                    help='Short description of the file')
  parser.add_option('-p', '--project', dest='project',
                    help='Google Code project name')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-w', '--password', dest='password',
                    help='Your Google Code password')
  parser.add_option('-l', '--labels', dest='labels',
                    help='An optional list of comma-separated labels to attach '
                    'to the file')
  options, args = parser.parse_args()
  # Summary and project are required; exactly one positional FILE is allowed.
  if not options.summary:
    parser.error('File summary is missing.')
  elif not options.project:
    parser.error('Project name is missing.')
  elif len(args) < 1:
    parser.error('File to upload not provided.')
  elif len(args) > 1:
    parser.error('Only one file may be specified.')
  file_path = args[0]
  if options.labels:
    labels = options.labels.split(',')
  else:
    labels = None
  status, reason, url = upload_find_auth(file_path, options.project,
                                         options.summary, labels,
                                         options.user, options.password)
  if url:
    print 'The file was uploaded successfully.'
    print 'URL: %s' % url
    return 0
  else:
    print 'An error occurred. Your file was not uploaded.'
    print 'Google Code upload server said: %s (%s)' % (reason, status)
    return 1

if __name__ == '__main__':
  sys.exit(main())
| apache-2.0 |
MattFaus/CrowdTube-Connector | lib/gdata-2.0.18/src/gdata/blogger/client.py | 72 | 6695 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a client to communicate with the Blogger servers.
For documentation on the Blogger API, see:
http://code.google.com/apis/blogger/
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import gdata.client
import gdata.gauth
import gdata.blogger.data
import atom.data
import atom.http_core
# URL templates for the Blogger GData feeds; each '%s' is filled in by the
# client methods below.
# List user's blogs, takes a user ID, or 'default'.
BLOGS_URL = 'http://www.blogger.com/feeds/%s/blogs'
# Takes a blog ID.
BLOG_POST_URL = 'http://www.blogger.com/feeds/%s/posts/default'
# Takes a blog ID.
BLOG_PAGE_URL = 'http://www.blogger.com/feeds/%s/pages/default'
# Takes a blog ID and post ID.
BLOG_POST_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/%s/comments/default'
# Takes a blog ID.
BLOG_COMMENTS_URL = 'http://www.blogger.com/feeds/%s/comments/default'
# Takes a blog ID.
BLOG_ARCHIVE_URL = 'http://www.blogger.com/feeds/%s/archive/full'
class BloggerClient(gdata.client.GDClient):
  """GData client for reading and writing Blogger blogs, posts, pages,
  and comments.  Python 2 only (uses the builtin `unicode` name).
  """

  api_version = '2'
  auth_service = 'blogger'
  auth_scopes = gdata.gauth.AUTH_SCOPES['blogger']

  def get_blogs(self, user_id='default', auth_token=None,
                desired_class=gdata.blogger.data.BlogFeed, **kwargs):
    """Retrieve the feed of blogs for user_id ('default' = current user)."""
    return self.get_feed(BLOGS_URL % user_id, auth_token=auth_token,
                         desired_class=desired_class, **kwargs)

  GetBlogs = get_blogs

  def get_posts(self, blog_id, auth_token=None,
                desired_class=gdata.blogger.data.BlogPostFeed, query=None,
                **kwargs):
    """Retrieve the feed of posts for the given blog."""
    return self.get_feed(BLOG_POST_URL % blog_id, auth_token=auth_token,
                         desired_class=desired_class, query=query, **kwargs)

  GetPosts = get_posts

  def get_pages(self, blog_id, auth_token=None,
                desired_class=gdata.blogger.data.BlogPageFeed, query=None,
                **kwargs):
    """Retrieve the feed of static pages for the given blog."""
    return self.get_feed(BLOG_PAGE_URL % blog_id, auth_token=auth_token,
                         desired_class=desired_class, query=query, **kwargs)

  GetPages = get_pages

  def get_post_comments(self, blog_id, post_id, auth_token=None,
                        desired_class=gdata.blogger.data.CommentFeed,
                        query=None, **kwargs):
    """Retrieve the comments on a single post."""
    return self.get_feed(BLOG_POST_COMMENTS_URL % (blog_id, post_id),
                         auth_token=auth_token, desired_class=desired_class,
                         query=query, **kwargs)

  GetPostComments = get_post_comments

  def get_blog_comments(self, blog_id, auth_token=None,
                        desired_class=gdata.blogger.data.CommentFeed,
                        query=None, **kwargs):
    """Retrieve comments across all posts of the given blog."""
    return self.get_feed(BLOG_COMMENTS_URL % blog_id, auth_token=auth_token,
                         desired_class=desired_class, query=query, **kwargs)

  GetBlogComments = get_blog_comments

  def get_blog_archive(self, blog_id, auth_token=None, **kwargs):
    """Retrieve the full archive feed for the given blog."""
    return self.get_feed(BLOG_ARCHIVE_URL % blog_id, auth_token=auth_token,
                         **kwargs)

  GetBlogArchive = get_blog_archive

  def add_post(self, blog_id, title, body, labels=None, draft=False,
               auth_token=None, title_type='text', body_type='html', **kwargs):
    """Create a new post; draft=True keeps it unpublished."""
    # Construct an atom Entry for the blog post to be sent to the server.
    new_entry = gdata.blogger.data.BlogPost(
        title=atom.data.Title(text=title, type=title_type),
        content=atom.data.Content(text=body, type=body_type))
    if labels:
      for label in labels:
        new_entry.add_label(label)
    if draft:
      new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
    return self.post(new_entry, BLOG_POST_URL % blog_id, auth_token=auth_token, **kwargs)

  AddPost = add_post

  def add_page(self, blog_id, title, body, draft=False, auth_token=None,
               title_type='text', body_type='html', **kwargs):
    """Create a new static page; draft=True keeps it unpublished."""
    new_entry = gdata.blogger.data.BlogPage(
        title=atom.data.Title(text=title, type=title_type),
        content=atom.data.Content(text=body, type=body_type))
    if draft:
      new_entry.control = atom.data.Control(draft=atom.data.Draft(text='yes'))
    return self.post(new_entry, BLOG_PAGE_URL % blog_id, auth_token=auth_token, **kwargs)

  AddPage = add_page

  def add_comment(self, blog_id, post_id, body, auth_token=None,
                  title_type='text', body_type='html', **kwargs):
    """Add a comment to the given post."""
    new_entry = gdata.blogger.data.Comment(
        content=atom.data.Content(text=body, type=body_type))
    return self.post(new_entry, BLOG_POST_COMMENTS_URL % (blog_id, post_id),
                     auth_token=auth_token, **kwargs)

  AddComment = add_comment

  def update(self, entry, auth_token=None, **kwargs):
    """Update an entry, working around Blogger's lack of ETag support."""
    # The Blogger API does not currently support ETags, so for now remove
    # the ETag before performing an update.
    old_etag = entry.etag
    entry.etag = None
    response = gdata.client.GDClient.update(self, entry,
                                            auth_token=auth_token, **kwargs)
    entry.etag = old_etag
    return response

  Update = update

  def delete(self, entry_or_uri, auth_token=None, **kwargs):
    """Delete an entry (or a URI pointing at one), with the ETag workaround."""
    # Plain strings/URIs carry no ETag, so they can be deleted directly.
    if isinstance(entry_or_uri, (str, unicode, atom.http_core.Uri)):
      return gdata.client.GDClient.delete(self, entry_or_uri,
                                          auth_token=auth_token, **kwargs)
    # The Blogger API does not currently support ETags, so for now remove
    # the ETag before performing a delete.
    old_etag = entry_or_uri.etag
    entry_or_uri.etag = None
    response = gdata.client.GDClient.delete(self, entry_or_uri,
                                            auth_token=auth_token, **kwargs)
    # TODO: if GDClient.delete raises and exception, the entry's etag may be
    # left as None. Should revisit this logic.
    entry_or_uri.etag = old_etag
    return response

  Delete = delete
class Query(gdata.client.Query):
  """GData query for Blogger feeds that adds the 'orderby' URL parameter."""

  def __init__(self, order_by=None, **kwargs):
    # order_by: value for the 'orderby' query parameter; omitted when None.
    gdata.client.Query.__init__(self, **kwargs)
    self.order_by = order_by

  def modify_request(self, http_request):
    """Append 'orderby' to the request, then apply the base query params."""
    gdata.client._add_query_param('orderby', self.order_by, http_request)
    gdata.client.Query.modify_request(self, http_request)

  ModifyRequest = modify_request
| mit |
DueLaser/due_rasp | src/octoprint/plugins/svgtogcode/inkscape-ext/inkex.py | 6 | 8155 | #!/usr/bin/env python
"""
inkex.py
A helper module for creating Inkscape extensions
Copyright (C) 2005,2007 Aaron Spike, aaron@ekips.org
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, copy, optparse, random, re
import gettext
from math import *
_ = gettext.gettext
#a dictionary of all of the xmlns prefixes in a standard inkscape doc
NSS = {
    u'sodipodi': u'http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd',
    u'cc': u'http://creativecommons.org/ns#',
    u'ccOLD': u'http://web.resource.org/cc/',
    u'svg': u'http://www.w3.org/2000/svg',
    u'dc': u'http://purl.org/dc/elements/1.1/',
    u'rdf': u'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
    u'inkscape': u'http://www.inkscape.org/namespaces/inkscape',
    u'xlink': u'http://www.w3.org/1999/xlink',
    u'xml': u'http://www.w3.org/XML/1998/namespace'
}
#a dictionary of unit to user unit conversion factors
# (user units are 90-dpi pixels: 'in' == 90, 'px' == 1)
uuconv = {'in': 90.0, 'pt': 1.25, 'px': 1, 'mm': 3.5433070866, 'cm': 35.433070866, 'm': 3543.3070866,
          'km': 3543307.0866, 'pc': 15.0, 'yd': 3240, 'ft': 1080}
def unittouu(string):
    """Convert a string with an optional unit suffix into user units (px).

    Returns 0.0 times the unit factor when no leading number is present;
    returns the bare number when no recognized unit suffix is found.
    """
    unit_re = re.compile('(%s)$' % '|'.join(uuconv.keys()))
    number_re = re.compile(r'(([-+]?[0-9]+(\.[0-9]*)?|[-+]?\.[0-9]+)([eE][-+]?[0-9]+)?)')
    number_match = number_re.match(string)
    value = float(number_match.group()) if number_match else 0.0
    unit_match = unit_re.search(string)
    if unit_match:
        factor = uuconv.get(unit_match.group(1))
        if factor is not None:
            return value * factor
    return value
def uutounit(val, unit):
    """Convert a value in user units (px) into the named unit."""
    factor = uuconv[unit]
    return val / factor
try:
from lxml import etree
except:
sys.exit(_('The fantastic lxml wrapper for libxml2 is required by inkex.py and therefore this extension. Please download and install the latest version from http://cheeseshop.python.org/pypi/lxml/, or install it through your package manager by a command like: sudo apt-get install python-lxml'))
def debug(what):
    """Write *what* to stderr (followed by a newline) and pass it through."""
    sys.stderr.write("%s\n" % (what,))
    return what
def errormsg(msg):
    """Intended for end-user-visible error messages.

    (Currently just writes to stderr with an appended newline, but could do
    something better in future: e.g. could add markup to distinguish error
    messages from status messages or debugging output.)

    Python 2 only: relies on the builtin `unicode` and on stderr accepting
    UTF-8 encoded byte strings.

    Note that this should always be combined with translation:

      import gettext
      _ = gettext.gettext
      ...
      inkex.errormsg(_("This extension requires two selected paths."))
    """
    sys.stderr.write((unicode(msg) + "\n").encode("UTF-8"))
def check_inkbool(option, opt, value):
    """optparse type checker for 'inkbool': map 'true'/'false' (any case,
    via str.capitalize) to a bool, or raise OptionValueError."""
    normalized = str(value).capitalize()
    if normalized == 'True':
        return True
    if normalized == 'False':
        return False
    raise optparse.OptionValueError("option %s: invalid inkbool value: %s" % (opt, value))
def addNS(tag, ns=None):
val = tag
if ns!=None and len(ns)>0 and NSS.has_key(ns) and len(tag)>0 and tag[0]!='{':
val = "{%s}%s" % (NSS[ns], tag)
return val
class InkOption(optparse.Option):
    """optparse Option subclass that registers the custom 'inkbool' type,
    checked by check_inkbool above."""
    TYPES = optparse.Option.TYPES + ("inkbool",)
    TYPE_CHECKER = copy.copy(optparse.Option.TYPE_CHECKER)
    TYPE_CHECKER["inkbool"] = check_inkbool
class Effect:
    """A class for creating Inkscape SVG Effects"""
    #self.options = Defaults()
    # Default option values; setoptions() overlays user-supplied values
    # on top of these.
    defaults = {
        "ids": [],
        "directory": None,
        "file": None,
        "engraving_laser_speed": 300,
        "laser_intensity": 500,
        #"suppress_all_messages": True,
        "log_filename": '',
        "unit": "G21 (All units in mm)",
        "svgDPI": 90,
        "biarc_max_split_depth": 4,
        "fill_areas": False,
        "fill_spacing": 0.25,
        "cross_fill": False,
        "fill_angle": 0.0,
        "noheaders": "false",
        "intensity_white": 0,
        "intensity_black": 500,
        "speed_white": 1500,
        "speed_black": 250,
        "pierce_time": 0,
        "contrast": 1.0,
        "sharpening": 1.0,
        "dithering": False,
        "beam_diameter": 0.25
    }
    def __init__(self, *args, **kwargs):
        self.document=None
        self.original_document=None
        self.ctx=None
        self.selected={}
        self.doc_ids={}
        # NOTE(review): this aliases the class-level dict, so setoptions()
        # also mutates Effect.defaults shared across all instances -- confirm
        # this is intended (a copy would isolate instances).
        self.options = self.defaults
        #self.args=None
    def effect(self, on_progress=None, on_progress_args=None, on_progress_kwargs=None):
        # Hook for subclasses: implement the actual effect here.
        pass
    def setoptions(self, opts):
        """Overlay opts onto self.options, keeping defaults for missing keys."""
        # set default values if option is missing
        for key in self.options.keys():
            if key in opts:
                self.options[key] = opts[key]
            else:
                print("Using default %s = %s" %(key, str(self.options[key])))
    def parse(self,file=None):
        """Parse document in specified file or on stdin"""
        # Fall back: explicit argument -> self.svg_file -> stdin.
        try:
            try:
                stream = open(file,'r')
            except:
                stream = open(self.svg_file,'r')
        except:
            stream = sys.stdin
        # huge_tree allows very large/deep SVG documents.
        p = etree.XMLParser(huge_tree=True)
        self.document = etree.parse(stream, parser=p)
        # Keep a pristine copy so output() can detect changes.
        self.original_document = copy.deepcopy(self.document)
        stream.close()
    def getposinlayer(self):
        """Determine the current layer and view center from sodipodi metadata."""
        #defaults
        self.current_layer = self.document.getroot()
        self.view_center = (0.0,0.0)
        layerattr = self.document.xpath('//sodipodi:namedview/@inkscape:current-layer', namespaces=NSS)
        if layerattr:
            layername = layerattr[0]
            layer = self.document.xpath('//svg:g[@id="%s"]' % layername, namespaces=NSS)
            if layer:
                self.current_layer = layer[0]
        xattr = self.document.xpath('//sodipodi:namedview/@inkscape:cx', namespaces=NSS)
        yattr = self.document.xpath('//sodipodi:namedview/@inkscape:cy', namespaces=NSS)
        # NOTE(review): getDocumentHeight is not defined in this class;
        # presumably provided by a subclass or later addition -- confirm.
        height = self.getDocumentHeight();
        doc_height = unittouu(height)
        if xattr and yattr:
            x = xattr[0]
            y = yattr[0]
            if x and y:
                self.view_center = (float(x), doc_height - float(y)) # FIXME: y-coordinate flip, eliminate it when it's gone in Inkscape
    def getselected(self):
        """Collect selected nodes"""
        for i in self.options['ids']:
            path = '//*[@id="%s"]' % i
            for node in self.document.xpath(path, namespaces=NSS):
                self.selected[i] = node
    def getElementById(self, id):
        """Return the document element with the given id, or None."""
        path = '//*[@id="%s"]' % id
        el_list = self.document.xpath(path, namespaces=NSS)
        if el_list:
            return el_list[0]
        else:
            return None
    def getParentNode(self, node):
        """Return the parent of node (linear scan of the whole document)."""
        for parent in self.document.getiterator():
            if node in parent.getchildren():
                return parent
                break
    def getdocids(self):
        """Record every id attribute present in the document."""
        docIdNodes = self.document.xpath('//@id', namespaces=NSS)
        for m in docIdNodes:
            self.doc_ids[m] = 1
    def getNamedView(self):
        """Return the sodipodi:namedview element."""
        return self.document.xpath('//sodipodi:namedview', namespaces=NSS)[0]
    def createGuide(self, posX, posY, angle):
        """Add a sodipodi guide at (posX, posY) with the given angle (degrees)."""
        atts = {
            'position': str(posX)+','+str(posY),
            'orientation': str(sin(radians(angle)))+','+str(-cos(radians(angle)))
        }
        guide = etree.SubElement(
            self.getNamedView(),
            addNS('guide','sodipodi'), atts )
        return guide
    def output(self):
        """Serialize document into XML on stdout"""
        # Only emit the document if the effect actually changed it.
        original = etree.tostring(self.original_document)
        result = etree.tostring(self.document)
        if original != result:
            self.document.write(sys.stdout)
    def affect(self, on_progress=None, on_progress_args=None, on_progress_kwargs=None):
        """Affect an SVG document with a callback effect"""
        self.parse()
        self.getposinlayer()
        self.getselected()
        self.getdocids()
        self.effect(on_progress, on_progress_args, on_progress_kwargs)
    def uniqueId(self, old_id, make_new_id = True):
        """Return an id based on old_id that is not yet used in the document."""
        new_id = old_id
        if make_new_id:
            # Append random characters until the id is unique, then reserve it.
            while new_id in self.doc_ids:
                new_id += random.choice('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
            self.doc_ids[new_id] = 1
        return new_id
    def xpathSingle(self, path):
        """Return the first node matching path, or None (with an error message)."""
        try:
            retval = self.document.xpath(path, namespaces=NSS)[0]
        except:
            errormsg(_("No matching node for expression: %s") % path)
            retval = None
        return retval
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 encoding=utf-8 textwidth=99
| agpl-3.0 |
Mirantis/tempest | tempest/api/identity/admin/v3/test_domains.py | 4 | 3750 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class DomainsTestJSON(base.BaseIdentityV3AdminTest):
    """CRUD tests for Keystone v3 domains, exercised over the JSON interface."""
    _interface = 'json'

    def _delete_domain(self, domain_id):
        # It is necessary to disable the domain before deleting,
        # or else it would result in unauthorized error
        self.client.update_domain(domain_id, enabled=False)
        self.client.delete_domain(domain_id)

    @test.attr(type='smoke')
    def test_list_domains(self):
        # Test to list domains
        domain_ids = list()
        fetched_ids = list()
        # Create three domains and register cleanups for each.
        for _ in range(3):
            _, domain = self.client.create_domain(
                data_utils.rand_name('domain-'),
                description=data_utils.rand_name('domain-desc-'))
            # Delete the domain at the end of this method
            self.addCleanup(self._delete_domain, domain['id'])
            domain_ids.append(domain['id'])
        # List and Verify Domains
        _, body = self.client.list_domains()
        for d in body:
            fetched_ids.append(d['id'])
        # Every domain created above must appear in the listing.
        missing_doms = [d for d in domain_ids if d not in fetched_ids]
        self.assertEqual(0, len(missing_doms))

    @test.attr(type='smoke')
    def test_create_update_delete_domain(self):
        """Create a domain, verify its fields, update it, and re-fetch."""
        d_name = data_utils.rand_name('domain-')
        d_desc = data_utils.rand_name('domain-desc-')
        _, domain = self.client.create_domain(
            d_name, description=d_desc)
        self.addCleanup(self._delete_domain, domain['id'])
        # The created domain carries all expected attributes.
        self.assertIn('id', domain)
        self.assertIn('description', domain)
        self.assertIn('name', domain)
        self.assertIn('enabled', domain)
        self.assertIn('links', domain)
        self.assertIsNotNone(domain['id'])
        self.assertEqual(d_name, domain['name'])
        self.assertEqual(d_desc, domain['description'])
        # JSON returns a bool; XML serializes it as a string.
        if self._interface == "json":
            self.assertEqual(True, domain['enabled'])
        else:
            self.assertEqual('true', str(domain['enabled']).lower())
        # Update name and description, then verify the response in full.
        new_desc = data_utils.rand_name('new-desc-')
        new_name = data_utils.rand_name('new-name-')
        _, updated_domain = self.client.update_domain(
            domain['id'], name=new_name, description=new_desc)
        self.assertIn('id', updated_domain)
        self.assertIn('description', updated_domain)
        self.assertIn('name', updated_domain)
        self.assertIn('enabled', updated_domain)
        self.assertIn('links', updated_domain)
        self.assertIsNotNone(updated_domain['id'])
        self.assertEqual(new_name, updated_domain['name'])
        self.assertEqual(new_desc, updated_domain['description'])
        self.assertEqual('true', str(updated_domain['enabled']).lower())
        # A fresh GET reflects the update.
        _, fetched_domain = self.client.get_domain(domain['id'])
        self.assertEqual(new_name, fetched_domain['name'])
        self.assertEqual(new_desc, fetched_domain['description'])
        self.assertEqual('true', str(fetched_domain['enabled']).lower())
class DomainsTestXML(DomainsTestJSON):
    # Re-run every JSON domain test against the XML interface; only the
    # serialization format differs from the parent class.
    _interface = 'xml'
| apache-2.0 |
nischalsheth/contrail-controller | src/config/api-server/tests/test_subnet_ip_count.py | 3 | 6473 | #
# Copyright (c) 2013,2014 Juniper Networks, Inc. All rights reserved.
#
import gevent
import os
import sys
import socket
import errno
import uuid
import logging
import coverage
import testtools
from testtools.matchers import Equals, MismatchError, Not, Contains
from testtools import content, content_type, ExpectedException
import unittest
import re
import json
import copy
import inspect
import pycassa
import kombu
import requests
from vnc_api.vnc_api import *
import vnc_api.gen.vnc_api_test_gen
from vnc_api.gen.resource_test import *
import cfgm_common
from cfgm_common import vnc_cgitb
# Render unhandled exceptions as plain-text cgitb tracebacks.
vnc_cgitb.enable(format='text')
# Make the shared test helpers (test_utils, test_common, test_case)
# importable from the sibling common/tests directory.
sys.path.append('../common/tests')
from test_utils import *
import test_common
import test_case
# Module-level logger used by the test class and the __main__ guard below.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TestSubnet(test_case.ApiServerTestCase):
    """API-server test for per-subnet instance-IP counting.

    NOTE: this file is Python 2 (``print`` statements); kept as-is.
    """
    @classmethod
    def setUpClass(cls, *args, **kwargs):
        # Mirror log output to the console at DEBUG level for the whole
        # class; the handler is removed again in tearDownClass.
        cls.console_handler = logging.StreamHandler()
        cls.console_handler.setLevel(logging.DEBUG)
        logger.addHandler(cls.console_handler)
        super(TestSubnet, cls).setUpClass(*args, **kwargs)

    @classmethod
    def tearDownClass(cls, *args, **kwargs):
        logger.removeHandler(cls.console_handler)
        super(TestSubnet, cls).tearDownClass(*args, **kwargs)

    def test_subnet_ip_count(self):
        # Objects need to be cleaned up after the test so repeated runs
        # give the same results.  If cleanup is skipped, change the
        # subnets before running the test again.
        domain_name = 'my-domain'
        proj_name = 'my-proj'
        subnet1 = '192.168.1.0'
        prefix1 = 30
        subnet2 = '10.10.1.0'
        prefix2 = 29
        vn_name = 'my-fe'
        # Build the containment chain: domain -> project -> ipam.
        domain = Domain(domain_name)
        self._vnc_lib.domain_create(domain)
        print 'Created domain'
        project = Project(proj_name, domain)
        self._vnc_lib.project_create(project)
        print 'Created Project'
        ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
        self._vnc_lib.network_ipam_create(ipam)
        print 'Created network ipam'
        ipam = self._vnc_lib.network_ipam_read(fq_name=[domain_name, proj_name,
                                                        'default-network-ipam'])
        print 'Read network ipam'
        # Create a virtual network carrying both subnets.
        ipam_sn_1 = IpamSubnetType(subnet=SubnetType(subnet1, prefix1))
        ipam_sn_2 = IpamSubnetType(subnet=SubnetType(subnet2, prefix2))
        vn = VirtualNetwork(vn_name, project)
        vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_1, ipam_sn_2]))
        self._vnc_lib.virtual_network_create(vn)
        print 'Created Virtual Network object ', vn.uuid
        # Baseline: no instance IPs allocated yet in either subnet.
        print 'Read no of instance ip for each subnet'
        print '["192.168.1.0/30", "10.10.1.0/29"]'
        subnet_list = ["192.168.1.0/30", "10.10.1.0/29"]
        result = self._vnc_lib.virtual_network_subnet_ip_count(vn, subnet_list)
        print 'Expected output: {"ip_count_list": [0, 0]}'
        print 'Actual output:', result
        # First VM + port + instance IP: the count for the first subnet
        # should become 1.
        net_obj = self._vnc_lib.virtual_network_read(id=vn.uuid)
        ip_obj1 = InstanceIp(name=str(uuid.uuid4()))
        ip_obj1.uuid = ip_obj1.name
        print 'Created Instance IP object 1 ', ip_obj1.uuid
        vm_inst_obj1 = VirtualMachine(str(uuid.uuid4()))
        vm_inst_obj1.uuid = vm_inst_obj1.name
        self._vnc_lib.virtual_machine_create(vm_inst_obj1)
        id_perms = IdPermsType(enable=True)
        port_obj1 = VirtualMachineInterface(
            str(uuid.uuid4()), vm_inst_obj1, id_perms=id_perms)
        port_obj1.uuid = port_obj1.name
        port_obj1.set_virtual_network(vn)
        ip_obj1.set_virtual_machine_interface(port_obj1)
        ip_obj1.set_virtual_network(net_obj)
        port_id1 = self._vnc_lib.virtual_machine_interface_create(port_obj1)
        print 'Allocating an IP address for first VM'
        ip_id1 = self._vnc_lib.instance_ip_create(ip_obj1)
        ip_obj1 = self._vnc_lib.instance_ip_read(id=ip_id1)
        ip_addr1 = ip_obj1.get_instance_ip_address()
        print ' got IP Address for first instance', ip_addr1
        net_obj = self._vnc_lib.virtual_network_read(id=vn.uuid)
        result = self._vnc_lib.virtual_network_subnet_ip_count(vn, subnet_list)
        print 'Expected output: {"ip_count_list": [1, 0]}'
        print 'Actual output:', result
        # Second VM + port + instance IP: presumably allocated from the
        # second subnet, making the counts [1, 1] -- TODO confirm the
        # allocator's subnet choice.
        net_obj = self._vnc_lib.virtual_network_read(id=vn.uuid)
        ip_obj2 = InstanceIp(name=str(uuid.uuid4()))
        ip_obj2.uuid = ip_obj2.name
        print 'Created Instance IP object 2', ip_obj2.uuid
        vm_inst_obj2 = VirtualMachine(str(uuid.uuid4()))
        vm_inst_obj2.uuid = vm_inst_obj2.name
        self._vnc_lib.virtual_machine_create(vm_inst_obj2)
        id_perms = IdPermsType(enable=True)
        port_obj2 = VirtualMachineInterface(
            str(uuid.uuid4()), vm_inst_obj2, id_perms=id_perms)
        port_obj2.uuid = port_obj2.name
        port_obj2.set_virtual_network(vn)
        ip_obj2.set_virtual_machine_interface(port_obj2)
        ip_obj2.set_virtual_network(net_obj)
        port_id2 = self._vnc_lib.virtual_machine_interface_create(port_obj2)
        print 'Allocating an IP address for Second VM'
        ip_id2 = self._vnc_lib.instance_ip_create(ip_obj2)
        ip_obj2 = self._vnc_lib.instance_ip_read(id=ip_id2)
        ip_addr2 = ip_obj2.get_instance_ip_address()
        print ' got IP Address for Second instance', ip_addr2
        net_obj = self._vnc_lib.virtual_network_read(id=vn.uuid)
        result = self._vnc_lib.virtual_network_subnet_ip_count(vn, subnet_list)
        print 'Expected output: {"ip_count_list": [1, 1]}'
        print 'Actual output:', result
        print result
        # cleanup -- delete in reverse dependency order so each delete
        # succeeds (IPs before ports, ports before VMs/network, ...).
        print 'Cleaning up'
        self._vnc_lib.instance_ip_delete(id=ip_id1)
        self._vnc_lib.instance_ip_delete(id=ip_id2)
        self._vnc_lib.virtual_machine_interface_delete(id=port_obj1.uuid)
        self._vnc_lib.virtual_machine_interface_delete(id=port_obj2.uuid)
        self._vnc_lib.virtual_machine_delete(id=vm_inst_obj1.uuid)
        self._vnc_lib.virtual_machine_delete(id=vm_inst_obj2.uuid)
        self._vnc_lib.virtual_network_delete(id=vn.uuid)
        self._vnc_lib.network_ipam_delete(id=ipam.uuid)
        self._vnc_lib.project_delete(id=project.uuid)
        self._vnc_lib.domain_delete(id=domain.uuid)
if __name__ == '__main__':
    # When run as a script, echo DEBUG-level log records to the console
    # before handing control to the unittest runner.
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    logger.addHandler(console)
    unittest.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.