| repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes) |
|---|---|---|---|---|---|
alien4cloud/alien4cloud-cloudify3-provider | alien4cloud-cloudify3-provider/src/test/resources/outputs/blueprints/openstack/tomcat/device-mapping-scripts/mapping.py | 16 | 7152 | def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
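# Illustrative example of the convention above: if a wrapped script prints
#   EXPECTED_OUTPUT_PORT=8080
#   done
# then parse_output returns {'last_output': 'done', 'outputs': {'PORT': '8080'}}.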
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
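# OutputConsumer drains a child-process pipe on a daemon thread into an in-memory
# buffer, so the subprocess cannot block on a full stdout/stderr pipe while
# execute() polls for its return code.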
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
# Check the inputs
mandatories = ['iaas', 'os_mapping', 'volume_instance_id', 'device_key']
for param in mandatories:
if param not in inputs:
raise SystemExit("The parameter '{0}' is missing".format(param))
# Method which actually calls the mapping script corresponding to the IaaS and the OS
def do_mapping(current_os, iaas, device_name):
map_script_path = None
ctx.logger.info("inside current os: '{0}'".format(current_os))
command_prefix = None
if 'windows' == current_os:
ctx.logger.info('[MAPPING] windows')
map_script_path = ctx.download_resource("device-mapping-scripts/{0}/mapDevice.ps1".format(iaas))
command_prefix="C:\\Windows\\Sysnative\\WindowsPowerShell\\v1.0\\powershell.exe -executionpolicy bypass -File"
else:
ctx.logger.info("[MAPPING] linux")
map_script_path = ctx.download_resource("device-mapping-scripts/{0}/mapDevice.sh".format(iaas))
env_map = {'DEVICE_NAME' : device_name}
new_script_process = {'env': env_map}
convert_env_value_to_string(new_script_process['env'])
outputs = execute(map_script_path, new_script_process, outputNames=None, command_prefix=command_prefix)
return outputs['last_output']
# Method that does the device mapping if the OS needs a mapping for the device
def map_device_name(iaas, os_mapping, device_name):
new_device_name = None
current_os = platform.system().lower()
ctx.logger.info("current os: '{0}'".format(current_os))
if current_os in os_mapping:
new_device_name = do_mapping(current_os, iaas, device_name)
return new_device_name
# Retrieve required parameters
volume_instance_id = inputs['volume_instance_id']
iaas = inputs['iaas'] # corresponds to the folder containing the mapping scripts
os_mapping = inputs['os_mapping'] # values: windows and/or linux; the specified OS needs a device mapping
device_key = inputs['device_key'] # the attribute name of the volume node which contains the device value
# Retrieve the current device_name from the attributes of the volume node
volume = client.node_instances.get(volume_instance_id)
ctx.logger.debug("[MAPPING] volume: {0}".format(volume))
saved_device_key = "cfy_{0}_saved".format(device_key)
if saved_device_key in volume.runtime_properties:
device_name = volume.runtime_properties[saved_device_key]
elif device_key in volume.runtime_properties:
device_name = volume.runtime_properties[device_key]
else:
ctx.logger.warning("No '{0}' key in runtime properties, retrieving the value from the properties of node '{1}'".format(device_key, volume.node_id))
volume_node = client.nodes.get(volume.deployment_id, volume.node_id)
device_name = volume_node.properties[device_key]
# Do the mapping
mapped_device = map_device_name(iaas, os_mapping, device_name)
# Update the device_name attributes if needed
if mapped_device is not None:
if saved_device_key not in volume.runtime_properties:
volume.runtime_properties[saved_device_key] = device_name
volume.runtime_properties[device_key] = mapped_device
client.node_instances.update(volume_instance_id, None, volume.runtime_properties, volume.version)
ctx.logger.info("[MAPPING] volume: {0} updated".format(volume))
else:
ctx.logger.info("[MAPPING] No mapping for {0}".format(volume_instance_id))
| apache-2.0 |
vwvww/servo | tests/wpt/web-platform-tests/infrastructure/assumptions/tools/ahem-generate-table.py | 58 | 2992 | from __future__ import print_function, unicode_literals
import itertools
import unicodedata
from fontTools.ttLib import TTFont
try:
chr(0x100)
except ValueError:
chr = unichr
def grouper(n, iterable):
"""
>>> list(grouper(3, 'ABCDEFG'))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
"""
iterable = iter(iterable)
return iter(lambda: list(itertools.islice(iterable, n)), [])
ttf = TTFont("../../../fonts/Ahem.ttf")
chars = {char for table in ttf['cmap'].tables for char in table.cmap.keys()}
# exclude chars that can't be represented as HTML numeric character refs
chars = chars - (set(range(0x80, 0x9F+1)) | {0x00})
chars_sorted = sorted(chars)
per_row = 17
doctype = "<!doctype html>"
title = "<title>Ahem checker</title>"
style_open = """
<style>
* {
padding: 0;
margin: 0;
border: none;
}
td {
width: 34px;
}""".strip()
style_close = "</style>"
style_font_face = """
@font-face {
font-family: Ahem;
src: url("../../fonts/Ahem.ttf");
}""".strip()
style_table_font_specified = """
table {
font: 15px/1 Ahem;
border-collapse: separate;
border-spacing: 1px;
table-layout: fixed;
}""".strip()
style_table_font_unspecified = """
table {
font-size: 15px;
line-height: 1;
border-collapse: separate;
border-spacing: 1px;
table-layout: fixed;
}""".strip()
def build_header(is_test, rel, href):
rv = [doctype, title]
if rel != None and href != None:
rv.append('<link rel="%s" href="%s">' % (rel, href))
rv.append(style_open)
if not is_test:
if rel == None and href == None:
# ahem-notref.html
rv.append(style_table_font_unspecified)
else:
# ahem-ref.html
rv.append(style_font_face)
rv.append(style_table_font_specified)
else:
# ahem.html
rv.append(style_table_font_specified)
rv.append(style_close)
return "\n".join(rv)
def build_table():
rv = ["\n"]
rv.append("<table>\n")
for row in grouper(per_row, chars_sorted):
rv.append(" " * 4 + "<tr>\n")
for codepoint in row:
assert codepoint <= 0xFFFF
try:
name = unicodedata.name(chr(codepoint))
except ValueError:
rv.append(" " * 8 + "<td>&#x%04X;x <!-- U+%04X -->\n" % (codepoint, codepoint))
else:
rv.append(" " * 8 + "<td>&#x%04X;x <!-- U+%04X: %s -->\n" % (codepoint, codepoint, name))
rv.append("</table>\n")
return "".join(rv)
cases = [
# file, is_test, rel
("../ahem.html", True, "match"),
("../ahem-ref.html", False, "mismatch"),
("../ahem-notref.html", False, None),
]
table = build_table()
for index, case in enumerate(cases):
next_index = index + 1
file, is_test, rel = case
href = cases[next_index][0][3:] if next_index < len(cases) else None
header = build_header(is_test, rel, href)
with open(file, "w") as file:
file.write("%s%s" % (header, table))
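# Each generated page links to the next entry in `cases`: ahem.html declares
# ahem-ref.html as its match reference, ahem-ref.html declares ahem-notref.html as a
# mismatch reference, and ahem-notref.html (the last entry) gets no link element.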
| mpl-2.0 |
CameronTEllis/brainiak | brainiak/hyperparamopt/hpo.py | 5 | 11683 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyper Parameter Optimization (HPO)
This implementation is based on the work in [Bergstra2011]_ and
[Bergstra2013]_.
.. [Bergstra2011] "Algorithms for Hyper-Parameter Optimization",
James S. Bergstra and Bardenet, Rémi and Bengio, Yoshua
and Kégl, Balázs. NIPS 2011
.. [Bergstra2013] "Making a Science of Model Search:
Hyperparameter Optimization in Hundreds of Dimensions for
Vision Architectures", James Bergstra, Daniel Yamins, David Cox.
JMLR W&CP 28 (1) : 115–123, 2013
"""
# Authors: Narayanan Sundaram (Intel Labs)
import logging
import math
import numpy as np
from scipy.special import erf
import scipy.stats as st
logger = logging.getLogger(__name__)
__all__ = [
"fmin",
]
def get_sigma(x, min_limit=-np.inf, max_limit=np.inf):
"""Compute the standard deviations around the points for a 1D GMM.
We take the distance from the nearest left and right neighbors
for each point, then use the max as the estimate of standard
deviation for the gaussian mixture around that point.
Arguments
---------
x : 1D array
Set of points to create the GMM
min_limit : Optional[float], default : -inf
Minimum limit for the distribution
max_limit : Optional[float], default : inf
Maximum limit for the distribution
Returns
-------
1D array
Array of standard deviations
"""
z = np.append(x, [min_limit, max_limit])
sigma = np.ones(x.shape)
for i in range(x.size):
# Calculate the nearest left neighbor of x[i]
# Find the minimum of (x[i] - k) for k < x[i]
xleft = z[np.argmin([(x[i] - k) if k < x[i] else np.inf for k in z])]
# Calculate the nearest right neighbor of x[i]
# Find the minimum of (k - x[i]) for k > x[i]
xright = z[np.argmin([(k - x[i]) if k > x[i] else np.inf for k in z])]
sigma[i] = max(x[i] - xleft, xright - x[i])
if sigma[i] == np.inf:
sigma[i] = min(x[i] - xleft, xright - x[i])
if (sigma[i] == -np.inf): # should never happen
sigma[i] = 1.0
return sigma
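# Worked example: for x = np.array([0.2, 0.5, 0.9]) with min_limit=0 and
# max_limit=1, the neighbour gaps give sigma = [0.3, 0.4, 0.4] (each entry is the
# larger of the distances to the nearest left and right neighbours or limits).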
class gmm_1d_distribution:
"""GMM 1D distribution.
Given a set of points, we create this object so that we
can calculate likelihoods and generate samples from this
1D Gaussian mixture model.
Attributes
----------
points : 1D array
Set of points to create the GMM
N : int
Number of points to create the GMM
min_limit : Optional[float], default : -inf
Minimum limit for the distribution
max_limit : Optional[float], default : inf
Maximum limit for the distribution
weights : Optional[1D array], default : array of ones
Used to weight the points non-uniformly if required
"""
def __init__(self, x, min_limit=-np.inf, max_limit=np.inf, weights=1.0):
self.points = x
self.N = x.size
self.min_limit = min_limit
self.max_limit = max_limit
self.sigma = get_sigma(x, min_limit=min_limit, max_limit=max_limit)
self.weights = (2
/ (erf((max_limit - x) / (np.sqrt(2.) * self.sigma))
- erf((min_limit - x) / (np.sqrt(2.) * self.sigma)))
* weights)
self.W_sum = np.sum(self.weights)
def get_gmm_pdf(self, x):
"""Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x
"""
def my_norm_pdf(xt, mu, sigma):
z = (xt - mu) / sigma
return (math.exp(-0.5 * z * z)
/ (math.sqrt(2. * np.pi) * sigma))
y = 0
if (x < self.min_limit):
return 0
if (x > self.max_limit):
return 0
for _x in range(self.points.size):
y += (my_norm_pdf(x, self.points[_x], self.sigma[_x])
* self.weights[_x]) / self.W_sum
return y
def __call__(self, x):
"""Return the GMM likelihood for given point(s).
See :eq:`gmm-likelihood`.
Arguments
---------
x : scalar (or) 1D array of reals
Point(s) at which likelihood needs to be computed
Returns
-------
scalar (or) 1D array
Likelihood values at the given point(s)
"""
if np.isscalar(x):
return self.get_gmm_pdf(x)
else:
return np.array([self.get_gmm_pdf(t) for t in x])
def get_samples(self, n):
"""Sample the GMM distribution.
Arguments
---------
n : int
Number of samples needed
Returns
-------
1D array
Samples from the distribution
"""
normalized_w = self.weights / np.sum(self.weights)
get_rand_index = st.rv_discrete(values=(range(self.N),
normalized_w)).rvs(size=n)
samples = np.zeros(n)
k = 0
j = 0
while (k < n):
i = get_rand_index[j]
j = j + 1
if (j == n):
get_rand_index = st.rv_discrete(values=(range(self.N),
normalized_w)).rvs(size=n)
j = 0
v = np.random.normal(loc=self.points[i], scale=self.sigma[i])
if (v > self.max_limit or v < self.min_limit):
continue
else:
samples[k] = v
k = k + 1
if (k == n):
break
return samples
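# Illustrative usage: d = gmm_1d_distribution(np.array([0.2, 0.5, 0.9]),
# min_limit=0, max_limit=1); d(0.5) evaluates the mixture likelihood at 0.5 and
# d.get_samples(100) draws 100 samples truncated to [0, 1].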
def get_next_sample(x, y, min_limit=-np.inf, max_limit=np.inf):
"""Get the next point to try, given the previous samples.
We use [Bergstra2013]_ to compute the point that gives the largest
Expected improvement (EI) in the optimization function. This model fits 2
different GMMs - one for points that have loss values in the bottom 15%
and another for the rest. Then we sample from the former distribution
and estimate EI as the ratio of the likelihoods of the 2 distributions.
We pick the point with the best EI among the samples that is also not
very close to a point we have sampled earlier.
Arguments
---------
x : 1D array
Samples generated from the distribution so far
y : 1D array
Loss values at the corresponding samples
min_limit : float, default : -inf
Minimum limit for the distribution
max_limit : float, default : +inf
Maximum limit for the distribution
Returns
-------
float
Next value to use for HPO
"""
z = np.array(list(zip(x, y)), dtype=np.dtype([('x', float), ('y', float)]))
z = np.sort(z, order='y')
n = y.shape[0]
g = int(np.round(np.ceil(0.15 * n)))
ldata = z[0:g]
gdata = z[g:n]
lymin = ldata['y'].min()
lymax = ldata['y'].max()
weights = (lymax - ldata['y']) / (lymax - lymin)
lx = gmm_1d_distribution(ldata['x'], min_limit=min_limit,
max_limit=max_limit, weights=weights)
gx = gmm_1d_distribution(gdata['x'], min_limit=min_limit,
max_limit=max_limit)
samples = lx.get_samples(n=1000)
ei = lx(samples) / gx(samples)
h = (x.max() - x.min()) / (10 * x.size)
# TODO
# assumes prior of x is uniform; should ideally change for other priors
# d = np.abs(x - samples[ei.argmax()]).min()
# CDF(x+d/2) - CDF(x-d/2) < 1/(10*x.size) then reject else accept
s = 0
while (np.abs(x - samples[ei.argmax()]).min() < h):
ei[ei.argmax()] = 0
s = s + 1
if (s == samples.size):
break
xnext = samples[ei.argmax()]
return xnext
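# Given the samples drawn so far (x) and their losses (y), get_next_sample fits one
# GMM to the best ~15% of points and another to the rest, then proposes the sampled
# candidate with the highest likelihood ratio (expected improvement) that is not
# within (x.max() - x.min()) / (10 * x.size) of an already-evaluated point.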
def fmin(loss_fn,
space,
max_evals,
trials,
init_random_evals=30,
explore_prob=0.2):
"""Find the minimum of function through hyper parameter optimization.
Arguments
---------
loss_fn : ``function(*args) -> float``
Function that takes in a dictionary and returns a real value.
This is the function to be minimized.
space : dictionary
Custom dictionary specifying the range and distribution of
the hyperparameters.
E.g. ``space = {'x': {'dist':scipy.stats.uniform(0,1),
'lo':0, 'hi':1}}``
for a 1-dimensional space with variable x in range [0,1]
max_evals : int
Maximum number of evaluations of loss_fn allowed
trials : list
Holds the output of the optimization trials.
Need not be empty to begin with, new trials are appended
at the end.
init_random_evals : Optional[int], default 30
Number of random trials to initialize the
optimization.
explore_prob : Optional[float], default 0.2
Controls the exploration-vs-exploitation ratio. Value should
be in [0,1]. By default, 20% of trials are random samples.
Returns
-------
trial entry (dictionary of hyperparameters)
Best hyperparameter setting found.
E.g. {'x': 5.6, 'loss' : 0.5} where x is the best hyperparameter
value found and loss is the value of the function for the
best hyperparameter value(s).
Raises
------
ValueError
If the distribution specified in space does not support a ``rvs()``
method to generate random numbers, a ValueError is raised.
"""
for s in space:
if not hasattr(space[s]['dist'], 'rvs'):
raise ValueError('Unknown distribution type for variable')
if 'lo' not in space[s]:
space[s]['lo'] = -np.inf
if 'hi' not in space[s]:
space[s]['hi'] = np.inf
if len(trials) > init_random_evals:
init_random_evals = 0
for t in range(max_evals):
sdict = {}
if t >= init_random_evals and np.random.random() > explore_prob:
use_random_sampling = False
else:
use_random_sampling = True
yarray = np.array([tr['loss'] for tr in trials])
for s in space:
sarray = np.array([tr[s] for tr in trials])
if use_random_sampling:
sdict[s] = space[s]['dist'].rvs()
else:
sdict[s] = get_next_sample(sarray, yarray,
min_limit=space[s]['lo'],
max_limit=space[s]['hi'])
logger.debug('Explore' if use_random_sampling else 'Exploit')
logger.info('Next point %s = %s', t, sdict)
y = loss_fn(sdict)
sdict['loss'] = y
trials.append(sdict)
yarray = np.array([tr['loss'] for tr in trials])
yargmin = yarray.argmin()
logger.info('Best point so far = %s', trials[yargmin])
return trials[yargmin]
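# Illustrative usage (assumed quadratic objective; st is scipy.stats imported above):
#   space = {'x': {'dist': st.uniform(0, 10), 'lo': 0, 'hi': 10}}
#   trials = []
#   best = fmin(lambda p: (p['x'] - 3.0) ** 2, space, max_evals=50, trials=trials)
# best['x'] should land near 3.0 and best['loss'] holds the corresponding loss.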
| apache-2.0 |
technoapurva/css | spkm.py | 2 | 4158 | #!/usr/bin/env python
"""
@package css
@file css/spkm.py
@author Edward Hunter
@brief Spherical k-means core algorithm and support routines.
"""
# Copyright and licence.
"""
Copyright (C) 2014 Edward Hunter
edward.a.hunter@gmail.com
840 24th Street
San Diego, CA 92102
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from common import *
def count_cluster_sizes(no_clusters, no_docs, r):
cluster_sizes = [0 for x in range(no_clusters)]
for j in range(no_clusters):
for i in range(no_docs):
if r[i] == j:
cluster_sizes[j] += 1
return cluster_sizes
def initialize_clusters(x, cluster_ids, doc_ids):
no_docs = len(doc_ids)
no_clusters = len(cluster_ids)
# Initialize shuffled docs ids.
shuffled_docs = np.array(doc_ids)
np.random.shuffle(shuffled_docs)
# Initialize new assigments.
r_new = np.zeros(no_docs, np.int32)
# Initialize the centroids.
mu = np.copy(x)[shuffled_docs[:no_clusters],:]
return shuffled_docs, r_new, mu
def spkmeans(x, no_clusters, verbose=False, **kwargs):
if not isinstance(x, np.ndarray):
x = x.toarray()
# Get doc and cluster ids.
no_docs = x.shape[0]
cluster_ids = range(no_clusters)
doc_ids = range(no_docs)
on_empty = kwargs.get('on_empty','restart')
# Initialize the clusters
shuffled_docs, r_new, mu = initialize_clusters(x, cluster_ids, doc_ids)
# Initialize count and similary array.
count = 0
similarity = []
# Spherical k means loop.
while True:
# Iteration start time.
startime = time.time()
# Update assignments.
# Copy the old assignments.
r = np.copy(r_new)
# Compute the new assignments.
products = np.dot(x,mu.T)
r_new = np.argmax(products,axis=1)
# Collect and sort the new scores.
scores = np.array([products[i, r_new[i]] for i in doc_ids])
scores_idx = np.argsort(scores)
# Fix empty clusters here.
empty = [i for i in cluster_ids if i not in r_new]
if empty:
print 'Iteration %i: empty clusters: %s' % (count, str(empty))
if on_empty == 'restart':
print 'Reinitializing algorithm.'
shuffled_docs, r_new, mu = initialize_clusters(x, cluster_ids,
doc_ids)
continue
else:
print 'Reassigning remote data to empty clusters.'
for i,j in enumerate(empty):
r_new[scores_idx[-(i+1)]] = j
empty = [i for i in cluster_ids if i not in r_new]
if verbose:
print 'Adjusted empty clusters: ' + str(empty)
newsim = 0
for i in range(no_docs):
newsim += products[i,r_new[i]]
similarity.append(newsim)
# Exit if assigments do not change.
if np.all(r == r_new):
sizes = count_cluster_sizes(no_clusters, no_docs, r)
return mu, r, similarity, sizes
# Update centroids.
mu = np.zeros_like(mu)
for i in range(no_docs):
mu[r_new[i],:] += x[i,:]
for j in range(no_clusters):
mu_norm = np.linalg.norm(mu[j,:])
if mu_norm == 0.0:
print 'WARNING: Cluster %i empty!' % j
else:
mu[j,:] = mu[j,:]/mu_norm
delta = time.time() - startime
if verbose:
print 'Iteration %i: %.2f seconds.' % (count, delta)
count += 1
| gpl-3.0 |
philipgian/pre-commit | tests/make_archives_test.py | 1 | 1979 | from __future__ import absolute_import
from __future__ import unicode_literals
import os.path
import tarfile
import mock
import pytest
from pre_commit import make_archives
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from testing.fixtures import git_dir
from testing.util import get_head_sha
from testing.util import skipif_slowtests_false
def test_make_archive(tempdir_factory):
output_dir = tempdir_factory.get()
git_path = git_dir(tempdir_factory)
# Add a files to the git directory
with cwd(git_path):
cmd_output('touch', 'foo')
cmd_output('git', 'add', '.')
cmd_output('git', 'commit', '-m', 'foo')
# We'll use this sha
head_sha = get_head_sha('.')
# And check that this file doesn't exist
cmd_output('touch', 'bar')
cmd_output('git', 'add', '.')
cmd_output('git', 'commit', '-m', 'bar')
# Do the thing
archive_path = make_archives.make_archive(
'foo', git_path, head_sha, output_dir,
)
assert archive_path == os.path.join(output_dir, 'foo.tar.gz')
assert os.path.exists(archive_path)
extract_dir = tempdir_factory.get()
# Extract the tar
with tarfile.open(archive_path) as tf:
tf.extractall(extract_dir)
# Verify the contents of the tar
assert os.path.exists(os.path.join(extract_dir, 'foo'))
assert os.path.exists(os.path.join(extract_dir, 'foo', 'foo'))
assert not os.path.exists(os.path.join(extract_dir, 'foo', '.git'))
assert not os.path.exists(os.path.join(extract_dir, 'foo', 'bar'))
@skipif_slowtests_false
@pytest.mark.integration
def test_main(tempdir_factory):
path = tempdir_factory.get()
# Don't actually want to make these in the current repo
with mock.patch.object(make_archives, 'RESOURCES_DIR', path):
make_archives.main()
for archive, _, _ in make_archives.REPOS:
assert os.path.exists(os.path.join(path, archive + '.tar.gz'))
| mit |
svagionitis/youtube-dl | youtube_dl/extractor/gameone.py | 6 | 3870 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
xpath_with_ns,
parse_iso8601
)
NAMESPACE_MAP = {
'media': 'http://search.yahoo.com/mrss/',
}
# URL prefix to download the mp4 files directly instead of streaming via rtmp
# Credits go to XBox-Maniac
# http://board.jdownloader.org/showpost.php?p=185835&postcount=31
RAW_MP4_URL = 'http://cdn.riptide-mtvn.com/'
class GameOneIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gameone\.de/tv/(?P<id>\d+)'
_TEST = {
'url': 'http://www.gameone.de/tv/288',
'md5': '136656b7fb4c9cb4a8e2d500651c499b',
'info_dict': {
'id': '288',
'ext': 'mp4',
'title': 'Game One - Folge 288',
'duration': 1238,
'thumbnail': 'http://s3.gameone.de/gameone/assets/video_metas/teaser_images/000/643/636/big/640x360.jpg',
'description': 'FIFA-Pressepokal 2014, Star Citizen, Kingdom Come: Deliverance, Project Cars, Schöner Trants Nerdquiz Folge 2 Runde 1',
'age_limit': 16,
'upload_date': '20140513',
'timestamp': 1399980122,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
og_video = self._og_search_video_url(webpage, secure=False)
description = self._html_search_meta('description', webpage)
age_limit = int(
self._search_regex(
r'age=(\d+)',
self._html_search_meta(
'age-de-meta-label',
webpage),
'age_limit',
'0'))
mrss_url = self._search_regex(r'mrss=([^&]+)', og_video, 'mrss')
mrss = self._download_xml(mrss_url, video_id, 'Downloading mrss')
title = mrss.find('.//item/title').text
thumbnail = mrss.find('.//item/image').get('url')
timestamp = parse_iso8601(mrss.find('.//pubDate').text, delimiter=' ')
content = mrss.find(xpath_with_ns('.//media:content', NAMESPACE_MAP))
content_url = content.get('url')
content = self._download_xml(
content_url,
video_id,
'Downloading media:content')
rendition_items = content.findall('.//rendition')
duration = int(rendition_items[0].get('duration'))
formats = [
{
'url': re.sub(r'.*/(r2)', RAW_MP4_URL + r'\1', r.find('./src').text),
'width': int(r.get('width')),
'height': int(r.get('height')),
'tbr': int(r.get('bitrate')),
}
for r in rendition_items
]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'description': description,
'age_limit': age_limit,
'timestamp': timestamp,
}
class GameOnePlaylistIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gameone\.de(?:/tv)?/?$'
IE_NAME = 'gameone:playlist'
_TEST = {
'url': 'http://www.gameone.de/tv',
'info_dict': {
'title': 'GameOne',
},
'playlist_mincount': 294,
}
def _real_extract(self, url):
webpage = self._download_webpage('http://www.gameone.de/tv', 'TV')
max_id = max(map(int, re.findall(r'<a href="/tv/(\d+)"', webpage)))
entries = [
self.url_result('http://www.gameone.de/tv/%d' % video_id, 'GameOne')
for video_id in range(max_id, 0, -1)]
return {
'_type': 'playlist',
'title': 'GameOne',
'entries': entries,
}
| unlicense |
Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Standards/Tutorials/_Drafts/Distance/__init__.py | 1 | 4579 | #<ImportSpecificModules>
import ShareYourSystem as SYS
import numpy as np
import scipy.stats
from tables import *
import time
import operator
import os
#</ImportSpecificModules>
#<DefineLocals>
#</DefineLocals>
#<DefineClass>
class DistanceClass(SYS.ObjectsClass):
#<DefineHookMethods>
def initAfter(self):
#<DefineSpecificDo>
self.IntsList=[1,4,3]
self.PowerFloat=0.5
self.SquaredIntsList=[1,16,3]
self.UnitsInt=3
self.DistanceFloat=np.sqrt(sum(self.SquaredIntsList))
#</DefineSpecificDo>
#Definition the features
self['App_Model_ParameterizingDict']={
'ColumningTuplesList':
[
#ColumnStr #Col
('PowerFloat', Float64Col()),
('IntsList', (Int64Col,'UnitsInt'))
],
'IsFeaturingBool':True,
'ScanningTuplesList':
[
('IntsList',[[1,2,3],[4,5]])
]
}
#Definition the outputs
self['App_Model_ResultingDict']={
'ColumningTuplesList':
[
#ColumnStr #Col
('SquaredIntsList', (Int64Col,'UnitsInt')),
('DistanceFloat', Float64Col()),
('IntsList', (Int64Col,'UnitsInt'))
],
'JoiningTuple':("","Parameter")
}
def outputAfter(self,**_LocalOutputingVariablesDict):
#set the SquaredIntsList
self.SquaredIntsList=map(lambda __Int:__Int**2,self.IntsList)
#set the DistanceFloat
self.DistanceFloat=np.power(sum(self.SquaredIntsList),self.PowerFloat)
#</DefineHookMethods>
#</DefineTriggeringHookMethods>
def bindIntsListAfter(self):
#Bind with UnitsInt setting
self.UnitsInt=len(self.IntsList)
#</DefineTriggeringHookMethods>
#</DefineClass>
#<DefineAttestingFunctions>
def attest_insert():
#Insert the default output
Distance=SYS.DistanceClass(
).update(
[
('IntsList',[4,5]),
('PowerFloat',0.5)
]
).insert('Result'
).update(
[
('IntsList',[4,5]),
('PowerFloat',1.)
]
).insert(
).update(
[
('IntsList',[4,5]),
('PowerFloat',2.)
]
).insert(
).update(
[
('IntsList',[1,2,3]),
('PowerFloat',0.5)
]
).insert(
).update(
[
('IntsList',[4,6]),
('PowerFloat',1.)
]
).insert(
).update(
[
('IntsList',[1,2,3]),
('PowerFloat',1.)
]
).insert(
).update(
[
('IntsList',[0,1]),
('PowerFloat',0.5)
]
).insert(
).hdfclose()
#Return the object and the h5py
return "\n\n\n\n"+SYS.represent(
Distance
)+'\n\n\n'+SYS.represent(
os.popen('/usr/local/bin/h5ls -dlr '+Distance.HdformatingPathStr).read()
)
def attest_retrieve():
Distance=SYS.DistanceClass(
).__setitem__('/App_Model_ResultingDict/RetrievingIndexesList',(0,2)
).retrieve('Result'
).hdfclose()
#Return the object and the h5py
return "\n\n\n\n"+SYS.represent(
Distance
)
def attest_find():
Distance=SYS.DistanceClass(
).update(
[
('/App_Model_ParameterizingDict/FindingTuplesList',[
('IntsList',(SYS.getIsEqualBool,[4,5])),
]),
('/App_Model_ResultingDict/update',
[
('MergingTuplesList',
[
('UnitsInt',(operator.eq,2))
]
),
('FindingTuplesList',[
('DistanceFloat',(operator.gt,30.)),
#('__IntsList',(SYS.getIsEqualBool,[4,5])),
])
]
)
]
).find('Result'
).hdfclose()
#Return the object and the h5py
return "\n\n\n\n"+SYS.represent(
Distance
)
def attest_recover():
Distance=SYS.DistanceClass(
).update(
[
('/App_Model_ParameterizingDict/FindingTuplesList',[
('IntsList',(SYS.getIsEqualBool,[4,5])),
('PowerFloat',(SYS.getIsEqualBool,1.))
]),
('/App_Model_ResultingDict/update',
[
('MergingTuplesList',
[
('UnitsInt',(operator.eq,2))
]
),
('FindingTuplesList',[
('DistanceFloat',(operator.gt,30.)),
#('__IntsList',(SYS.getIsEqualBool,[4,5])),
])
]
)
]
).recover('Result'
).hdfclose()
#Return the object and the h5py
return "\n\n\n\n"+SYS.represent(
Distance
)
def attest_scan():
#Scan
Distance=SYS.DistanceClass(
).scan('Result'
).hdfclose()
#Return the object and the h5py
return "\n\n\n\n"+SYS.represent(
Distance
)+'\n\n\n'+SYS.represent(
os.popen('/usr/local/bin/h5ls -dlr '+Distance.HdformatingPathStr).read()
)
#</DefineAttestingFunctions>
| mit |
slimta/python-slimta | slimta/util/__init__.py | 1 | 4971 | # Copyright (c) 2016 Ian C. Good
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Package containing a variety of useful modules utilities that didn't really
belong anywhere else.
"""
from __future__ import absolute_import
from gevent import socket
__all__ = ['build_ipv4_socket_creator', 'create_connection_ipv4',
'create_listeners']
def build_ipv4_socket_creator(only_ports=None):
"""Returns a function that will act like
:py:func:`socket.create_connection` but only using IPv4 addresses. This
function can be used as the ``socket_creator`` argument to some classes
like :class:`~slimta.relay.smtp.mx.MxSmtpRelay`.
:param only_ports: If given, can be a list to limit which ports are
restricted to IPv4. Connections to all other ports may
be IPv6.
"""
def socket_creator(*args, **kwargs):
return create_connection_ipv4(*args, only_ports=only_ports, **kwargs)
return socket_creator
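# Illustrative usage (a sketch based on the docstring above): pass the result as the
# relay's socket creator so SMTP connections on port 25 are forced over IPv4, e.g.
#   MxSmtpRelay(socket_creator=build_ipv4_socket_creator(only_ports=[25]))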
def create_connection_ipv4(address, timeout=None, source_address=None,
only_ports=None):
"""Attempts to mimick to :py:func:`socket.create_connection`, but
connections are only made to IPv4 addresses.
:param only_ports: If given, can be a list to limit which ports are
restricted to IPv4. Connections to all other ports may
be IPv6.
"""
host, port = address
if only_ports and port not in only_ports:
return socket.create_connection(address, timeout, source_address)
last_exc = None
for res in socket.getaddrinfo(host, port, socket.AF_INET):
_, _, _, _, sockaddr = res
try:
return socket.create_connection(sockaddr, timeout, source_address)
except socket.error as exc:
last_exc = exc
if last_exc is not None:
raise last_exc
else:
raise socket.error('getaddrinfo returns an empty list')
def create_listeners(address,
family=socket.AF_UNSPEC,
socktype=socket.SOCK_STREAM,
proto=socket.IPPROTO_IP):
"""Uses :func:`socket.getaddrinfo` to create listening sockets for
available socket parameters. For example, giving *address* as
``('localhost', 80)`` on a system with IPv6 would return one socket bound
to ``127.0.0.1`` and one bound to ``::1``.
May also be used for ``socket.AF_UNIX`` with a file path to produce a
single unix domain socket listening on that path.
:param address: A ``(host, port)`` tuple to listen on.
:param family: the socket family, default ``AF_UNSPEC``.
:param socktype: the socket type, default ``SOCK_STREAM``.
:param proto: the socket protocol, default ``IPPROTO_IP``.
"""
if family == socket.AF_UNIX:
sock = socket.socket(family, socktype, proto)
_init_socket(sock, address)
return [sock]
elif not isinstance(address, tuple) or len(address) != 2:
raise ValueError(address)
flags = socket.AI_PASSIVE
host, port = address
listeners = []
last_exc = None
for res in socket.getaddrinfo(host, port, family, socktype, proto, flags):
fam, typ, prt, _, sockaddr = res
try:
sock = socket.socket(fam, typ, prt)
_init_socket(sock, sockaddr)
except socket.error as exc:
last_exc = exc
else:
listeners.append(sock)
if last_exc and not listeners:
raise last_exc
return listeners
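# Example from the docstring: create_listeners(('localhost', 80)) on a dual-stack
# host yields one listening socket bound to 127.0.0.1 and one bound to ::1.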
def _init_socket(sock, sockaddr):
try:
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
except socket.error:
pass
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error:
pass
sock.setblocking(0)
sock.bind(sockaddr)
if sock.type != socket.SOCK_DGRAM:
sock.listen(socket.SOMAXCONN)
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| mit |
XiaosongWei/crosswalk-test-suite | wrt/wrt-manifest2-android-tests/manifest2/name_null_value.py | 4 | 2252 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<hongjuanx.wang@intel.com>
import unittest
import os
import commands
import comm
class TestManifestFunctions(unittest.TestCase):
def test_name_nullvalue(self):
comm.setUp()
manifestPath = comm.ConstPath + \
"/../testapp/manifest_name_nullvalue/manifest.json"
cmd = "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s" % \
(comm.Pck_Tools, comm.ARCH, comm.MODE, manifestPath)
packInfo = commands.getstatusoutput(cmd)
self.assertNotEquals(0, packInfo[0])
errorInfo = "There is a parser error in manifest.json file"
self.assertIn(errorInfo, packInfo[1])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/datab/aio/_configuration.py | 1 | 3204 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
VERSION = "unknown"
class DataBoxEdgeManagementClientConfiguration(Configuration):
"""Configuration for DataBoxEdgeManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription ID.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(DataBoxEdgeManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2020-12-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-databoxedge/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| mit |
jeromecc/doctoctocbot | src/crowdfunding/migrations/0013_tiers.py | 1 | 1118 | # Generated by Django 2.0.13 on 2019-02-25 05:21
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crowdfunding', '0012_auto_20190224_0523'),
]
operations = [
migrations.CreateModel(
name='Tiers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(max_length=191)),
('description', models.CharField(max_length=191)),
('emoji', models.CharField(blank=True, max_length=4)),
('image', models.ImageField(blank=True, upload_to='')),
('min', models.DecimalField(decimal_places=2, default=Decimal('0'), max_digits=12)),
('max', models.DecimalField(decimal_places=2, default=Decimal('Infinity'), max_digits=12)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crowdfunding.Project')),
],
),
]
| mpl-2.0 |
zack3241/incubator-airflow | airflow/migrations/versions/1507a7289a2f_create_is_encrypted.py | 46 | 1959 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""create is_encrypted
Revision ID: 1507a7289a2f
Revises: e3a246e0dc1
Create Date: 2015-08-18 18:57:51.927315
"""
# revision identifiers, used by Alembic.
revision = '1507a7289a2f'
down_revision = 'e3a246e0dc1'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector
connectionhelper = sa.Table(
'connection',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('is_encrypted')
)
def upgrade():
# first check if the user already has this done. This should only be
# true for users who are upgrading from a previous version of Airflow
# that predates Alembic integration
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
# this will only be true if 'connection' already exists in the db,
# but not if alembic created it in a previous migration
if 'connection' in inspector.get_table_names():
col_names = [c['name'] for c in inspector.get_columns('connection')]
if 'is_encrypted' in col_names:
return
op.add_column(
'connection',
sa.Column('is_encrypted', sa.Boolean, unique=False, default=False))
conn = op.get_bind()
conn.execute(
connectionhelper.update().values(is_encrypted=False)
)
def downgrade():
op.drop_column('connection', 'is_encrypted')
| apache-2.0 |
yanni4night/ursa-django | app/settings.py | 1 | 2208 | """
Django settings for ursa-django project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'dev')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'uq==k2a4+j^3i3)wns^+3%9)ww+eysjo0)-sg(hu5q$6=uqg^+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = False
USE_L10N = False
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = [os.path.join(PROJECT_ROOT, 'templates')] | mit |
bodi000/odoo | addons/portal_project/tests/__init__.py | 170 | 1124 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
checks = [
test_access_rights,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kho0810/likelion_Web | lib/jinja2/testsuite/core_tags.py | 412 | 11858 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.core_tags
~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the core tags like for and if.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, TemplateSyntaxError, UndefinedError, \
DictLoader
env = Environment()
class ForLoopTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('{% for item in seq %}{{ item }}{% endfor %}')
assert tmpl.render(seq=list(range(10))) == '0123456789'
def test_else(self):
tmpl = env.from_string('{% for item in seq %}XXX{% else %}...{% endfor %}')
assert tmpl.render() == '...'
def test_empty_blocks(self):
tmpl = env.from_string('<{% for item in seq %}{% else %}{% endfor %}>')
assert tmpl.render() == '<>'
def test_context_vars(self):
tmpl = env.from_string('''{% for item in seq -%}
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}''')
one, two, _ = tmpl.render(seq=[0, 1]).split('###')
(one_index, one_index0, one_revindex, one_revindex0, one_first,
one_last, one_length) = one.split('|')
(two_index, two_index0, two_revindex, two_revindex0, two_first,
two_last, two_length) = two.split('|')
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == 'True' and two_first == 'False'
assert one_last == 'False' and two_last == 'True'
assert one_length == two_length == '2'
def test_cycling(self):
tmpl = env.from_string('''{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}''')
output = tmpl.render(seq=list(range(4)), through=('<1>', '<2>'))
assert output == '<1><2>' * 4
def test_scope(self):
tmpl = env.from_string('{% for item in seq %}{% endfor %}{{ item }}')
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self):
def inner():
for item in range(5):
yield item
tmpl = env.from_string('{% for item in iter %}{{ item }}{% endfor %}')
output = tmpl.render(iter=inner())
assert output == '01234'
def test_noniter(self):
tmpl = env.from_string('{% for item in none %}...{% endfor %}')
self.assert_raises(TypeError, tmpl.render)
def test_recursive(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
assert tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'
def test_recursive_depth0(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]')
def test_recursive_depth(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]')
def test_looploop(self):
tmpl = env.from_string('''{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}''')
assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'
def test_reversed_bug(self):
tmpl = env.from_string('{% for i in items %}{{ i }}'
'{% if not loop.last %}'
',{% endif %}{% endfor %}')
assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'
def test_loop_errors(self):
tmpl = env.from_string('''{% for item in [1] if loop.index
== 0 %}...{% endfor %}''')
self.assert_raises(UndefinedError, tmpl.render)
tmpl = env.from_string('''{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}''')
assert tmpl.render() == ''
def test_loop_filter(self):
tmpl = env.from_string('{% for item in range(10) if item '
'is even %}[{{ item }}]{% endfor %}')
assert tmpl.render() == '[0][2][4][6][8]'
tmpl = env.from_string('''
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}''')
assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'
def test_loop_unassignable(self):
self.assert_raises(TemplateSyntaxError, env.from_string,
'{% for loop in seq %}...{% endfor %}')
def test_scoped_special_var(self):
t = env.from_string('{% for s in seq %}[{{ loop.first }}{% for c in s %}'
'|{{ loop.first }}{% endfor %}]{% endfor %}')
assert t.render(seq=('ab', 'cd')) == '[True|True|False][False|True|False]'
def test_scoped_loop_var(self):
t = env.from_string('{% for x in seq %}{{ loop.first }}'
'{% for y in seq %}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalse'
t = env.from_string('{% for x in seq %}{% for y in seq %}'
'{{ loop.first }}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalseTrueFalse'
def test_recursive_empty_loop_iter(self):
t = env.from_string('''
{%- for item in foo recursive -%}{%- endfor -%}
''')
assert t.render(dict(foo=[])) == ''
def test_call_in_loop(self):
t = env.from_string('''
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
''')
assert t.render() == '[1][2][3]'
def test_scoping_bug(self):
t = env.from_string('''
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
''')
assert t.render(foo=(1,)) == '...1......2...'
def test_unpacking(self):
tmpl = env.from_string('{% for a, b, c in [[1, 2, 3]] %}'
'{{ a }}|{{ b }}|{{ c }}{% endfor %}')
assert tmpl.render() == '1|2|3'
class IfConditionTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('''{% if true %}...{% endif %}''')
assert tmpl.render() == '...'
def test_elif(self):
tmpl = env.from_string('''{% if false %}XXX{% elif true
%}...{% else %}XXX{% endif %}''')
assert tmpl.render() == '...'
def test_else(self):
tmpl = env.from_string('{% if false %}XXX{% else %}...{% endif %}')
assert tmpl.render() == '...'
def test_empty(self):
tmpl = env.from_string('[{% if true %}{% else %}{% endif %}]')
assert tmpl.render() == '[]'
def test_complete(self):
tmpl = env.from_string('{% if a %}A{% elif b %}B{% elif c == d %}'
'C{% else %}D{% endif %}')
assert tmpl.render(a=0, b=False, c=42, d=42.0) == 'C'
def test_no_scope(self):
tmpl = env.from_string('{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render(a=True) == '1'
tmpl = env.from_string('{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render() == '1'
class MacrosTestCase(JinjaTestCase):
env = Environment(trim_blocks=True)
def test_simple(self):
tmpl = self.env.from_string('''\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}''')
assert tmpl.render() == 'Hello Peter!'
def test_scoping(self):
tmpl = self.env.from_string('''\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}''')
assert tmpl.render() == 'foo|bar'
def test_arguments(self):
tmpl = self.env.from_string('''\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}''')
assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'
def test_varargs(self):
tmpl = self.env.from_string('''\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}''')
assert tmpl.render() == '1|2|3'
def test_simple_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_complex_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_caller_undefined(self):
tmpl = self.env.from_string('''\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}''')
assert tmpl.render() == 'True'
def test_include(self):
self.env = Environment(loader=DictLoader({'include':
'{% macro test(foo) %}[{{ foo }}]{% endmacro %}'}))
tmpl = self.env.from_string('{% from "include" import test %}{{ test("foo") }}')
assert tmpl.render() == '[foo]'
def test_macro_api(self):
tmpl = self.env.from_string('{% macro foo(a, b) %}{% endmacro %}'
'{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}'
'{% macro baz() %}{{ caller() }}{% endmacro %}')
assert tmpl.module.foo.arguments == ('a', 'b')
assert tmpl.module.foo.defaults == ()
assert tmpl.module.foo.name == 'foo'
assert not tmpl.module.foo.caller
assert not tmpl.module.foo.catch_kwargs
assert not tmpl.module.foo.catch_varargs
assert tmpl.module.bar.arguments == ()
assert tmpl.module.bar.defaults == ()
assert not tmpl.module.bar.caller
assert tmpl.module.bar.catch_kwargs
assert tmpl.module.bar.catch_varargs
assert tmpl.module.baz.caller
def test_callself(self):
tmpl = self.env.from_string('{% macro foo(x) %}{{ x }}{% if x > 1 %}|'
'{{ foo(x - 1) }}{% endif %}{% endmacro %}'
'{{ foo(5) }}')
assert tmpl.render() == '5|4|3|2|1'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ForLoopTestCase))
suite.addTest(unittest.makeSuite(IfConditionTestCase))
suite.addTest(unittest.makeSuite(MacrosTestCase))
return suite
| apache-2.0 |
adviti/melange | thirdparty/google_appengine/lib/django_1_2/tests/modeltests/model_inheritance_same_model_name/tests.py | 54 | 1510 | from django.test import TestCase
from modeltests.model_inheritance.models import Title
class InheritanceSameModelNameTests(TestCase):
def setUp(self):
# The Title model has distinct accessors for both
# model_inheritance.Copy and model_inheritance_same_model_name.Copy
# models.
self.title = Title.objects.create(title='Lorem Ipsum')
def test_inheritance_related_name(self):
from modeltests.model_inheritance.models import Copy
self.assertEquals(
self.title.attached_model_inheritance_copy_set.create(
content='Save $ on V1agr@',
url='http://v1agra.com/',
title='V1agra is spam',
), Copy.objects.get(content='Save $ on V1agr@'))
def test_inheritance_with_same_model_name(self):
from modeltests.model_inheritance_same_model_name.models import Copy
self.assertEquals(
self.title.attached_model_inheritance_same_model_name_copy_set.create(
content='The Web framework for perfectionists with deadlines.',
url='http://www.djangoproject.com/',
title='Django Rocks'
), Copy.objects.get(content='The Web framework for perfectionists with deadlines.'))
def test_related_name_attribute_exists(self):
# The Post model doesn't have an attribute called 'attached_%(app_label)s_%(class)s_set'.
self.assertEqual(hasattr(self.title, 'attached_%(app_label)s_%(class)s_set'), False)
| apache-2.0 |
luzpaz/QGIS | tests/src/python/test_qgslinesegment.py | 45 | 5434 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLineSegment2D.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '13/04/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsPointXY, QgsLineSegment2D)
from qgis.testing import start_app, unittest
start_app()
class TestQgsLineSegment2D(unittest.TestCase):
def testConstruct(self):
segment = QgsLineSegment2D(QgsPointXY(1, 2), QgsPointXY(3, 4))
self.assertEqual(segment.start(), QgsPointXY(1, 2))
self.assertEqual(segment.end(), QgsPointXY(3, 4))
segment = QgsLineSegment2D(1, 2, 3, 4)
self.assertEqual(segment.start(), QgsPointXY(1, 2))
self.assertEqual(segment.end(), QgsPointXY(3, 4))
def testGettersSetters(self):
segment = QgsLineSegment2D(QgsPointXY(1, 2), QgsPointXY(3, 4))
self.assertEqual(segment.start(), QgsPointXY(1, 2))
self.assertEqual(segment.end(), QgsPointXY(3, 4))
self.assertEqual(segment.startX(), 1)
self.assertEqual(segment.startY(), 2)
self.assertEqual(segment.endX(), 3)
self.assertEqual(segment.endY(), 4)
segment.setStartX(5)
self.assertEqual(segment.start(), QgsPointXY(5, 2))
self.assertEqual(segment.end(), QgsPointXY(3, 4))
self.assertEqual(segment.startX(), 5)
self.assertEqual(segment.startY(), 2)
self.assertEqual(segment.endX(), 3)
self.assertEqual(segment.endY(), 4)
segment.setStartY(6)
self.assertEqual(segment.start(), QgsPointXY(5, 6))
self.assertEqual(segment.end(), QgsPointXY(3, 4))
self.assertEqual(segment.startX(), 5)
self.assertEqual(segment.startY(), 6)
self.assertEqual(segment.endX(), 3)
self.assertEqual(segment.endY(), 4)
segment.setEndX(7)
self.assertEqual(segment.start(), QgsPointXY(5, 6))
self.assertEqual(segment.end(), QgsPointXY(7, 4))
self.assertEqual(segment.startX(), 5)
self.assertEqual(segment.startY(), 6)
self.assertEqual(segment.endX(), 7)
self.assertEqual(segment.endY(), 4)
segment.setEndY(8)
self.assertEqual(segment.start(), QgsPointXY(5, 6))
self.assertEqual(segment.end(), QgsPointXY(7, 8))
self.assertEqual(segment.startX(), 5)
self.assertEqual(segment.startY(), 6)
self.assertEqual(segment.endX(), 7)
self.assertEqual(segment.endY(), 8)
segment.setStart(QgsPointXY(1, 2))
self.assertEqual(segment.start(), QgsPointXY(1, 2))
self.assertEqual(segment.end(), QgsPointXY(7, 8))
self.assertEqual(segment.startX(), 1)
self.assertEqual(segment.startY(), 2)
self.assertEqual(segment.endX(), 7)
self.assertEqual(segment.endY(), 8)
segment.setEnd(QgsPointXY(3, 4))
self.assertEqual(segment.start(), QgsPointXY(1, 2))
self.assertEqual(segment.end(), QgsPointXY(3, 4))
self.assertEqual(segment.startX(), 1)
self.assertEqual(segment.startY(), 2)
self.assertEqual(segment.endX(), 3)
self.assertEqual(segment.endY(), 4)
def testEquality(self):
segment1 = QgsLineSegment2D(QgsPointXY(1, 2), QgsPointXY(3, 4))
segment2 = QgsLineSegment2D(QgsPointXY(1, 2), QgsPointXY(3, 4))
self.assertEqual(segment1, segment2)
self.assertFalse(segment1 != segment2)
segment2 = QgsLineSegment2D(QgsPointXY(1, 2), QgsPointXY(3, 5))
self.assertNotEqual(segment1, segment2)
self.assertTrue(segment1 != segment2)
segment2 = QgsLineSegment2D(QgsPointXY(1, 2), QgsPointXY(5, 4))
self.assertNotEqual(segment1, segment2)
self.assertTrue(segment1 != segment2)
segment2 = QgsLineSegment2D(QgsPointXY(1, 5), QgsPointXY(3, 4))
self.assertNotEqual(segment1, segment2)
self.assertTrue(segment1 != segment2)
segment2 = QgsLineSegment2D(QgsPointXY(5, 2), QgsPointXY(3, 4))
self.assertNotEqual(segment1, segment2)
self.assertTrue(segment1 != segment2)
def testLength(self):
segment = QgsLineSegment2D(QgsPointXY(1, 2), QgsPointXY(3, 5))
self.assertAlmostEqual(segment.length(), 3.60555127546, 5)
self.assertEqual(segment.lengthSquared(), 13)
def testPointLeftOfLine(self):
segment = QgsLineSegment2D(QgsPointXY(1, 2), QgsPointXY(3, 5))
self.assertEqual(segment.pointLeftOfLine(QgsPointXY(1.5, 6)), -1)
self.assertEqual(segment.pointLeftOfLine(QgsPointXY(1.5, -6)), 1)
self.assertEqual(segment.pointLeftOfLine(QgsPointXY(5, 8)), 0)
segment = QgsLineSegment2D(QgsPointXY(3, 5), QgsPointXY(1, 2))
self.assertEqual(segment.pointLeftOfLine(QgsPointXY(1.5, 6)), 1)
self.assertEqual(segment.pointLeftOfLine(QgsPointXY(1.5, -6)), -1)
self.assertEqual(segment.pointLeftOfLine(QgsPointXY(5, 8)), 0)
def testReverse(self):
segment = QgsLineSegment2D(QgsPointXY(1, 2), QgsPointXY(3, 4))
segment.reverse()
self.assertEqual(segment.start(), QgsPointXY(3, 4))
self.assertEqual(segment.end(), QgsPointXY(1, 2))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
slightlymadphoenix/activityPointsApp | activitypoints/lib/python3.5/site-packages/wheel/pep425tags.py | 70 | 5760 | """Generate and work with PEP 425 Compatibility Tags."""
import distutils.util
import platform
import sys
import sysconfig
import warnings
def get_config_var(var):
try:
return sysconfig.get_config_var(var)
except IOError as e: # pip Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
return None
def get_abbr_impl():
"""Return abbreviated implementation name."""
impl = platform.python_implementation()
if impl == 'PyPy':
return 'pp'
elif impl == 'Jython':
return 'jy'
elif impl == 'IronPython':
return 'ip'
elif impl == 'CPython':
return 'cp'
raise LookupError('Unknown Python implementation: ' + impl)
def get_impl_ver():
"""Return implementation version."""
impl_ver = get_config_var("py_version_nodot")
if not impl_ver or get_abbr_impl() == 'pp':
impl_ver = ''.join(map(str, get_impl_version_info()))
return impl_ver
def get_impl_version_info():
"""Return sys.version_info-like tuple for use in decrementing the minor
version."""
if get_abbr_impl() == 'pp':
# as per https://github.com/pypa/pip/issues/2882
return (sys.version_info[0], sys.pypy_version_info.major,
sys.pypy_version_info.minor)
else:
return sys.version_info[0], sys.version_info[1]
def get_flag(var, fallback, expected=True, warn=True):
"""Use a fallback method for determining SOABI flags if the needed config
var is unset or unavailable."""
val = get_config_var(var)
if val is None:
if warn:
warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
"be incorrect".format(var), RuntimeWarning, 2)
return fallback()
return val == expected
def get_abi_tag():
"""Return the ABI tag based on SOABI (if available) or emulate SOABI
(CPython 2, PyPy)."""
soabi = get_config_var('SOABI')
impl = get_abbr_impl()
if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
d = ''
m = ''
u = ''
if get_flag('Py_DEBUG',
lambda: hasattr(sys, 'gettotalrefcount'),
warn=(impl == 'cp')):
d = 'd'
if get_flag('WITH_PYMALLOC',
lambda: impl == 'cp',
warn=(impl == 'cp')):
m = 'm'
if get_flag('Py_UNICODE_SIZE',
lambda: sys.maxunicode == 0x10ffff,
expected=4,
warn=(impl == 'cp' and
sys.version_info < (3, 3))) \
and sys.version_info < (3, 3):
u = 'u'
abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
elif soabi and soabi.startswith('cpython-'):
abi = 'cp' + soabi.split('-')[1]
elif soabi:
abi = soabi.replace('.', '_').replace('-', '_')
else:
abi = None
return abi
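# Illustrative note (not part of the original module): the pieces above combine
# as implementation + version + debug/pymalloc/unicode flags. For example, on a
# non-debug, pymalloc, wide-unicode CPython 2.7 build the emulated tag would be:
#
#   >>> get_abi_tag()
#   'cp27mu'
#
# The exact value depends on the interpreter's compile-time configuration.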
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
result = distutils.util.get_platform().replace('.', '_').replace('-', '_')
if result == "linux_x86_64" and sys.maxsize == 2147483647:
# pip pull request #3497
result = "linux_i686"
return result
def get_supported(versions=None, supplied_platform=None):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
version_info = get_impl_version_info()
major = version_info[:-1]
# Support all previous minor Python versions.
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = get_abbr_impl()
abis = []
abi = get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
platforms = []
if supplied_platform:
platforms.append(supplied_platform)
platforms.append(get_platform())
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in platforms:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# abi3 modules compatible with older version of Python
for version in versions[1:]:
# abi3 was introduced in Python 3.2
if version in ('31', '30'):
break
for abi in abi3s: # empty set if not Python 3
for arch in platforms:
supported.append(("%s%s" % (impl, version), abi, arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# Major Python version + platform; e.g. binaries not using the Python API
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
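# Illustrative usage (not part of the original module); the exact tags depend on
# the running interpreter and platform. On CPython 2.7 / 64-bit Linux the most
# specific tag would typically be:
#
#   >>> get_supported()[0]
#   ('cp27', 'cp27mu', 'linux_x86_64')
#
# Later entries progressively relax the constraints, down to generic,
# implementation-agnostic tags such as ('py27', 'none', 'any').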
| mit |
sarantapichos/faircoop-market | addons/account/account_analytic_line.py | 304 | 7914 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
from openerp.tools.translate import _
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_description = 'Analytic Line'
_columns = {
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'general_account_id': fields.many2one('account.account', 'General Account', required=True, ondelete='restrict'),
'move_id': fields.many2one('account.move.line', 'Move Line', ondelete='cascade', select=True),
'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal', required=True, ondelete='restrict', select=True),
'code': fields.char('Code', size=8),
'ref': fields.char('Ref.'),
'currency_id': fields.related('move_id', 'currency_id', type='many2one', relation='res.currency', string='Account Currency', store=True, help="The related account currency if not equal to the company one.", readonly=True),
'amount_currency': fields.related('move_id', 'amount_currency', type='float', string='Amount Currency', store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
}
_order = 'date desc'
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('from_date',False):
args.append(['date', '>=', context['from_date']])
if context.get('to_date',False):
args.append(['date','<=', context['to_date']])
return super(account_analytic_line, self).search(cr, uid, args, offset, limit,
order, context=context, count=count)
def _check_company(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.move_id and not l.account_id.company_id.id == l.move_id.account_id.company_id.id:
return False
return True
# Compute the cost based on the price type defined in the company's
# property_valuation_price_type property
def on_change_unit_amount(self, cr, uid, id, prod_id, quantity, company_id,
unit=False, journal_id=False, context=None):
if context==None:
context={}
if not journal_id:
j_ids = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=','purchase')])
journal_id = j_ids and j_ids[0] or False
if not journal_id or not prod_id:
return {}
product_obj = self.pool.get('product.product')
analytic_journal_obj =self.pool.get('account.analytic.journal')
product_price_type_obj = self.pool.get('product.price.type')
product_uom_obj = self.pool.get('product.uom')
j_id = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
prod = product_obj.browse(cr, uid, prod_id, context=context)
result = 0.0
if prod_id:
unit_obj = False
if unit:
unit_obj = product_uom_obj.browse(cr, uid, unit, context=context)
if not unit_obj or prod.uom_id.category_id.id != unit_obj.category_id.id:
unit = prod.uom_id.id
if j_id.type == 'purchase':
if not unit_obj or prod.uom_po_id.category_id.id != unit_obj.category_id.id:
unit = prod.uom_po_id.id
if j_id.type <> 'sale':
a = prod.property_account_expense.id
if not a:
a = prod.categ_id.property_account_expense_categ.id
if not a:
raise osv.except_osv(_('Error!'),
_('There is no expense account defined ' \
'for this product: "%s" (id:%d).') % \
(prod.name, prod.id,))
else:
a = prod.property_account_income.id
if not a:
a = prod.categ_id.property_account_income_categ.id
if not a:
raise osv.except_osv(_('Error!'),
_('There is no income account defined ' \
'for this product: "%s" (id:%d).') % \
(prod.name, prod_id,))
flag = False
# Compute based on pricetype
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','standard_price')], context=context)
pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
if journal_id:
journal = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
if journal.type == 'sale':
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
if product_price_type_ids:
pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
# Take the company currency as the reference one
if pricetype.field == 'list_price':
flag = True
ctx = context.copy()
if unit:
# price_get() will respect a 'uom' in its context, in order
# to return a default price for those units
ctx['uom'] = unit
amount_unit = prod.price_get(pricetype.field, context=ctx)[prod.id]
prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
amount = amount_unit * quantity or 0.0
result = round(amount, prec)
if not flag:
result *= -1
return {'value': {
'amount': result,
'general_account_id': a,
'product_uom_id': unit
}
}
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
if context.get('account_id', False):
# account_id in context may also be pointing to an account.account.id
cr.execute('select name from account_analytic_account where id=%s', (context['account_id'],))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
return False
class res_partner(osv.osv):
""" Inherits partner and adds contract information in the partner form """
_inherit = 'res.partner'
_columns = {
'contract_ids': fields.one2many('account.analytic.account', \
'partner_id', 'Contracts', readonly=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/requests/exceptions.py | 352 | 2776 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request."""
def __init__(self, *args, **kwargs):
"""
Initialize RequestException with `request` and `response` objects.
"""
response = kwargs.pop('response', None)
self.response = response
self.request = kwargs.pop('request', None)
if (response is not None and not self.request and
hasattr(response, 'request')):
self.request = self.response.request
super(RequestException, self).__init__(*args, **kwargs)
class HTTPError(RequestException):
"""An HTTP error occurred."""
class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
class Timeout(RequestException):
"""The request timed out.
Catching this error will catch both
:exc:`~requests.exceptions.ConnectTimeout` and
:exc:`~requests.exceptions.ReadTimeout` errors.
"""
class ConnectTimeout(ConnectionError, Timeout):
"""The request timed out while trying to connect to the remote server.
Requests that produced this error are safe to retry.
"""
class ReadTimeout(Timeout):
"""The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException):
"""A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
"""Too many redirects."""
class MissingSchema(RequestException, ValueError):
"""The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
"""See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
""" The URL provided was somehow invalid. """
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content"""
class StreamConsumedError(RequestException, TypeError):
"""The content for this response was already consumed"""
class RetryError(RequestException):
"""Custom retries logic failed"""
# Warnings
class RequestsWarning(Warning):
"""Base warning for Requests."""
pass
class FileModeWarning(RequestsWarning, DeprecationWarning):
"""
A file was opened in text mode, but Requests determined its binary length.
"""
pass
| mit |
jart/tensorflow | tensorflow/python/training/checkpointable/tracking_test.py | 2 | 6361 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
from tensorflow.python.util import nest
class InterfaceTests(test.TestCase):
def testMultipleAssignment(self):
root = tracking.Checkpointable()
root.leaf = tracking.Checkpointable()
root.leaf = root.leaf
duplicate_name_dep = tracking.Checkpointable()
with self.assertRaisesRegexp(ValueError, "already declared"):
root._track_checkpointable(duplicate_name_dep, name="leaf")
# No error; we're overriding __setattr__, so we can't really stop people
# from doing this while maintaining backward compatibility.
root.leaf = duplicate_name_dep
root._track_checkpointable(duplicate_name_dep, name="leaf", overwrite=True)
self.assertIs(duplicate_name_dep, root._lookup_dependency("leaf"))
(_, dep_object), = root._checkpoint_dependencies
self.assertIs(duplicate_name_dep, dep_object)
def testNoDependency(self):
root = tracking.Checkpointable()
hasdep = tracking.Checkpointable()
root.hasdep = hasdep
nodep = tracking.Checkpointable()
root.nodep = data_structures.NoDependency(nodep)
self.assertEqual(1, len(root._checkpoint_dependencies))
self.assertIs(root._checkpoint_dependencies[0].ref, root.hasdep)
self.assertIs(root.hasdep, hasdep)
self.assertIs(root.nodep, nodep)
class NoDependencyModel(training.Model):
@base.no_automatic_dependency_tracking
def __init__(self):
super(NoDependencyModel, self).__init__()
self.a = []
self.b = tracking.Checkpointable()
nodeps = NoDependencyModel()
self.assertEqual([nodeps], util.list_objects(nodeps))
def testListBasic(self):
a = tracking.Checkpointable()
b = tracking.Checkpointable()
a.l = [b]
c = tracking.Checkpointable()
a.l.append(c)
a_deps = util.list_objects(a)
self.assertIn(b, a_deps)
self.assertIn(c, a_deps)
direct_a_dep, = a._checkpoint_dependencies
self.assertEqual("l", direct_a_dep.name)
self.assertIn(b, direct_a_dep.ref)
self.assertIn(c, direct_a_dep.ref)
@test_util.run_in_graph_and_eager_modes
def testMutationDirtiesList(self):
a = tracking.Checkpointable()
b = tracking.Checkpointable()
a.l = [b]
c = tracking.Checkpointable()
a.l.insert(0, c)
checkpoint = util.Checkpoint(a=a)
with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testOutOfBandEditDirtiesList(self):
a = tracking.Checkpointable()
b = tracking.Checkpointable()
held_reference = [b]
a.l = held_reference
c = tracking.Checkpointable()
held_reference.append(c)
checkpoint = util.Checkpoint(a=a)
with self.assertRaisesRegexp(ValueError, "The wrapped list was modified"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testNestedLists(self):
a = tracking.Checkpointable()
a.l = []
b = tracking.Checkpointable()
a.l.append([b])
c = tracking.Checkpointable()
a.l[0].append(c)
a_deps = util.list_objects(a)
self.assertIn(b, a_deps)
self.assertIn(c, a_deps)
a.l[0].append(1)
d = tracking.Checkpointable()
a.l[0].append(d)
a_deps = util.list_objects(a)
self.assertIn(d, a_deps)
self.assertIn(b, a_deps)
self.assertIn(c, a_deps)
self.assertNotIn(1, a_deps)
e = tracking.Checkpointable()
f = tracking.Checkpointable()
a.l1 = [[], [e]]
a.l1[0].append(f)
a_deps = util.list_objects(a)
self.assertIn(e, a_deps)
self.assertIn(f, a_deps)
checkpoint = util.Checkpoint(a=a)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
a.l[0].append(data_structures.NoDependency([]))
a.l[0][-1].append(5)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
# Dirtying the inner list means the root object is unsaveable.
a.l[0][1] = 2
with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testNoDepList(self):
a = training.Model()
a.l1 = data_structures.NoDependency([])
a.l1.insert(1, 0)
self.assertTrue(isinstance(a.l1, list))
checkpoint = util.Checkpoint(a=a)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
a.l2 = []
a.l2.insert(1, 0)
with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testAssertions(self):
a = tracking.Checkpointable()
a.l = [numpy.zeros([2, 2])]
self.assertAllEqual([numpy.zeros([2, 2])], a.l)
self.assertAllClose([numpy.zeros([2, 2])], a.l)
nest.map_structure(self.assertAllClose, a.l, [numpy.zeros([2, 2])])
a.tensors = [array_ops.ones([2, 2]), array_ops.zeros([3, 3])]
self.assertAllClose([numpy.ones([2, 2]), numpy.zeros([3, 3])],
self.evaluate(a.tensors))
if __name__ == "__main__":
test.main()
| apache-2.0 |
thelazier/dash | qa/rpc-tests/bip68-112-113-p2p.py | 40 | 27355 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
'''
This test is meant to exercise activation of the first version bits soft fork
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed block chain with the 82 inputs will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
'''
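# Illustrative note (not part of the original test): regtest BIP9 periods are
# 144 blocks long and lock-in needs at least 108 signalling blocks (75%). That
# is why the plan above first mines a period with only 100 signalling blocks to
# show the state does not advance, then a period with 108 to reach LOCKED_IN.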
base_relative_locktime = 10
seq_disable_flag = 1<<31
seq_random_high_bit = 1<<25
seq_type_flag = 1<<22
seq_random_low_bit = 1<<18
# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = []
for b31 in xrange(2):
b25times = []
for b25 in xrange(2):
b22times = []
for b22 in xrange(2):
b18times = []
for b18 in xrange(2):
rlt = base_relative_locktime
if (b31):
rlt = rlt | seq_disable_flag
if (b25):
rlt = rlt | seq_random_high_bit
if (b22):
rlt = rlt | seq_type_flag
if (b18):
rlt = rlt | seq_random_low_bit
b18times.append(rlt)
b22times.append(b18times)
b25times.append(b22times)
relative_locktimes.append(b25times)
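# Illustrative note (not part of the original test): each entry combines the
# base locktime of 10 with the flags selected by the four index bits, e.g.
# relative_locktimes[0][0][1][0] == base_relative_locktime | seq_type_flag
# (a time-based relative lock), while relative_locktimes[1][0][0][0] has
# seq_disable_flag set, so BIP 68/112 treat that sequence lock as disabled.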
def all_rlt_txs(txarray):
txs = []
for b31 in xrange(2):
for b25 in xrange(2):
for b22 in xrange(2):
for b18 in xrange(2):
txs.append(txarray[b31][b25][b22][b18])
return txs
class BIP68_112_113Test(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def send_generic_input_tx(self, node, coinbases):
amount = Decimal("499.99")
return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
return tx
def sign_transaction(self, node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in xrange(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
def create_test_block(self, txs, version = 536870912):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = version
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
txs = []
assert(len(bip68inputs) >= 16)
i = 0
for b31 in xrange(2):
b25txs = []
for b25 in xrange(2):
b22txs = []
for b22 in xrange(2):
b18txs = []
for b18 in xrange(2):
tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("499.98"))
i += 1
tx.nVersion = txversion
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
b18txs.append(self.sign_transaction(self.nodes[0], tx))
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def create_bip112special(self, input, txversion):
tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("499.98"))
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
txs = []
assert(len(bip112inputs) >= 16)
i = 0
for b31 in xrange(2):
b25txs = []
for b25 in xrange(2):
b22txs = []
for b22 in xrange(2):
b18txs = []
for b18 in xrange(2):
tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("499.98"))
i += 1
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = base_relative_locktime + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
b18txs.append(signtx)
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def get_tests(self):
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 1
# Advanced from DEFINED to STARTED, height = 143
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 2
# Failed to advance past STARTED, height = 287
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 3
# Advanced from STARTED to LOCKED_IN, height = 431
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 4
### Inputs at height = 572
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in xrange(16):
bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in xrange(2):
inputs = []
for i in xrange(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in xrange(2):
inputs = []
for i in xrange(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
# 1 normal input
bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int("0x" + inputblockhash + "L", 0)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 5
# Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("499.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("499.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
### TESTING ###
##################################
### Before Soft Forks Activate ###
##################################
# All txs should pass
### Version 1 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 8
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
#################################
### After Soft Forks Activate ###
#################################
### BIP 113 ###
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 13
### BIP 68 ###
### Version 1 txs ###
# All still pass
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
bip68success_txs = []
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
for b25 in xrange(2):
for b22 in xrange(2):
for b18 in xrange(2):
bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = []
for b25 in xrange(2):
for b18 in xrange(2):
bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
for tx in bip68timetxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
bip68heighttxs = []
for b25 in xrange(2):
for b18 in xrange(2):
bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 24
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 30
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### BIP 112 ###
### Version 1 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = []
for b25 in xrange(2):
for b22 in xrange(2):
for b18 in xrange(2):
success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
for b25 in xrange(2):
for b22 in xrange(2):
for b18 in xrange(2):
fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
### Version 2 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = []
for b25 in xrange(2):
for b22 in xrange(2):
for b18 in xrange(2):
success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
for b25 in xrange(2):
for b22 in xrange(2):
for b18 in xrange(2):
fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = []
for b25 in xrange(2):
for b22 in xrange(2):
for b18 in xrange(2):
fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
# If sequencelock types mismatch, tx should fail
fail_txs = []
for b25 in xrange(2):
for b18 in xrange(2):
fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
# Remaining txs should pass, just test masking works properly
success_txs = []
for b25 in xrange(2):
for b18 in xrange(2):
success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Additional test, of checking that comparison of two time types works properly
time_txs = []
for b25 in xrange(2):
for b18 in xrange(2):
tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
signtx = self.sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Missing aspects of test
## Testing empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
| mit |
balister/GNU-Radio | gr-digital/python/digital/qa_costas_loop_cc.py | 12 | 4967 | #!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import random
import cmath
from gnuradio import gr, gr_unittest, digital, blocks
from gnuradio.digital import psk
class test_costas_loop_cc(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test01(self):
# test basic functionality by setting all gains to 0
natfreq = 0.0
order = 2
self.test = digital.costas_loop_cc(natfreq, order)
data = 100*[complex(1,0),]
self.src = blocks.vector_source_c(data, False)
self.snk = blocks.vector_sink_c()
self.tb.connect(self.src, self.test, self.snk)
self.tb.run()
expected_result = data
dst_data = self.snk.data()
self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 5)
def test02(self):
# Make sure it doesn't diverge given perfect data
natfreq = 0.25
order = 2
self.test = digital.costas_loop_cc(natfreq, order)
data = [complex(2*random.randint(0,1)-1, 0) for i in xrange(100)]
self.src = blocks.vector_source_c(data, False)
self.snk = blocks.vector_sink_c()
self.tb.connect(self.src, self.test, self.snk)
self.tb.run()
expected_result = data
dst_data = self.snk.data()
self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 5)
def test03(self):
# BPSK Convergence test with static rotation
natfreq = 0.25
order = 2
self.test = digital.costas_loop_cc(natfreq, order)
rot = cmath.exp(0.2j) # some small rotation
data = [complex(2*random.randint(0,1)-1, 0) for i in xrange(100)]
N = 40 # settling time
expected_result = data[N:]
data = [rot*d for d in data]
self.src = blocks.vector_source_c(data, False)
self.snk = blocks.vector_sink_c()
self.tb.connect(self.src, self.test, self.snk)
self.tb.run()
dst_data = self.snk.data()[N:]
# generously compare results; the loop will converge near to, but
# not exactly on, the target data
self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 2)
def test04(self):
# QPSK Convergence test with static rotation
natfreq = 0.25
order = 4
self.test = digital.costas_loop_cc(natfreq, order)
rot = cmath.exp(0.2j) # some small rotation
data = [complex(2*random.randint(0,1)-1, 2*random.randint(0,1)-1)
for i in xrange(100)]
N = 40 # settling time
expected_result = data[N:]
data = [rot*d for d in data]
self.src = blocks.vector_source_c(data, False)
self.snk = blocks.vector_sink_c()
self.tb.connect(self.src, self.test, self.snk)
self.tb.run()
dst_data = self.snk.data()[N:]
# generously compare results; the loop will converge near to, but
# not exactly on, the target data
self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 2)
def test05(self):
# 8PSK Convergence test with static rotation
natfreq = 0.25
order = 8
self.test = digital.costas_loop_cc(natfreq, order)
rot = cmath.exp(-cmath.pi/8.0j) # rotate to match Costas rotation
const = psk.psk_constellation(order)
data = [random.randint(0,7) for i in xrange(100)]
data = [2*rot*const.points()[d] for d in data]
N = 40 # settling time
expected_result = data[N:]
rot = cmath.exp(0.1j) # some small rotation
data = [rot*d for d in data]
self.src = blocks.vector_source_c(data, False)
self.snk = blocks.vector_sink_c()
self.tb.connect(self.src, self.test, self.snk)
self.tb.run()
dst_data = self.snk.data()[N:]
# generously compare results; the loop will converge near to, but
# not exactly on, the target data
self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 2)
if __name__ == '__main__':
gr_unittest.run(test_costas_loop_cc, "test_costas_loop_cc.xml")
| gpl-3.0 |
Mtax/xadmin | xadmin/views/list.py | 6 | 25794 | from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.core.paginator import InvalidPage, Paginator
from django.db import models
from django.http import HttpResponseRedirect
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_str, smart_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from xadmin.util import lookup_field, display_for_field, label_for_field, boolean_icon
from base import ModelAdminView, filter_hook, inclusion_tag, csrf_protect_m
# List settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
PAGE_VAR = 'p'
TO_FIELD_VAR = 't'
COL_LIST_VAR = '_cols'
ERROR_FLAG = 'e'
DOT = '.'
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = _('Null')
class FakeMethodField(object):
"""
This class is used when a column is a model method; it wraps the method as a fake field so it can be displayed in the column-selection list.
"""
def __init__(self, name, verbose_name):
# Initialize common field attributes
self.name = name
self.verbose_name = verbose_name
self.primary_key = False
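# Illustrative usage (not part of the original module): a list column backed by
# a model method, e.g. `colored_name`, could be wrapped as
#
#   FakeMethodField('colored_name', u'Colored name')
#
# so the column-selection UI can treat it like an ordinary field.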
class ResultRow(dict):
pass
class ResultItem(object):
def __init__(self, field_name, row):
self.classes = []
self.text = ' '
self.wraps = []
self.tag = 'td'
self.tag_attrs = []
self.allow_tags = False
self.btns = []
self.menus = []
self.is_display_link = False
self.row = row
self.field_name = field_name
self.field = None
self.attr = None
self.value = None
@property
def label(self):
text = mark_safe(
self.text) if self.allow_tags else conditional_escape(self.text)
if force_str(text) == '':
text = mark_safe(' ')
for wrap in self.wraps:
text = mark_safe(wrap % text)
return text
@property
def tagattrs(self):
return mark_safe(
'%s%s' % ((self.tag_attrs and ' '.join(self.tag_attrs) or ''),
(self.classes and (' class="%s"' % ' '.join(self.classes)) or '')))
class ResultHeader(ResultItem):
def __init__(self, field_name, row):
super(ResultHeader, self).__init__(field_name, row)
self.tag = 'th'
self.tag_attrs = ['scope="col"']
self.sortable = False
self.allow_tags = True
self.sorted = False
self.ascending = None
self.sort_priority = None
self.url_primary = None
self.url_remove = None
self.url_toggle = None
class ListAdminView(ModelAdminView):
"""
Display a list of model objects. This class provides ordering and simple filtering features.
"""
list_display = ('__str__',)
list_display_links = ()
list_display_links_details = False
list_select_related = None
list_per_page = 50
list_max_show_all = 200
list_exclude = ()
search_fields = ()
paginator_class = Paginator
ordering = None
# Change list templates
object_list_template = None
def init_request(self, *args, **kwargs):
if not self.has_view_permission():
raise PermissionDenied
request = self.request
request.session['LIST_QUERY'] = (self.model_info, self.request.META['QUERY_STRING'])
self.pk_attname = self.opts.pk.attname
self.lookup_opts = self.opts
self.list_display = self.get_list_display()
self.list_display_links = self.get_list_display_links()
# Get page number parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
# Get params from request
self.show_all = ALL_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
@filter_hook
def get_list_display(self):
"""
Return a sequence containing the fields to be displayed on the list.
"""
self.base_list_display = (COL_LIST_VAR in self.request.GET and self.request.GET[COL_LIST_VAR] != "" and \
self.request.GET[COL_LIST_VAR].split('.')) or self.list_display
return list(self.base_list_display)
@filter_hook
def get_list_display_links(self):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or not self.list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(self.list_display)[:1]
def make_result_list(self):
# Get search parameters from the query string.
self.base_queryset = self.queryset()
self.list_queryset = self.get_list_queryset()
self.ordering_field_columns = self.get_ordering_field_columns()
self.paginator = self.get_paginator()
# Get the number of objects, with admin filters applied.
self.result_count = self.paginator.count
# Get the total number of objects, with no admin filters applied.
# Perform a slight optimization: Check to see whether any filters were
# given. If not, reuse paginator.count as the number of objects,
# because it has already been computed and the value is cached.
if not self.list_queryset.query.where:
self.full_result_count = self.result_count
else:
self.full_result_count = self.base_queryset.count()
self.can_show_all = self.result_count <= self.list_max_show_all
self.multi_page = self.result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and self.can_show_all) or not self.multi_page:
self.result_list = self.list_queryset._clone()
else:
try:
self.result_list = self.paginator.page(
self.page_num + 1).object_list
except InvalidPage:
if ERROR_FLAG in self.request.GET.keys():
return SimpleTemplateResponse('xadmin/views/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(self.request.path + '?' + ERROR_FLAG + '=1')
self.has_more = self.result_count > (
self.list_per_page * self.page_num + len(self.result_list))
@filter_hook
def get_result_list(self):
return self.make_result_list()
@filter_hook
def post_result_list(self):
return self.make_result_list()
@filter_hook
def get_list_queryset(self):
"""
Get the model queryset. The queryset has been filtered and ordered.
"""
# First, get queryset from base class.
queryset = self.queryset()
# Use select_related() if one of the list_display options is a field
# with a relationship and the provided queryset doesn't already have
# select_related defined.
if not queryset.query.select_related:
if self.list_select_related:
queryset = queryset.select_related()
elif self.list_select_related is None:
related_fields = []
for field_name in self.list_display:
try:
field = self.opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(field.rel, models.ManyToOneRel):
related_fields.append(field_name)
if related_fields:
queryset = queryset.select_related(*related_fields)
else:
pass
# Then, set queryset ordering.
queryset = queryset.order_by(*self.get_ordering())
# Return the queryset.
return queryset
# List ordering
def _get_default_ordering(self):
ordering = []
if self.ordering:
ordering = self.ordering
elif self.opts.ordering:
ordering = self.opts.ordering
return ordering
@filter_hook
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.opts.get_field(field_name)
return field.name
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self, field_name):
attr = getattr(self, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
@filter_hook
def get_ordering(self):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
ordering = list(super(ListAdminView, self).get_ordering()
or self._get_default_ordering())
if ORDER_VAR in self.params and self.params[ORDER_VAR]:
# Clear ordering and used params
ordering = [pfx + self.get_ordering_field(field_name) for n, pfx, field_name in
map(
lambda p: p.rpartition('-'),
self.params[ORDER_VAR].split('.'))
if self.get_ordering_field(field_name)]
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.opts.pk.name
if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
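    # Illustrative example (not in the original source): with
    # params[ORDER_VAR] == '-user.name', split('.') gives ['-user', 'name'];
    # rpartition('-') maps '-user' to ('', '-', 'user') and 'name' to
    # ('', '', 'name'), so after each name is resolved through
    # get_ordering_field() the ordering becomes ['-user', 'name'], plus
    # '-pk' because the primary key is not already present.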
@filter_hook
def get_ordering_field_columns(self):
"""
Returns a SortedDict mapping ordering field names to 'asc'/'desc'
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = SortedDict()
if ORDER_VAR not in self.params or not self.params[ORDER_VAR]:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for attr in self.list_display:
if self.get_ordering_field(attr) == field:
ordering_fields[field] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, field_name = p.rpartition('-')
ordering_fields[field_name] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_check_field_url(self, f):
"""
Return the select column menu items link.
We must use base_list_display, because list_display may be changed by plugins.
"""
fields = [fd for fd in self.base_list_display if fd != f.name]
if len(self.base_list_display) == len(fields):
if f.primary_key:
fields.insert(0, f.name)
else:
fields.append(f.name)
return self.get_query_string({COL_LIST_VAR: '.'.join(fields)})
def get_model_method_fields(self):
"""
Return the method fields defined on the model admin, using the FakeMethodField class to wrap each method as a fake db field.
"""
methods = []
for name in dir(self):
try:
if getattr(getattr(self, name), 'is_column', False):
methods.append((name, getattr(self, name)))
except:
pass
return [FakeMethodField(name, getattr(method, 'short_description', capfirst(name.replace('_', ' '))))
for name, method in methods]
@filter_hook
def get_context(self):
"""
Prepare the context for templates.
"""
self.title = _('%s List') % force_str(self.opts.verbose_name)
model_fields = [(f, f.name in self.list_display, self.get_check_field_url(f))
for f in (list(self.opts.fields) + self.get_model_method_fields()) if f.name not in self.list_exclude]
new_context = {
'model_name': force_str(self.opts.verbose_name_plural),
'title': self.title,
'cl': self,
'model_fields': model_fields,
'clean_select_field_url': self.get_query_string(remove=[COL_LIST_VAR]),
'has_add_permission': self.has_add_permission(),
'app_label': self.app_label,
'brand_name': self.opts.verbose_name_plural,
'brand_icon': self.get_model_icon(self.model),
'add_url': self.model_admin_url('add'),
'result_headers': self.result_headers(),
'results': self.results()
}
context = super(ListAdminView, self).get_context()
context.update(new_context)
return context
@filter_hook
def get_response(self, context, *args, **kwargs):
pass
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
"""
The 'change list' admin view for this model.
"""
response = self.get_result_list()
if response:
return response
context = self.get_context()
context.update(kwargs or {})
response = self.get_response(context, *args, **kwargs)
return response or TemplateResponse(request, self.object_list_template or
self.get_template_list('views/model_list.html'), context, current_app=self.admin_site.name)
@filter_hook
def post_response(self, *args, **kwargs):
pass
@csrf_protect_m
@filter_hook
def post(self, request, *args, **kwargs):
return self.post_result_list() or self.post_response(*args, **kwargs) or self.get(request, *args, **kwargs)
@filter_hook
def get_paginator(self):
return self.paginator_class(self.list_queryset, self.list_per_page, 0, True)
@filter_hook
def get_page_number(self, i):
if i == DOT:
return mark_safe(u'<span class="dot-page">...</span> ')
elif i == self.page_num:
return mark_safe(u'<span class="this-page">%d</span> ' % (i + 1))
else:
return mark_safe(u'<a href="%s"%s>%d</a> ' % (escape(self.get_query_string({PAGE_VAR: i})), (i == self.paginator.num_pages - 1 and ' class="end"' or ''), i + 1))
# Result List methods
@filter_hook
def result_header(self, field_name, row):
ordering_field_columns = self.ordering_field_columns
item = ResultHeader(field_name, row)
text, attr = label_for_field(field_name, self.model,
model_admin=self,
return_attr=True
)
item.text = text
item.attr = attr
if attr and not getattr(attr, "admin_order_field", None):
return item
# OK, it is sortable if we got this far
th_classes = ['sortable']
order_type = ''
new_order_type = 'desc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if field_name in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(field_name).lower()
sort_priority = ordering_field_columns.keys().index(field_name) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_asc = [] # params for making this field the primary sort, ascending
o_list_desc = [] # params for making this field the primary sort, descending
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == field_name: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_asc.insert(0, j)
o_list_desc.insert(0, '-' + j)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_asc.append(param)
o_list_desc.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if field_name not in ordering_field_columns:
o_list_asc.insert(0, field_name)
o_list_desc.insert(0, '-' + field_name)
item.sorted = sorted
item.sortable = True
item.ascending = (order_type == "asc")
item.sort_priority = sort_priority
menus = [
('asc', o_list_asc, 'caret-up', _(u'Sort ASC')),
('desc', o_list_desc, 'caret-down', _(u'Sort DESC')),
]
if sorted:
row['num_sorted_fields'] = row['num_sorted_fields'] + 1
menus.append((None, o_list_remove, 'times', _(u'Cancel Sort')))
item.btns.append('<a class="toggle" href="%s"><i class="fa fa-%s"></i></a>' % (
self.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}), 'sort-up' if order_type == "asc" else 'sort-down'))
item.menus.extend(['<li%s><a href="%s" class="active"><i class="fa fa-%s"></i> %s</a></li>' %
(
(' class="active"' if sorted and order_type == i[
0] else ''),
self.get_query_string({ORDER_VAR: '.'.join(i[1])}), i[2], i[3]) for i in menus])
item.classes.extend(th_classes)
return item
@filter_hook
def result_headers(self):
"""
Generates the list column headers.
"""
row = ResultRow()
row['num_sorted_fields'] = 0
row.cells = [self.result_header(
field_name, row) for field_name in self.list_display]
return row
@filter_hook
def result_item(self, obj, field_name, row):
"""
Generates the actual list of data.
"""
item = ResultItem(field_name, row)
try:
f, attr, value = lookup_field(field_name, obj, self)
except (AttributeError, ObjectDoesNotExist):
item.text = mark_safe("<span class='text-muted'>%s</span>" % EMPTY_CHANGELIST_VALUE)
else:
if f is None:
item.allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
item.allow_tags = True
item.text = boolean_icon(value)
else:
item.text = smart_unicode(value)
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(obj, f.name)
if field_val is None:
item.text = mark_safe("<span class='text-muted'>%s</span>" % EMPTY_CHANGELIST_VALUE)
else:
item.text = field_val
else:
item.text = display_for_field(value, f)
if isinstance(f, models.DateField)\
or isinstance(f, models.TimeField)\
or isinstance(f, models.ForeignKey):
item.classes.append('nowrap')
item.field = f
item.attr = attr
item.value = value
# If list_display_links not defined, add the link tag to the first field
if (item.row['is_display_first'] and not self.list_display_links) \
or field_name in self.list_display_links:
item.row['is_display_first'] = False
item.is_display_link = True
if self.list_display_links_details:
item_res_uri = self.model_admin_url("detail", getattr(obj, self.pk_attname))
if item_res_uri:
if self.has_change_permission(obj):
edit_url = self.model_admin_url("change", getattr(obj, self.pk_attname))
else:
edit_url = ""
item.wraps.append('<a data-res-uri="%s" data-edit-uri="%s" class="details-handler" rel="tooltip" title="%s">%%s</a>'
% (item_res_uri, edit_url, _(u'Details of %s') % str(obj)))
else:
url = self.url_for_result(obj)
item.wraps.append(u'<a href="%s">%%s</a>' % url)
return item
@filter_hook
def result_row(self, obj):
row = ResultRow()
row['is_display_first'] = True
row['object'] = obj
row.cells = [self.result_item(
obj, field_name, row) for field_name in self.list_display]
return row
@filter_hook
def results(self):
results = []
for obj in self.result_list:
results.append(self.result_row(obj))
return results
@filter_hook
def url_for_result(self, result):
return self.get_object_url(result)
# Media
@filter_hook
def get_media(self):
media = super(ListAdminView, self).get_media() + self.vendor('xadmin.page.list.js', 'xadmin.page.form.js')
if self.list_display_links_details:
media += self.vendor('xadmin.plugin.details.js', 'xadmin.form.css')
return media
# Blocks
@inclusion_tag('xadmin/includes/pagination.html')
def block_pagination(self, context, nodes, page_type='normal'):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = self.paginator, self.page_num
pagination_required = (
not self.show_all or not self.can_show_all) and self.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = {'normal': 5, 'small': 3}.get(page_type, 3)
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy pagination so the list of page links stays short.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_EACH_SIDE - 1))
page_range.append(DOT)
page_range.extend(
range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(
range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(
paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = self.can_show_all and not self.show_all and self.multi_page
return {
'cl': self,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and self.get_query_string({ALL_VAR: ''}),
'page_range': map(self.get_page_number, page_range),
'ALL_VAR': ALL_VAR,
'1': 1,
}
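# Worked example (an illustration, not part of the original source) of the
# "smart" page_range built by block_pagination: with page_type 'normal'
# (ON_EACH_SIDE=5, ON_ENDS=2), paginator.num_pages=30 and page_num=15, the
# range is [0, 1, 2, 3, '.', 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
# '.', 28, 29], which get_page_number renders as "1-4 ... 11-21 ... 29-30".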
| bsd-3-clause |
charbeljc/hr | __unported__/hr_public_holidays/__openerp__.py | 18 | 1366 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2011,2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Public Holidays',
'version': '1.0',
'category': 'Generic Modules/Human Resources',
'author': "Michael Telahun Makonnen <mmakonnen@gmail.com>,Odoo Community Association (OCA)",
'description': """
Manage Public Holidays
======================
""",
'website': 'http://miketelahun.wordpress.com',
'license': 'AGPL-3',
'depends': [
'hr',
],
'data': [
'security/ir.model.access.csv',
'hr_public_holidays_view.xml',
],
'test': [
],
'installable': False,
}
| agpl-3.0 |
lmprice/ansible | lib/ansible/modules/source_control/gitlab_project.py | 50 | 14738 | #!/usr/bin/python
# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_project
short_description: Creates/updates/deletes Gitlab Projects
description:
- When the project does exist and state=absent, the project will be deleted.
- When the project does exists and state=absent, the project will be deleted.
- When changes are made to the project, the project will be updated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- pyapi-gitlab python module
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- Whether the SSL certificate should be verified when using https.
type: bool
default: 'yes'
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
login_password:
description:
- Gitlab password for login_user
login_token:
description:
- Gitlab token for logging in.
group:
description:
- The name of the group to which this project belongs.
- When not provided, the project will belong to the user configured in 'login_user' or 'login_token'
- When provided with a username, the project will be created for this user. 'login_user' or 'login_token' needs admin rights.
name:
description:
- The name of the project
required: true
path:
description:
- The path of the project you want to create; this will be server_url/<group>/path
- If not supplied, name will be used.
description:
description:
- A description for the project.
issues_enabled:
description:
- Whether you want to create issues or not.
- Possible values are true and false.
type: bool
default: 'yes'
merge_requests_enabled:
description:
- If merge requests can be made or not.
- Possible values are true and false.
type: bool
default: 'yes'
wiki_enabled:
description:
- If a wiki for this project should be available or not.
- Possible values are true and false.
type: bool
default: 'yes'
snippets_enabled:
description:
- If creating snippets should be available or not.
- Possible values are true and false.
type: bool
default: 'yes'
public:
description:
- If the project is public available or not.
- Setting this to true is same as setting visibility_level to 20.
- Possible values are true and false.
type: bool
default: 'no'
visibility_level:
description:
- Private. visibility_level is 0. Project access must be granted explicitly for each user.
- Internal. visibility_level is 10. The project can be cloned by any logged in user.
- Public. visibility_level is 20. The project can be cloned without any authentication.
- Possible values are 0, 10 and 20.
default: 0
import_url:
description:
- Git repository which will be imported into gitlab.
- Gitlab server needs read access to this git repository.
state:
description:
- create or delete project.
- Possible values are present and absent.
default: "present"
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: Delete Gitlab Project
gitlab_project:
server_url: http://gitlab.example.com
validate_certs: False
login_token: WnUzDsxjy8230-Dy_k
name: my_first_project
state: absent
delegate_to: localhost
- name: Create Gitlab Project in group Ansible
gitlab_project:
server_url: https://gitlab.example.com
validate_certs: True
login_user: dj-wasabi
login_password: MySecretPassword
name: my_first_project
group: ansible
issues_enabled: False
wiki_enabled: True
snippets_enabled: True
import_url: http://git.example.com/example/lab.git
state: present
delegate_to: localhost
'''
RETURN = '''# '''
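# A hypothetical ad-hoc invocation (an assumption for illustration, not part
# of the module documentation above) could look like:
#   ansible localhost -m gitlab_project -a \
#     "server_url=https://gitlab.example.com login_token=WnUzDsxjy8230-Dy_k name=my_first_project state=present"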
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except:
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class GitLabProject(object):
def __init__(self, module, git):
self._module = module
self._gitlab = git
def createOrUpdateProject(self, project_exists, group_name, import_url, arguments):
is_user = False
group_id = self.getGroupId(group_name)
if not group_id:
group_id = self.getUserId(group_name)
is_user = True
if project_exists:
# Edit project
return self.updateProject(group_name, arguments)
else:
# Create project
if self._module.check_mode:
self._module.exit_json(changed=True)
return self.createProject(is_user, group_id, import_url, arguments)
def createProject(self, is_user, user_id, import_url, arguments):
if is_user:
result = self._gitlab.createprojectuser(user_id=user_id, import_url=import_url, **arguments)
else:
group_id = user_id
result = self._gitlab.createproject(namespace_id=group_id, import_url=import_url, **arguments)
if not result:
self._module.fail_json(msg="Failed to create project %r" % arguments['name'])
return result
def deleteProject(self, group_name, project_name):
if self.existsGroup(group_name):
project_owner = group_name
else:
project_owner = self._gitlab.currentuser()['username']
search_results = self._gitlab.searchproject(search=project_name)
for result in search_results:
owner = result['namespace']['name']
if owner == project_owner:
return self._gitlab.deleteproject(result['id'])
def existsProject(self, group_name, project_name):
if self.existsGroup(group_name):
project_owner = group_name
else:
project_owner = self._gitlab.currentuser()['username']
search_results = self._gitlab.searchproject(search=project_name)
for result in search_results:
owner = result['namespace']['name']
if owner == project_owner:
return True
return False
def existsGroup(self, group_name):
if group_name is not None:
# Find the group; if the group does not exist we try for a user
for group in self._gitlab.getall(self._gitlab.getgroups):
if group['name'] == group_name:
return True
user_name = group_name
user_data = self._gitlab.getusers(search=user_name)
for data in user_data:
if 'id' in data:
return True
return False
def getGroupId(self, group_name):
if group_name is not None:
# Find the group; if the group does not exist we try for a user
for group in self._gitlab.getall(self._gitlab.getgroups):
if group['name'] == group_name:
return group['id']
def getProjectId(self, group_name, project_name):
if self.existsGroup(group_name):
project_owner = group_name
else:
project_owner = self._gitlab.currentuser()['username']
search_results = self._gitlab.searchproject(search=project_name)
for result in search_results:
owner = result['namespace']['name']
if owner == project_owner:
return result['id']
def getUserId(self, user_name):
user_data = self._gitlab.getusers(search=user_name)
for data in user_data:
if 'id' in data:
return data['id']
return self._gitlab.currentuser()['id']
def to_bool(self, value):
if value:
return 1
else:
return 0
def updateProject(self, group_name, arguments):
project_changed = False
project_name = arguments['name']
project_id = self.getProjectId(group_name, project_name)
project_data = self._gitlab.getproject(project_id=project_id)
for arg_key, arg_value in arguments.items():
project_data_value = project_data[arg_key]
if isinstance(project_data_value, bool) or project_data_value is None:
to_bool = self.to_bool(project_data_value)
if to_bool != arg_value:
project_changed = True
continue
else:
if project_data_value != arg_value:
project_changed = True
if project_changed:
if self._module.check_mode:
self._module.exit_json(changed=True)
return self._gitlab.editproject(project_id=project_id, **arguments)
else:
return False
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True),
validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
login_user=dict(required=False, no_log=True),
login_password=dict(required=False, no_log=True),
login_token=dict(required=False, no_log=True),
group=dict(required=False),
name=dict(required=True),
path=dict(required=False),
description=dict(required=False),
issues_enabled=dict(default=True, type='bool'),
merge_requests_enabled=dict(default=True, type='bool'),
wiki_enabled=dict(default=True, type='bool'),
snippets_enabled=dict(default=True, type='bool'),
public=dict(default=False, type='bool'),
visibility_level=dict(default="0", choices=["0", "10", "20"]),
import_url=dict(required=False),
state=dict(default="present", choices=["present", 'absent']),
),
supports_check_mode=True
)
if not HAS_GITLAB_PACKAGE:
module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab")
server_url = module.params['server_url']
verify_ssl = module.params['validate_certs']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_token = module.params['login_token']
group_name = module.params['group']
project_name = module.params['name']
project_path = module.params['path']
description = module.params['description']
issues_enabled = module.params['issues_enabled']
merge_requests_enabled = module.params['merge_requests_enabled']
wiki_enabled = module.params['wiki_enabled']
snippets_enabled = module.params['snippets_enabled']
public = module.params['public']
visibility_level = module.params['visibility_level']
import_url = module.params['import_url']
state = module.params['state']
# We need both login_user and login_password or login_token, otherwise we fail.
if login_user is not None and login_password is not None:
use_credentials = True
elif login_token is not None:
use_credentials = False
else:
module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
# Set project_path to project_name if it is empty.
if project_path is None:
project_path = project_name.replace(" ", "_")
# The Gitlab API makes no distinction between upper and lower case, so we lower them.
project_name = project_name.lower()
project_path = project_path.lower()
if group_name is not None:
group_name = group_name.lower()
# Let's make a connection to the Gitlab server_url, with either login_user and login_password
# or with login_token
try:
if use_credentials:
git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
git.login(user=login_user, password=login_password)
else:
git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
except Exception as e:
module.fail_json(msg="Failed to connect to Gitlab server: %s " % to_native(e))
# Check if user is authorized or not before proceeding to any operations
# if not, exit from here
auth_msg = git.currentuser().get('message', None)
if auth_msg is not None and auth_msg == '401 Unauthorized':
module.fail_json(msg='User unauthorized',
details="User is not allowed to access Gitlab server "
"using login_token. Please check login_token")
# Validate if project exists and take action based on "state"
project = GitLabProject(module, git)
project_exists = project.existsProject(group_name, project_name)
# Creating the project dict
arguments = {"name": project_name,
"path": project_path,
"description": description,
"issues_enabled": project.to_bool(issues_enabled),
"merge_requests_enabled": project.to_bool(merge_requests_enabled),
"wiki_enabled": project.to_bool(wiki_enabled),
"snippets_enabled": project.to_bool(snippets_enabled),
"public": project.to_bool(public),
"visibility_level": int(visibility_level)}
if project_exists and state == "absent":
project.deleteProject(group_name, project_name)
module.exit_json(changed=True, result="Successfully deleted project %s" % project_name)
else:
if state == "absent":
module.exit_json(changed=False, result="Project deleted or does not exist")
else:
if project.createOrUpdateProject(project_exists, group_name, import_url, arguments):
module.exit_json(changed=True, result="Successfully created or updated the project %s" % project_name)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
fluxcapacitor/source.ml | jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/TensorFlow/GoogleTraining/workshop_sections/transfer_learning/TF_Estimator/transfer_learning.py | 3 | 33478 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple transfer learning with an Inception v3 architecture model which
displays summaries in TensorBoard.
This example shows how to take an Inception v3 architecture model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
This example is based on the code here:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
but has been further modified in a number of ways, including use of a custom
tf.contrib.learn.Estimator to support training, evaluation, prediction,
model checkpointing, and summary generation.
(The support for optional image distortion has also been removed in order to
simplify the example).
The top layer receives as input a 2048-dimensional vector for each image. We
train a softmax layer on top of this representation. Assuming the softmax layer
contains N labels, this corresponds to learning N + 2048*N model parameters
corresponding to the learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label.
An example folder 'flower_photos' could have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can process them and then run training on the images like this:
python transfer_learning.py --image_dir <your_toplevel_image_dir>
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
To view summary information in tensorboard, point it to the <model_dir>
(which if not specified will be automatically generated for you):
tensorboard --logdir=<model_dir>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import glob
import hashlib
import json
import os
import re
import struct
import sys
import tarfile
import time
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn import ModeKeys
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.metrics.python.ops import metric_ops
FLAGS = tf.app.flags.FLAGS
ARGFLAGS = None
LABELS_FILENAME = "output_labels.json"
# comment out for less info during the training runs.
tf.logging.set_verbosity(tf.logging.INFO)
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and
# their sizes. If you want to adapt this script to work with another model,
# you will need to update these to reflect the values in the network
# you're using.
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
If the model_dir already has a label list in it, use that to define the label
ordering as the images are processed.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved
for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images
split into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
# See if the model dir contains an existing labels list. This will only be
# the case if training using that model has occurred previously.
labels_list = None
output_labels_file = os.path.join(ARGFLAGS.model_dir, LABELS_FILENAME)
if gfile.Exists(output_labels_file):
with open(output_labels_file, 'r') as lfile:
labels_string = lfile.read()
labels_list = json.loads(labels_string)
print("Found labels list: %s" % labels_list)
result = {}
if labels_list:
for l in labels_list:
result[l] = {}
sub_dirs = [x[0] for x in os.walk(image_dir)]
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(glob.glob(file_glob))
if not file_list:
print('No files found')
continue
if len(file_list) < 20:
print('WARNING: Folder has less than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
print('WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file
# should go into the training, testing, or validation sets, and we
# want to keep existing files in the same set even if more files
# are subsequently added.
# To do that, we need a stable way of deciding based on just the
# file name itself, so we do a hash of that and then use that to
# generate a probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
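# Illustrative note (not part of the original source): for an image_dir with
# 'daisy' and 'rose' sub-folders, the returned structure is roughly
#   {'daisy': {'dir': 'daisy',
#              'training': ['photo1.jpg', ...],
#              'testing': ['photo7.jpg', ...],
#              'validation': ['photo9.jpg', ...]},
#    'rose': {...}}
# The split is deterministic because it is derived from a SHA-1 hash of each
# file name, so re-running the script keeps every image in the same set.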
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category):
""""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '.txt'
def create_inception_graph(dest_dir):
""""Creates a graph from saved GraphDef file and returns a Graph object.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Session() as sess:
model_filename = os.path.join(
dest_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
tf.import_graph_def(graph_def, name='', return_elements=[
BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
RESIZED_INPUT_TENSOR_NAME]))
return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
bottleneck_values = sess.run(
bottleneck_tensor,
{image_data_tensor: image_data})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract(dest_dir='/tmp/imagenet'):
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
"""
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_dir, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL,
filepath,
_progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_dir)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def write_list_of_floats_to_file(list_of_floats, file_path):
"""Writes a given list of floats to a binary file.
Args:
list_of_floats: List of floats we want to write to a file.
file_path: Path to a file where list of floats will be stored.
"""
s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)
with open(file_path, 'wb') as f:
f.write(s)
def read_list_of_floats_from_file(file_path):
"""Reads list of floats from a given file.
Args:
file_path: Path to a file where list of floats was stored.
Returns:
Array of bottleneck values (list of floats).
"""
with open(file_path, 'rb') as f:
s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())
return list(s)
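# Quick sanity check (an illustration, not from the original source): each
# bottleneck is BOTTLENECK_TENSOR_SIZE = 2048 doubles, so a file written by
# write_list_of_floats_to_file is 2048 * 8 = 16384 bytes, and
# read_list_of_floats_from_file(path) returns the same 2048 floats.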
bottleneck_path_2_bottleneck_values = {}
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from: training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
bottleneck_tensor: The output tensor for the bottleneck values.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category)
if not os.path.exists(bottleneck_path):
print('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
bottleneck_values = run_bottleneck_on_image(sess, image_data,
jpeg_data_tensor,
bottleneck_tensor)
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
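# Illustrative note (not part of the original source): each cached bottleneck
# lives under bottleneck_dir/<label_dir>/<image_name>.txt as 2048
# comma-separated floats, e.g. "0.273,1.051,0.0,...,0.482", so deleting
# bottleneck_dir simply forces the values to be recomputed on the next run.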
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
bottleneck_tensor: The penultimate output layer of the graph.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(sess, image_lists, label_name, index,
image_dir, category, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_all_cached_bottlenecks(
sess, image_lists, category, bottleneck_dir, image_dir, jpeg_data_tensor,
bottleneck_tensor):
bottlenecks = []
ground_truths = []
label_names = list(image_lists.keys())
for label_index in range(len(label_names)):
label_name = label_names[label_index]
for image_index in range(len(image_lists[label_name][category])):
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, bottleneck_tensor)
ground_truth = np.zeros(len(label_names), dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.scalar_summary('stddev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(var))
tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
def add_final_training_ops(
class_count, mode, final_tensor_name,
bottleneck_input, ground_truth_input):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this
function adds the right operations to the graph, along with some variables
to hold the weights, and then sets up all the gradients for the backward
pass.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces
results.
mode: The tf.contrib.learn ModeKeys value for this run (TRAIN, EVAL, or INFER).
bottleneck_input: Input tensor holding the cached bottleneck values.
ground_truth_input: Input tensor holding the one-hot ground truth labels.
Returns:
The training op, the cross-entropy loss tensor, and the final softmax output tensor.
"""
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
train_step = None
cross_entropy_mean = None
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
layer_weights = tf.Variable(
tf.truncated_normal(
[BOTTLENECK_TENSOR_SIZE, class_count],
stddev=0.001), name='final_weights')
variable_summaries(layer_weights, layer_name + '/weights')
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases, layer_name + '/biases')
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.histogram_summary(layer_name + '/pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.histogram_summary(final_tensor_name + '/activations', final_tensor)
if mode in [ModeKeys.EVAL, ModeKeys.TRAIN]:
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits, ground_truth_input)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.scalar_summary('cross entropy', cross_entropy_mean)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(
ARGFLAGS.learning_rate).minimize(
cross_entropy_mean,
global_step=tf.contrib.framework.get_global_step())
return (train_step, cross_entropy_mean, final_tensor)
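# Worked example (an assumption for illustration, not from the original
# text): with BOTTLENECK_TENSOR_SIZE = 2048 and class_count = 5 labels, the
# new layer learns 2048 * 5 weights plus 5 biases = 10,245 parameters, which
# matches the "N + 2048*N" figure quoted in the module docstring.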
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Nothing.
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.scalar_summary('accuracy', evaluation_step)
return evaluation_step
def make_model_fn(class_count, final_tensor_name):
def _make_model(bottleneck_input, ground_truth_input, mode, params):
prediction_dict = {}
train_step = None
cross_entropy = None
# Add the new layer that we'll be training.
(train_step, cross_entropy,
final_tensor) = add_final_training_ops(
class_count, mode, final_tensor_name,
bottleneck_input, ground_truth_input)
if mode == ModeKeys.EVAL:
prediction_dict['loss'] = cross_entropy
# Create the operations we need to evaluate accuracy
acc = add_evaluation_step(final_tensor, ground_truth_input)
prediction_dict['accuracy'] = acc
if mode == ModeKeys.INFER:
predclass = tf.argmax(final_tensor, 1)
prediction_dict["class_vector"] = final_tensor
prediction_dict["index"] = predclass
return prediction_dict, cross_entropy, train_step
return _make_model
METRICS = {
'loss': metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_mean,
prediction_key='loss'
),
'accuracy': metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_mean,
prediction_key='accuracy'
)
}
def make_image_predictions(
classifier, jpeg_data_tensor, bottleneck_tensor, path_list, labels_list):
"""Use the learned model to make predictions."""
if not labels_list:
output_labels_file = os.path.join(ARGFLAGS.model_dir, LABELS_FILENAME)
if gfile.Exists(output_labels_file):
with open(output_labels_file, 'r') as lfile:
labels_string = lfile.read()
labels_list = json.loads(labels_string)
print("labels list: %s" % labels_list)
else:
print("Labels list %s not found" % output_labels_file)
return None
sess = tf.Session()
bottlenecks = []
print("Predicting for images: %s" % path_list)
for img_path in path_list:
# get bottleneck for an image path. Don't cache the bottleneck values here.
if not gfile.Exists(img_path):
tf.logging.fatal('File does not exist %s', img_path)
image_data = gfile.FastGFile(img_path, 'rb').read()
bottleneck_values = run_bottleneck_on_image(sess, image_data,
jpeg_data_tensor,
bottleneck_tensor)
bottlenecks.append(bottleneck_values)
prediction_input = np.array(bottlenecks)
predictions = classifier.predict(x=prediction_input, as_iterable=True)
print("Predictions:")
for _, p in enumerate(predictions):
print("---------")
for k in p.keys():
print("%s is: %s " % (k, p[k]))
if k == "index":
print("index label is: %s" % labels_list[p[k]])
def get_prediction_images(img_dir):
"""Grab images from the prediction directory."""
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
if not gfile.Exists(img_dir):
print("Image directory '" + img_dir + "' not found.")
return None
print("Looking for images in '" + img_dir + "'")
for extension in extensions:
file_glob = os.path.join(img_dir, '*.' + extension)
file_list.extend(glob.glob(file_glob))
if not file_list:
print('No image files found')
return file_list
def main(_):
print("Using model directory %s" % ARGFLAGS.model_dir)
# Set up the pre-trained graph.
maybe_download_and_extract(dest_dir=ARGFLAGS.incp_model_dir)
graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
create_inception_graph(ARGFLAGS.incp_model_dir))
sess = tf.Session()
labels_list = None
if not ARGFLAGS.predict_only:
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(
ARGFLAGS.image_dir, ARGFLAGS.testing_percentage,
ARGFLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
print('No valid folders of images found at ' + ARGFLAGS.image_dir)
return -1
if class_count == 1:
print('Only one valid folder of images found at ' + ARGFLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(
sess, image_lists, ARGFLAGS.image_dir, ARGFLAGS.bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
else:
# load the labels list, needed to create the model; exit if it's not there
output_labels_file = os.path.join(ARGFLAGS.model_dir, LABELS_FILENAME)
if gfile.Exists(output_labels_file):
with open(output_labels_file, 'r') as lfile:
labels_string = lfile.read()
labels_list = json.loads(labels_string)
print("labels list: %s" % labels_list)
class_count = len(labels_list)
else:
print("Labels list %s not found" % output_labels_file)
return None
# Define the custom estimator
model_fn = make_model_fn(class_count, ARGFLAGS.final_tensor_name)
model_params = {}
classifier = tf.contrib.learn.Estimator(
model_fn=model_fn, params=model_params, model_dir=ARGFLAGS.model_dir)
if not ARGFLAGS.predict_only:
train_bottlenecks, train_ground_truth = get_all_cached_bottlenecks(
sess, image_lists, 'training',
ARGFLAGS.bottleneck_dir, ARGFLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
train_bottlenecks = np.array(train_bottlenecks)
train_ground_truth = np.array(train_ground_truth)
# then run the training, unless doing prediction only
print("Starting training for %s steps max" % ARGFLAGS.num_steps)
classifier.fit(
x=train_bottlenecks.astype(np.float32),
y=train_ground_truth, batch_size=50,
max_steps=ARGFLAGS.num_steps)
# We've completed our training, so run a test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth = get_all_cached_bottlenecks(
sess, image_lists, 'testing',
ARGFLAGS.bottleneck_dir, ARGFLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
test_bottlenecks = np.array(test_bottlenecks)
test_ground_truth = np.array(test_ground_truth)
print("evaluating....")
print(classifier.evaluate(
test_bottlenecks.astype(np.float32), test_ground_truth, metrics=METRICS))
# write the output labels file if it doesn't already exist
output_labels_file = os.path.join(ARGFLAGS.model_dir, LABELS_FILENAME)
if gfile.Exists(output_labels_file):
print("Labels list file already exists; not writing.")
else:
output_labels = json.dumps(list(image_lists.keys()))
with gfile.FastGFile(output_labels_file, 'w') as f:
f.write(output_labels)
print("\nPredicting...")
img_list = get_prediction_images(ARGFLAGS.prediction_img_dir)
if not img_list:
print("No images found in %s" % ARGFLAGS.prediction_img_dir)
else:
make_image_predictions(
classifier, jpeg_data_tensor, bottleneck_tensor, img_list, labels_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Input and output file flags.
parser.add_argument('--image_dir', type=str, required=True,
help="Path to folders of labeled images.")
# where the model information lives
parser.add_argument('--model_dir', type=str,
default=os.path.join(
"/tmp/tfmodels/img_classify",
str(int(time.time()))),
help='Directory for storing model info')
# whether to run prediction only.
parser.add_argument(
'--predict_only', dest='predict_only', action='store_true',
help="Run prediction only; checkpointed model must exist.")
parser.set_defaults(predict_only=False)
parser.add_argument(
'--prediction_img_dir', type=str, default='prediction_images',
help="Directory of images to use for predictions")
# Details of the training configuration.
parser.add_argument(
'--num_steps', type=int, default=15000,
help="How many training steps to run before ending.")
parser.add_argument(
'--learning_rate', type=float, default=0.01,
help="How large a learning rate to use when training.")
parser.add_argument(
'--testing_percentage', type=int, default=10,
help="What percentage of images to use as a test set.")
parser.add_argument(
'--validation_percentage', type=int, default=10,
help="What percentage of images to use as a validation set.")
parser.add_argument(
'--eval_step_interval', type=int, default=10,
help="How often to evaluate the training results.")
parser.add_argument(
'--train_batch_size', type=int, default=100,
help="How many images to train on at a time.")
parser.add_argument(
'--test_batch_size', type=int, default=500,
help="""How many images to test on at a time. This
test set is only used infrequently to verify
the overall accuracy of the model.""")
parser.add_argument(
'--validation_batch_size', type=int, default=100,
help="""How many images to use in an evaluation batch. This validation
set is used much more often than the test set, and is an early
indicator of how accurate the model is during training.""")
# File-system cache locations.
parser.add_argument(
'--incp_model_dir', type=str, default='/tmp/imagenet',
help="""Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.""")
parser.add_argument(
'--bottleneck_dir', type=str, default='/tmp/bottleneck',
help="Path to cache bottleneck layer values as files.")
parser.add_argument(
'--final_tensor_name', type=str, default='final_result',
help="""The name of the output classification
layer in the retrained graph.""")
# Controls the distortions used during training.
parser.add_argument(
'--flip_hz', dest='flip_hz', action='store_true',
help="Flip half of the training images horizontally")
parser.add_argument(
'--no-flip_hz', dest='flip_hz', action='store_false',
help="Don't flip half the training images horizontally. The default.")
parser.set_defaults(flip_hz=False)
parser.add_argument(
'--random_crop', type=int, default=0,
help="""A percentage determining how much of a margin to randomly
crop off the training images.""")
parser.add_argument(
'--random_scale', type=int, default=0,
help="""A percentage determining how much to randomly scale up
the size of the training images by.""")
parser.add_argument(
'--random_brightness', type=int, default=0,
help="""A percentage determining how much to randomly multiply
the training image input pixels up or down by.""")
ARGFLAGS = parser.parse_args()
tf.app.run()
| apache-2.0 |
chiefspace/udemy-rest-api | udemy_rest_api_section6/env/lib/python3.4/site-packages/setuptools/tests/test_dist_info.py | 452 | 2615 | """Test .dist-info style distributions.
"""
import os
import shutil
import tempfile
import unittest
import textwrap
try:
import ast
except ImportError:
pass
import pkg_resources
from setuptools.tests.py26compat import skipIf
def DALS(s):
"dedent and left-strip"
return textwrap.dedent(s).lstrip()
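# Illustrative note (added): DALS lets the METADATA blocks below be written as
# indented triple-quoted strings while still producing clean file contents;
# roughly,
#   DALS("""
#       Metadata-Version: 1.2
#       Name: Example
#       """)
# evaluates to "Metadata-Version: 1.2\nName: Example\n".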
class TestDistInfo(unittest.TestCase):
def test_distinfo(self):
dists = {}
for d in pkg_resources.find_distributions(self.tmpdir):
dists[d.project_name] = d
assert len(dists) == 2, dists
unversioned = dists['UnversionedDistribution']
versioned = dists['VersionedDistribution']
assert versioned.version == '2.718' # from filename
assert unversioned.version == '0.3' # from METADATA
@skipIf('ast' not in globals(),
"ast is used to test conditional dependencies (Python >= 2.6)")
def test_conditional_dependencies(self):
requires = [pkg_resources.Requirement.parse('splort==4'),
pkg_resources.Requirement.parse('quux>=1.1')]
for d in pkg_resources.find_distributions(self.tmpdir):
self.assertEqual(d.requires(), requires[:1])
self.assertEqual(d.requires(extras=('baz',)), requires)
self.assertEqual(d.extras, ['baz'])
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
versioned = os.path.join(self.tmpdir,
'VersionedDistribution-2.718.dist-info')
os.mkdir(versioned)
metadata_file = open(os.path.join(versioned, 'METADATA'), 'w+')
try:
metadata_file.write(DALS(
"""
Metadata-Version: 1.2
Name: VersionedDistribution
Requires-Dist: splort (4)
Provides-Extra: baz
Requires-Dist: quux (>=1.1); extra == 'baz'
"""))
finally:
metadata_file.close()
unversioned = os.path.join(self.tmpdir,
'UnversionedDistribution.dist-info')
os.mkdir(unversioned)
metadata_file = open(os.path.join(unversioned, 'METADATA'), 'w+')
try:
metadata_file.write(DALS(
"""
Metadata-Version: 1.2
Name: UnversionedDistribution
Version: 0.3
Requires-Dist: splort (==4)
Provides-Extra: baz
Requires-Dist: quux (>=1.1); extra == 'baz'
"""))
finally:
metadata_file.close()
def tearDown(self):
shutil.rmtree(self.tmpdir)
| gpl-2.0 |
alexgleith/Quantum-GIS | tests/src/python/test_qgsblendmodes.py | 2 | 7485 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsblendmodes.py
---------------------
Date : May 2013
Copyright : (C) 2013 by Nyall Dawson, Massimo Endrighi
Email : nyall dot dawson at gmail.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'May 2013'
__copyright__ = '(C) 2013, Nyall Dawson, Massimo Endrighi'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import (QgsVectorLayer,
QgsMapLayerRegistry,
QgsMapRenderer,
QgsCoordinateReferenceSystem,
QgsRenderChecker,
QgsRasterLayer,
QgsRasterDataProvider,
QgsMultiBandColorRenderer,
QGis)
from utilities import (unitTestDataPath,
getQgisTestApp,
TestCase,
unittest,
expectedFailure
)
# Convenience instances in case you may need them
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsBlendModes(TestCase):
def __init__(self, methodName):
"""Run once on class initialisation."""
unittest.TestCase.__init__(self, methodName)
# initialize class MapRegistry, Canvas, MapRenderer, Map and PAL
self.mMapRegistry = QgsMapLayerRegistry.instance()
# create point layer
myShpFile = os.path.join(TEST_DATA_DIR, 'points.shp')
self.mPointLayer = QgsVectorLayer(myShpFile, 'Points', 'ogr')
self.mMapRegistry.addMapLayer(self.mPointLayer)
# create polygon layer
myShpFile = os.path.join(TEST_DATA_DIR, 'polys.shp')
self.mPolygonLayer = QgsVectorLayer(myShpFile, 'Polygons', 'ogr')
self.mMapRegistry.addMapLayer(self.mPolygonLayer)
# create line layer
myShpFile = os.path.join(TEST_DATA_DIR, 'lines.shp')
self.mLineLayer = QgsVectorLayer(myShpFile, 'Lines', 'ogr')
self.mMapRegistry.addMapLayer(self.mLineLayer)
# create two raster layers
myRasterFile = os.path.join(TEST_DATA_DIR, 'landsat.tif')
self.mRasterLayer1 = QgsRasterLayer(myRasterFile, "raster1")
self.mRasterLayer2 = QgsRasterLayer(myRasterFile, "raster2")
myMultiBandRenderer1 = QgsMultiBandColorRenderer(self.mRasterLayer1.dataProvider(), 2, 3, 4)
self.mRasterLayer1.setRenderer(myMultiBandRenderer1)
self.mMapRegistry.addMapLayer(self.mRasterLayer1)
myMultiBandRenderer2 = QgsMultiBandColorRenderer(self.mRasterLayer2.dataProvider(), 2, 3, 4)
self.mRasterLayer2.setRenderer(myMultiBandRenderer2)
self.mMapRegistry.addMapLayer(self.mRasterLayer2)
# to match blend modes test comparisons background
self.mCanvas = CANVAS
self.mCanvas.setCanvasColor(QColor(152, 219, 249))
self.mMap = self.mCanvas.map()
self.mMap.resize(QSize(400, 400))
self.mMapRenderer = self.mCanvas.mapRenderer()
self.mMapRenderer.setOutputSize(QSize(400, 400), 72)
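    # Added note: each test below follows the same pattern -- choose a layer
    # set, apply a blend mode / transparency setting, then let QgsRenderChecker
    # render the map and diff it against a stored control image (for example
    # "expected_vector_blendmodes") before asserting on the boolean result.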
def testVectorBlending(self):
"""Test that blend modes work for vector layers."""
#Add vector layers to map
myLayers = QStringList()
myLayers.append(self.mLineLayer.id())
myLayers.append(self.mPolygonLayer.id())
self.mMapRenderer.setLayerSet(myLayers)
self.mMapRenderer.setExtent(self.mPointLayer.extent())
#Set blending modes for both layers
self.mLineLayer.setBlendMode(QPainter.CompositionMode_Difference)
self.mPolygonLayer.setBlendMode(QPainter.CompositionMode_Difference)
checker = QgsRenderChecker()
checker.setControlName("expected_vector_blendmodes")
checker.setMapRenderer(self.mMapRenderer)
        myResult = checker.runTest("vector_blendmodes")
myMessage = ('vector blending failed')
assert myResult, myMessage
#Reset layers
self.mLineLayer.setBlendMode(QPainter.CompositionMode_SourceOver)
self.mPolygonLayer.setBlendMode(QPainter.CompositionMode_SourceOver)
def testVectorFeatureBlending(self):
"""Test that feature blend modes work for vector layers."""
#Add vector layers to map
myLayers = QStringList()
myLayers.append(self.mLineLayer.id())
myLayers.append(self.mPolygonLayer.id())
self.mMapRenderer.setLayerSet(myLayers)
self.mMapRenderer.setExtent(self.mPointLayer.extent())
#Set feature blending for line layer
self.mLineLayer.setFeatureBlendMode(QPainter.CompositionMode_Plus)
checker = QgsRenderChecker()
checker.setControlName("expected_vector_featureblendmodes")
checker.setMapRenderer(self.mMapRenderer)
        myResult = checker.runTest("vector_featureblendmodes")
myMessage = ('vector feature blending failed')
assert myResult, myMessage
#Reset layers
self.mLineLayer.setFeatureBlendMode(QPainter.CompositionMode_SourceOver)
def testVectorLayerTransparency(self):
"""Test that layer transparency works for vector layers."""
#Add vector layers to map
myLayers = QStringList()
myLayers.append(self.mLineLayer.id())
myLayers.append(self.mPolygonLayer.id())
self.mMapRenderer.setLayerSet(myLayers)
self.mMapRenderer.setExtent(self.mPointLayer.extent())
#Set feature blending for line layer
        self.mLineLayer.setLayerTransparency(50)
checker = QgsRenderChecker()
checker.setControlName("expected_vector_layertransparency")
checker.setMapRenderer(self.mMapRenderer)
        myResult = checker.runTest("vector_layertransparency")
myMessage = ('vector layer transparency failed')
assert myResult, myMessage
def testRasterBlending(self):
"""Test that blend modes work for raster layers."""
#Add raster layers to map
myLayers = QStringList()
myLayers.append(self.mRasterLayer1.id())
myLayers.append(self.mRasterLayer2.id())
self.mMapRenderer.setLayerSet(myLayers)
self.mMapRenderer.setExtent(self.mRasterLayer1.extent())
#Set blending mode for top layer
self.mRasterLayer1.setBlendMode(QPainter.CompositionMode_Plus)
checker = QgsRenderChecker()
checker.setControlName("expected_raster_blendmodes")
checker.setMapRenderer(self.mMapRenderer)
        myResult = checker.runTest("raster_blendmodes")
myMessage = ('raster blending failed')
assert myResult, myMessage
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
iotaledger/iota.lib.py | iota/multisig/commands/prepare_multisig_transfer.py | 1 | 5102 | from typing import List, Optional
import filters as f
from iota import Address, ProposedTransaction
from iota.commands import FilterCommand, RequestFilter
from iota.commands.core import GetBalancesCommand
from iota.exceptions import with_context
from iota.filters import Trytes
from iota.multisig.transaction import ProposedMultisigBundle
from iota.multisig.types import MultisigAddress
__all__ = [
'PrepareMultisigTransferCommand',
]
class PrepareMultisigTransferCommand(FilterCommand):
"""
Implements `prepare_multisig_transfer` multisig API command.
References:
- :py:meth:`iota.multisig.api.MultisigIota.prepare_multisig_transfer`
"""
command = 'prepareMultisigTransfer'
def get_request_filter(self) -> 'PrepareMultisigTransferRequestFilter':
return PrepareMultisigTransferRequestFilter()
def get_response_filter(self):
pass
async def _execute(self, request: dict) -> dict:
change_address: Optional[Address] = request['changeAddress']
multisig_input: MultisigAddress = request['multisigInput']
transfers: List[ProposedTransaction] = request['transfers']
bundle = ProposedMultisigBundle(transfers)
want_to_spend = bundle.balance
if want_to_spend > 0:
gb_response = await GetBalancesCommand(self.adapter)(
addresses=[multisig_input],
)
multisig_input.balance = gb_response['balances'][0]
if multisig_input.balance < want_to_spend:
raise with_context(
exc=ValueError(
'Insufficient balance; found {found}, need {need} '
'(``exc.context`` has more info).'.format(
found=multisig_input.balance,
need=want_to_spend,
),
),
# The structure of this context object is intended
# to match the one from ``PrepareTransferCommand``.
context={
'available_to_spend': multisig_input.balance,
'confirmed_inputs': [multisig_input],
'request': request,
'want_to_spend': want_to_spend,
},
)
bundle.add_inputs([multisig_input])
if bundle.balance < 0:
if change_address:
bundle.send_unspent_inputs_to(change_address)
else:
#
# Unlike :py:meth:`iota.api.Iota.prepare_transfer`
# where all of the inputs are owned by the same
# seed, creating a multisig transfer usually
# involves multiple people.
#
# It would be unfair to the participants of the
# transaction if we were to automatically generate a
# change address using the seed of whoever happened
# to invoke the
# :py:meth:`MultisigIota.prepare_multisig_transfer`
# method!
#
raise with_context(
exc=ValueError(
'Bundle has unspent inputs, '
'but no change address specified.',
),
context={
'available_to_spend': multisig_input.balance,
'balance': bundle.balance,
'confirmed_inputs': [multisig_input],
'request': request,
'want_to_spend': want_to_spend,
},
)
else:
raise with_context(
exc=ValueError(
'Use ``prepare_transfer`` '
'to create a bundle without spending IOTAs.',
),
context={
'request': request,
},
)
bundle.finalize()
# Return the bundle with inputs unsigned.
return {
'trytes': bundle.as_tryte_strings(),
}
class PrepareMultisigTransferRequestFilter(RequestFilter):
def __init__(self) -> None:
super(PrepareMultisigTransferRequestFilter, self).__init__(
{
'changeAddress': Trytes(Address),
'multisigInput': f.Required | f.Type(MultisigAddress),
'transfers':
f.Required | f.Array | f.FilterRepeater(
f.Required | f.Type(ProposedTransaction),
),
},
allow_missing_keys={
'changeAddress',
},
)
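# Rough usage sketch (added; not part of this module). The class docstring
# above references iota.multisig.api.MultisigIota.prepare_multisig_transfer,
# so a caller presumably looks something like the following -- treat the
# constructor arguments, parameter spelling, and trytes values as placeholders:
#   api = MultisigIota(adapter='http://localhost:14265')
#   api.prepare_multisig_transfer(
#       transfers=[ProposedTransaction(address=Address(b'RECIPIENT9...'), value=42)],
#       multisig_input=multisig_address,        # a MultisigAddress holding the funds
#       change_address=Address(b'CHANGE9...'),  # needed when inputs exceed the spend
#   )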
| mit |
sahiljain/catapult | telemetry/telemetry/testing/progress_reporter_unittest.py | 31 | 1547 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.testing import progress_reporter
class TestFoo(unittest.TestCase):
# Test method doesn't have test- prefix intentionally. This is so that
# run_test script won't run this test.
def RunPassingTest(self):
pass
def RunFailingTest(self):
self.fail('expected failure')
class LoggingProgressReporter(object):
def __init__(self):
self._call_log = []
@property
def call_log(self):
return tuple(self._call_log)
def __getattr__(self, name):
def wrapper(*_):
self._call_log.append(name)
return wrapper
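# Added note: __getattr__ above returns a recording stub for any attribute
# name, so every reporter callback the runner invokes (StartTestRun, StartTest,
# Success, Failure, ...) simply appends its own name to call_log; the expected
# tuple in the test below relies on that. A minimal illustration:
#   reporter = LoggingProgressReporter()
#   reporter.StartTestRun()
#   assert reporter.call_log == ('StartTestRun',)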
class ProgressReporterTest(unittest.TestCase):
def testTestRunner(self):
suite = progress_reporter.TestSuite()
suite.addTest(TestFoo(methodName='RunPassingTest'))
suite.addTest(TestFoo(methodName='RunFailingTest'))
reporter = LoggingProgressReporter()
runner = progress_reporter.TestRunner()
progress_reporters = (reporter,)
result = runner.run(suite, progress_reporters, 1, None)
self.assertEqual(len(result.successes), 1)
self.assertEqual(len(result.failures), 1)
self.assertEqual(len(result.failures_and_errors), 1)
expected = (
'StartTestRun', 'StartTestSuite',
'StartTest', 'Success', 'StopTest',
'StartTest', 'Failure', 'StopTest',
'StopTestSuite', 'StopTestRun',
)
self.assertEqual(reporter.call_log, expected)
| bsd-3-clause |
google-research/google-research | smu/parser/smu_utils_lib_test.py | 1 | 35529 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for smu_utils_lib."""
import copy
import os
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
from rdkit import Chem
from google.protobuf import text_format
from smu import dataset_pb2
from smu.parser import smu_parser_lib
from smu.parser import smu_utils_lib
MAIN_DAT_FILE = 'x07_sample.dat'
STAGE1_DAT_FILE = 'x07_stage1.dat'
TESTDATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'testdata')
def str_to_bond_topology(s):
bt = dataset_pb2.BondTopology()
text_format.Parse(s, bt)
return bt
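# Illustrative only (added): the helper above parses protobuf text format, so
# a snippet such as
#   str_to_bond_topology('''
#     atoms: ATOM_O
#     atoms: ATOM_O
#     bonds { atom_b: 1 bond_type: BOND_DOUBLE }
#   ''')
# yields a populated dataset_pb2.BondTopology; the test cases below contain
# fuller examples of the same format.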
def get_stage1_conformer():
parser = smu_parser_lib.SmuParser(
os.path.join(TESTDATA_PATH, STAGE1_DAT_FILE))
conformer, _ = next(parser.process_stage1())
return conformer
def get_stage2_conformer():
parser = smu_parser_lib.SmuParser(os.path.join(TESTDATA_PATH, MAIN_DAT_FILE))
conformer, _ = next(parser.process_stage2())
return conformer
class SpecialIDTest(absltest.TestCase):
def test_from_dat_id(self):
self.assertIsNone(
smu_utils_lib.special_case_bt_id_from_dat_id(123456, 'CC'))
self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(999998, 'O'),
899650)
self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(0, 'O'),
899650)
with self.assertRaises(ValueError):
smu_utils_lib.special_case_bt_id_from_dat_id(0, 'NotASpecialCaseSmiles')
def test_from_bt_id(self):
self.assertIsNone(smu_utils_lib.special_case_dat_id_from_bt_id(123456))
self.assertEqual(
smu_utils_lib.special_case_dat_id_from_bt_id(899651), 999997)
class GetCompositionTest(absltest.TestCase):
def test_simple(self):
bt = dataset_pb2.BondTopology()
bt.atoms.extend([dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H])
self.assertEqual('x03_c2nh3', smu_utils_lib.get_composition(bt))
class GetCanonicalStoichiometryWithHydrogensTest(absltest.TestCase):
def test_cyclobutane(self):
bt = smu_utils_lib.create_bond_topology('CCCC', '110011', '2222')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)4')
def test_ethylene(self):
bt = smu_utils_lib.create_bond_topology('CC', '2', '22')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)2')
def test_acrylic_acid(self):
bt = smu_utils_lib.create_bond_topology('CCCOO', '2000100210', '21001')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(c)(ch)(ch2)(o)(oh)')
def test_fluorine(self):
bt = smu_utils_lib.create_bond_topology('OFF', '110', '000')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(o)(f)2')
def test_fully_saturated(self):
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('C', '', '4')), '(ch4)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('N', '', '3')), '(nh3)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('O', '', '2')), '(oh2)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('F', '', '1')), '(fh)')
def test_nplus_oneg(self):
bt = smu_utils_lib.create_bond_topology('NO', '1', '30')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(nh3)(o)')
class ParseBondTopologyTest(absltest.TestCase):
def test_4_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 4 N+O O O- 010110 3000')
self.assertEqual(num_atoms, 4)
self.assertEqual(atoms_str, 'N+O O O-')
self.assertEqual(matrix, '010110')
self.assertEqual(hydrogens, '3000')
def test_7_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 7 N+O O O O-F F 001011101001000000000 1000000')
self.assertEqual(num_atoms, 7)
self.assertEqual(atoms_str, 'N+O O O O-F F ') # Note the trailing space
self.assertEqual(matrix, '001011101001000000000')
self.assertEqual(hydrogens, '1000000')
class CreateBondTopologyTest(absltest.TestCase):
def test_no_charged(self):
got = smu_utils_lib.create_bond_topology('CNFF', '111000', '1200')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_F
atoms: ATOM_F
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 6
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_charged(self):
# This is actually C N N+O-
got = smu_utils_lib.create_bond_topology('CNNO', '200101', '2020')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 6
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 7
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_one_heavy(self):
got = smu_utils_lib.create_bond_topology('C', '', '4')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
class FromCSVTest(absltest.TestCase):
def test_basic(self):
infile = tempfile.NamedTemporaryFile(mode='w', delete=False)
infile.write(
'id,num_atoms,atoms_str,connectivity_matrix,hydrogens,smiles\n')
infile.write('68,3,C N+O-,310,010,[NH+]#C[O-]\n')
infile.write('134,4,N+O-F F ,111000,1000,[O-][NH+](F)F\n')
infile.close()
out = smu_utils_lib.generate_bond_topologies_from_csv(infile.name)
bt = next(out)
self.assertEqual(68, bt.bond_topology_id)
self.assertLen(bt.atoms, 4)
self.assertEqual(bt.smiles, '[NH+]#C[O-]')
bt = next(out)
self.assertEqual(134, bt.bond_topology_id)
self.assertLen(bt.atoms, 5)
self.assertEqual(bt.smiles, '[O-][NH+](F)F')
class ParseDuplicatesFileTest(absltest.TestCase):
def test_basic(self):
df = smu_utils_lib.parse_duplicates_file(
os.path.join(TESTDATA_PATH, 'small.equivalent_isomers.dat'))
pd.testing.assert_frame_equal(
pd.DataFrame(
columns=['name1', 'stoich1', 'btid1', 'shortconfid1', 'confid1',
'name2', 'stoich2', 'btid2', 'shortconfid2', 'confid2'],
data=[
['x07_c2n2o2fh3.224227.004',
'c2n2o2fh3', 224227, 4, 224227004,
'x07_c2n2o2fh3.224176.005',
'c2n2o2fh3', 224176, 5, 224176005],
['x07_c2n2o2fh3.260543.005',
'c2n2o2fh3', 260543, 5, 260543005,
'x07_c2n2o2fh3.224050.001',
'c2n2o2fh3', 224050, 1, 224050001],
]),
df,
check_like=True)
class BondTopologyToMoleculeTest(absltest.TestCase):
def test_o2(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('O=O', Chem.MolToSmiles(got))
def test_methane(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('[H]C([H])([H])[H]', Chem.MolToSmiles(got))
# This molecule is an N+ central atom, bonded to C (triply), O-, and F
def test_charged_molecule(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_C
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_F
bonds {
atom_b: 1
bond_type: BOND_TRIPLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 3
bond_type: BOND_SINGLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('C#[N+]([O-])F', Chem.MolToSmiles(got))
class ConformerToMoleculeTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
# We'll make a new initial_geometry which is just the current one with all
# coordinates multiplied by 1000
self.conformer.initial_geometries.append(
self.conformer.initial_geometries[0])
new_geom = self.conformer.initial_geometries[1]
for atom_pos in new_geom.atom_positions:
atom_pos.x = atom_pos.x * 1000
atom_pos.y = atom_pos.y * 1000
atom_pos.z = atom_pos.z * 1000
# For the extra bond_topology, we'll just copy the existing one and change
# the id. Through the dumb luck of the molecule we picked there's not a
# simple way to make this a new bond topology and still have it look valid
# to RDKit
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[1].bond_topology_id = 99999
def test_all_outputs(self):
mols = list(smu_utils_lib.conformer_to_molecules(self.conformer))
self.assertLen(mols, 6) # 2 bond topologies * (1 opt geom + 2 init_geom)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
'SMU 618451001 bt=618451(0/2) geom=opt',
'SMU 618451001 bt=99999(1/2) geom=init(0/2)',
'SMU 618451001 bt=99999(1/2) geom=init(1/2)',
'SMU 618451001 bt=99999(1/2) geom=opt'
])
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[4], kekuleSmiles=True, isomericSmiles=False))
def test_initial_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=True,
include_optimized_geometry=False,
include_all_bond_topologies=False))
self.assertLen(mols, 2)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
])
# This is just one random atom I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.6643, -3.470301, 3.4766],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('C', mols[1].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([664.299998, -3470.300473, 3476.600215],
list(mols[1].GetConformer().GetAtomPosition(1)),
atol=1e-6)
def test_optimized_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=False,
include_optimized_geometry=True,
include_all_bond_topologies=False))
self.assertLen(mols, 1)
self.assertEqual(
mols[0].GetProp('_Name'),
'SMU 618451001 bt=618451(0/2) geom=opt',
)
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
    # These are just two random atoms I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.540254, -3.465543, 3.456982],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('H', mols[0].GetAtomWithIdx(13).GetSymbol())
np.testing.assert_allclose([2.135153, -1.817366, 0.226376],
list(mols[0].GetConformer().GetAtomPosition(13)),
atol=1e-6)
class SmilesCompareTest(absltest.TestCase):
def test_string_format(self):
# for some simplicity later on, we use shorter names
self.assertEqual('MISSING', str(smu_utils_lib.SmilesCompareResult.MISSING))
self.assertEqual('MISMATCH',
str(smu_utils_lib.SmilesCompareResult.MISMATCH))
self.assertEqual('MATCH', str(smu_utils_lib.SmilesCompareResult.MATCH))
def test_missing(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISSING, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'O=O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
def test_mismatch(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
smiles: "BlahBlahBlah"
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISMATCH, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
def test_matched_and_h_stripping(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
smiles: "O"
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MATCH, result)
self.assertEqual('[H]O[H]', with_h)
self.assertEqual('O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'[H]O[H]',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
self.assertEqual(
'O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=False))
def test_compute_smiles_from_molecule_no_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
# This is expected. Even with include_hs=True, if there were no Hs in the
# molecule, they will not be in the smiles.
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True), 'COF')
def test_compute_smiles_from_molecule_with_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
Chem.SanitizeMol(mol, Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS)
mol = Chem.AddHs(mol)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True),
'[H]C([H])([H])OF')
def test_compute_smiles_from_molecule_special_case(self):
mol = Chem.MolFromSmiles('C12=C3C4=C1C4=C23', sanitize=False)
# Double check that this really is the special case -- we get back the
# SMILES we put in even though it's not the one we want.
self.assertEqual('C12=C3C4=C1C4=C23',
Chem.MolToSmiles(mol, kekuleSmiles=True))
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False),
'C12=C3C1=C1C2=C31')
def test_compute_smiles_from_molecule_labeled_with_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][N+:1]([H:2])([H:3])[N:4]([H:5])[O:6][C:7]([H:8])([H:9])[F:10]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=True, labeled_atoms=True))
def test_compute_smiles_from_molecule_labeled_no_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][NH2+:1][NH:2][O:3][CH2:4][F:5]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=False, labeled_atoms=True))
class MergeConformersTest(absltest.TestCase):
def setUp(self):
super().setUp()
# We are relying on the fact that the first conformer in both x07_sample.dat
    # and x07_stage1.dat is the same.
self.stage1_conformer = get_stage1_conformer()
self.stage2_conformer = get_stage2_conformer()
self.duplicate_conformer = dataset_pb2.Conformer()
self.duplicate_conformer.conformer_id = self.stage1_conformer.conformer_id
# A real duplicate conformer wouldn't have both of these fields filled in,
# but it's fine for the test to make sure everything is copied.
self.duplicate_conformer.duplicated_by = 123
self.duplicate_conformer.duplicate_of.extend([111, 222])
def test_two_stage2(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage2_conformer)
def test_two_stage1(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage1_conformer)
def test_two_duplicates(self):
duplicate_conformer2 = copy.deepcopy(self.duplicate_conformer)
duplicate_conformer2.duplicate_of[:] = [333, 444]
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.duplicate_conformer, duplicate_conformer2)
self.assertIsNone(got_conflict)
self.assertEqual(123, got_conf.duplicated_by)
self.assertCountEqual([111, 222, 333, 444], got_conf.duplicate_of)
def test_stage2_stage1(self):
# Add a duplicate to stage1 to make sure it is copied
self.stage1_conformer.duplicate_of.append(999)
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [999])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_conflict_energy(self):
self.stage2_conformer.properties.initial_geometry_energy.value = -1.23
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
1, 1, 1, 1, -1.23, 0.052254, -406.522079, 2.5e-05, True, True
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
# This stage2 values should be returned
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.23)
def test_stage2_stage1_conflict_error_codes(self):
self.stage2_conformer.properties.errors.error_nstat1 = 999
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
999, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_conflict_missing_geometry(self):
self.stage2_conformer.ClearField('optimized_geometry')
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, False
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_no_conflict_minus1(self):
# If stage2 contains a -1, we keep that (stricter error checking later on)
self.stage2_conformer.properties.initial_geometry_energy.value = -1.0
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.0)
def test_stage2_stage1_no_conflict_approx_equal(self):
self.stage2_conformer.properties.initial_geometry_energy.value += 1e-7
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage1_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage1_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage1
self.assertTrue(got_conf.properties.HasField('initial_geometry_energy'))
def test_multiple_initial_geometries(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.initial_geometries.append(bad_conformer.initial_geometries[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_multiple_bond_topologies(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.bond_topologies.append(bad_conformer.bond_topologies[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_different_bond_topologies(self):
self.stage1_conformer.bond_topologies[0].atoms[0] = (
dataset_pb2.BondTopology.ATOM_H)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage1_conformer)
class ConformerErrorTest(absltest.TestCase):
def test_stage1_no_error(self):
conformer = get_stage1_conformer()
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage1_error(self):
conformer = get_stage2_conformer()
conformer.properties.errors.error_frequencies = 123
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_no_error(self):
conformer = get_stage2_conformer()
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_error_in_1_expected_field(self):
conformer = get_stage2_conformer()
conformer.properties.errors.error_rotational_modes = 123
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_error_in_0_expected_field(self):
conformer = get_stage2_conformer()
# This field is 0 to indicate no error. Why the discrepancy? Who knows!
conformer.properties.errors.error_nsvg09 = 1
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_nstat1_is_3(self):
    # This is the other bizarre case. nstat1 of 3 is still considered success.
conformer = get_stage2_conformer()
conformer.properties.errors.error_nstat1 = 3
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
class FilterConformerByAvailabilityTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = dataset_pb2.Conformer()
properties = self.conformer.properties
# A STANDARD field
properties.single_point_energy_pbe0d3_6_311gd.value = 1.23
# A COMPLETE field
properties.homo_pbe0_aug_pc_1.value = 1.23
# An INTERNAL_ONLY field
properties.nuclear_repulsion_energy.value = 1.23
def test_standard(self):
smu_utils_lib.filter_conformer_by_availability(self.conformer,
[dataset_pb2.STANDARD])
self.assertTrue(
self.conformer.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertFalse(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))
self.assertFalse(
self.conformer.properties.HasField('nuclear_repulsion_energy'))
def test_complete_and_internal_only(self):
smu_utils_lib.filter_conformer_by_availability(
self.conformer, [dataset_pb2.COMPLETE, dataset_pb2.INTERNAL_ONLY])
self.assertFalse(
self.conformer.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertTrue(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))
self.assertTrue(
self.conformer.properties.HasField('nuclear_repulsion_energy'))
class ConformerToStandardTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_field_filtering(self):
# Check that the field which should be filtered starts out set
self.assertTrue(self.conformer.properties.HasField(
'single_point_energy_hf_6_31gd'))
got = smu_utils_lib.conformer_to_standard(self.conformer)
# Check for a field that was originally in self.conformer and should be
# filtered and a field which should still be present.
self.assertTrue(got.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertFalse(
got.properties.HasField('single_point_energy_hf_6_31gd'))
def test_remove_error_conformer(self):
self.conformer.properties.errors.error_frequencies = 123
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
def test_remove_duplicate(self):
self.conformer.duplicated_by = 123
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
class DetermineFateTest(parameterized.TestCase):
def test_duplicate_same_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
def test_duplicate_different_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1000
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
@parameterized.parameters(
(2, dataset_pb2.Conformer.FATE_GEOMETRY_OPTIMIZATION_PROBLEM),
(5, dataset_pb2.Conformer.FATE_DISASSOCIATED),
(4, dataset_pb2.Conformer.FATE_FORCE_CONSTANT_FAILURE),
(6, dataset_pb2.Conformer.FATE_DISCARDED_OTHER))
def test_geometry_failures(self, nstat1, expected_fate):
conformer = get_stage1_conformer()
conformer.properties.errors.error_nstat1 = nstat1
self.assertEqual(expected_fate, smu_utils_lib.determine_fate(conformer))
def test_no_result(self):
conformer = get_stage1_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS,
smu_utils_lib.determine_fate(conformer))
def test_calculation_errors(self):
conformer = get_stage2_conformer()
# This is a random choice of an error to set. I just need some error.
conformer.properties.errors.error_atomic_analysis = 999
self.assertEqual(dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR,
smu_utils_lib.determine_fate(conformer))
def test_success(self):
conformer = get_stage2_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_SUCCESS,
smu_utils_lib.determine_fate(conformer))
class ToBondTopologySummaryTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_dup_same(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_same_topology, 1)
def test_dup_diff(self):
self.conformer.fate = (
dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_different_topology, 1)
def test_geometry_failed(self):
self.conformer.fate = (dataset_pb2.Conformer.FATE_DISCARDED_OTHER)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_failed_geometry_optimization, 1)
def test_missing_calculation(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_kept_geometry, 1)
self.assertEqual(got[0].count_missing_calculation, 1)
def test_calculation_with_error(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_with_error, 0)
self.assertEqual(got[0].count_detected_match_with_error, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_with_error, 1)
self.assertEqual(got[1].count_detected_match_with_error, 0)
def test_calculation_success(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_SUCCESS
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_success, 0)
self.assertEqual(got[0].count_detected_match_success, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_success, 1)
self.assertEqual(got[1].count_detected_match_success, 0)
class LabeledSmilesTester(absltest.TestCase):
def test_atom_labels(self):
mol = Chem.MolFromSmiles('FCON[NH2+][O-]', sanitize=False)
self.assertIsNotNone(mol)
smiles_before = Chem.MolToSmiles(mol)
self.assertEqual(
smu_utils_lib.labeled_smiles(mol), 'F[CH2:1][O:2][NH:3][NH2+:4][O-:5]')
# Testing both the atom numbers and the smiles is redundant,
# but guards against possible future changes.
for atom in mol.GetAtoms():
self.assertEqual(atom.GetAtomMapNum(), 0)
self.assertEqual(Chem.MolToSmiles(mol), smiles_before)
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
sonaht/ansible | lib/ansible/modules/cloud/amazon/s3_lifecycle.py | 71 | 15738 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: s3_lifecycle
short_description: Manage s3 bucket lifecycle rules in AWS
description:
- Manage s3 bucket lifecycle rules in AWS
version_added: "2.0"
author: "Rob White (@wimnat)"
notes:
- If specifying expiration time as days then transition time must also be specified in days
- If specifying expiration time as a date then transition time must also be specified as a date
requirements:
- python-dateutil
options:
name:
description:
- "Name of the s3 bucket"
required: true
expiration_date:
description:
- >
Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must
be midnight and a GMT timezone must be specified.
required: false
default: null
expiration_days:
description:
- "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer."
required: false
default: null
prefix:
description:
- "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
required: false
default: null
rule_id:
description:
- "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
required: false
default: null
state:
description:
- "Create or remove the lifecycle rule"
required: false
default: present
choices: [ 'present', 'absent' ]
status:
description:
- "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
required: false
default: enabled
choices: [ 'enabled', 'disabled' ]
storage_class:
description:
- "The storage class to transition to. Currently there are two supported values - 'glacier' or 'standard_ia'."
- "The 'standard_ia' class is only being available from Ansible version 2.2."
required: false
default: glacier
choices: [ 'glacier', 'standard_ia']
transition_date:
description:
- >
Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class.
The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified,
        this parameter is required.
required: false
default: null
transition_days:
description:
- "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
- s3_lifecycle:
name: mybucket
expiration_days: 30
prefix: /logs/
status: enabled
state: present
# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
- s3_lifecycle:
name: mybucket
transition_days: 7
expiration_days: 90
prefix: /logs/
status: enabled
state: present
# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030.
# Note that midnight GMT must be specified.
# Be sure to quote your date strings
- s3_lifecycle:
name: mybucket
transition_date: "2020-12-30T00:00:00.000Z"
expiration_date: "2030-12-30T00:00:00.000Z"
prefix: /logs/
status: enabled
state: present
# Disable the rule created above
- s3_lifecycle:
name: mybucket
prefix: /logs/
status: disabled
state: present
# Delete the lifecycle rule created above
- s3_lifecycle:
name: mybucket
prefix: /logs/
state: absent
# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
- s3_lifecycle:
name: mybucket
prefix: /backups/
storage_class: standard_ia
transition_days: 31
state: present
status: enabled
'''
import xml.etree.ElementTree as ET
import copy
import datetime
try:
import dateutil.parser
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
try:
import boto
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location
from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition
from boto.exception import BotoServerError, S3CreateError, S3ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
def create_lifecycle_rule(connection, module):
name = module.params.get("name")
expiration_date = module.params.get("expiration_date")
expiration_days = module.params.get("expiration_days")
prefix = module.params.get("prefix")
rule_id = module.params.get("rule_id")
status = module.params.get("status")
storage_class = module.params.get("storage_class")
transition_date = module.params.get("transition_date")
transition_days = module.params.get("transition_days")
changed = False
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
module.fail_json(msg=e.message)
# Get the bucket's current lifecycle rules
try:
current_lifecycle_obj = bucket.get_lifecycle_config()
except S3ResponseError as e:
if e.error_code == "NoSuchLifecycleConfiguration":
current_lifecycle_obj = Lifecycle()
else:
module.fail_json(msg=e.message)
# Create expiration
if expiration_days is not None:
expiration_obj = Expiration(days=expiration_days)
elif expiration_date is not None:
expiration_obj = Expiration(date=expiration_date)
else:
expiration_obj = None
# Create transition
if transition_days is not None:
transition_obj = Transition(days=transition_days, storage_class=storage_class.upper())
elif transition_date is not None:
transition_obj = Transition(date=transition_date, storage_class=storage_class.upper())
else:
transition_obj = None
# Create rule
rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj)
# Create lifecycle
lifecycle_obj = Lifecycle()
appended = False
# If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
if current_lifecycle_obj:
# If rule ID exists, use that for comparison otherwise compare based on prefix
for existing_rule in current_lifecycle_obj:
if rule.id == existing_rule.id:
if compare_rule(rule, existing_rule):
lifecycle_obj.append(rule)
appended = True
else:
lifecycle_obj.append(rule)
changed = True
appended = True
elif rule.prefix == existing_rule.prefix:
existing_rule.id = None
if compare_rule(rule, existing_rule):
lifecycle_obj.append(rule)
appended = True
else:
lifecycle_obj.append(rule)
changed = True
appended = True
else:
lifecycle_obj.append(existing_rule)
# If nothing appended then append now as the rule must not exist
if not appended:
lifecycle_obj.append(rule)
changed = True
else:
lifecycle_obj.append(rule)
changed = True
# Write lifecycle to bucket
try:
bucket.configure_lifecycle(lifecycle_obj)
except S3ResponseError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed)
def compare_rule(rule_a, rule_b):
# Copy objects
rule1 = copy.deepcopy(rule_a)
rule2 = copy.deepcopy(rule_b)
# Delete Rule from Rule
try:
del rule1.Rule
except AttributeError:
pass
try:
del rule2.Rule
except AttributeError:
pass
# Extract Expiration and Transition objects
rule1_expiration = rule1.expiration
rule1_transition = rule1.transition
rule2_expiration = rule2.expiration
rule2_transition = rule2.transition
# Delete the Expiration and Transition objects from the Rule objects
del rule1.expiration
del rule1.transition
del rule2.expiration
del rule2.transition
# Compare
if rule1_transition is None:
rule1_transition = Transition()
if rule2_transition is None:
rule2_transition = Transition()
if rule1_expiration is None:
rule1_expiration = Expiration()
if rule2_expiration is None:
rule2_expiration = Expiration()
if (rule1.__dict__ == rule2.__dict__ and
rule1_expiration.__dict__ == rule2_expiration.__dict__ and
rule1_transition.__dict__ == rule2_transition.__dict__):
return True
else:
return False
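# Added note: compare_rule() detaches the nested Expiration and Transition
# objects and compares the remaining Rule attributes via __dict__; substituting
# empty Expiration()/Transition() for missing sections means a rule without,
# say, a transition block compares equal to one whose transition is simply
# absent, instead of failing on a None attribute lookup.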
def destroy_lifecycle_rule(connection, module):
name = module.params.get("name")
prefix = module.params.get("prefix")
rule_id = module.params.get("rule_id")
changed = False
if prefix is None:
prefix = ""
try:
bucket = connection.get_bucket(name)
except S3ResponseError as e:
module.fail_json(msg=e.message)
# Get the bucket's current lifecycle rules
try:
current_lifecycle_obj = bucket.get_lifecycle_config()
except S3ResponseError as e:
if e.error_code == "NoSuchLifecycleConfiguration":
module.exit_json(changed=changed)
else:
module.fail_json(msg=e.message)
# Create lifecycle
lifecycle_obj = Lifecycle()
# Check if rule exists
# If an ID exists, use that otherwise compare based on prefix
if rule_id is not None:
for existing_rule in current_lifecycle_obj:
if rule_id == existing_rule.id:
# We're not keeping the rule (i.e. deleting) so mark as changed
changed = True
else:
lifecycle_obj.append(existing_rule)
else:
for existing_rule in current_lifecycle_obj:
if prefix == existing_rule.prefix:
# We're not keeping the rule (i.e. deleting) so mark as changed
changed = True
else:
lifecycle_obj.append(existing_rule)
# Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration
try:
if lifecycle_obj:
bucket.configure_lifecycle(lifecycle_obj)
else:
bucket.delete_lifecycle_configuration()
except BotoServerError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=True, type='str'),
expiration_days = dict(default=None, required=False, type='int'),
expiration_date = dict(default=None, required=False, type='str'),
prefix = dict(default=None, required=False),
requester_pays = dict(default='no', type='bool'),
rule_id = dict(required=False, type='str'),
state = dict(default='present', choices=['present', 'absent']),
status = dict(default='enabled', choices=['enabled', 'disabled']),
storage_class = dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
transition_days = dict(default=None, required=False, type='int'),
transition_date = dict(default=None, required=False, type='str')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive = [
[ 'expiration_days', 'expiration_date' ],
[ 'expiration_days', 'transition_date' ],
[ 'transition_days', 'transition_date' ],
[ 'transition_days', 'expiration_date' ]
]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
if not HAS_DATEUTIL:
module.fail_json(msg='dateutil required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
try:
connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if connection is None:
connection = boto.connect_s3(**aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
expiration_date = module.params.get("expiration_date")
transition_date = module.params.get("transition_date")
state = module.params.get("state")
storage_class = module.params.get("storage_class")
# If expiration_date set, check string is valid
if expiration_date is not None:
try:
datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
except ValueError as e:
module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
if transition_date is not None:
try:
datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
except ValueError as e:
            module.fail_json(msg="transition_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
boto_required_version = (2,40,0)
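    # Compare versions as tuples of ints so that, e.g., (2, 9, 0) < (2, 40, 0);
    # a plain string comparison of version numbers would get this wrong.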
if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version:
module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0")
if state == 'present':
create_lifecycle_rule(connection, module)
elif state == 'absent':
destroy_lifecycle_rule(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
jchevin/MissionPlanner-master | packages/IronPython.StdLib.2.7.4/content/Lib/token.py | 178 | 2944 | #! /usr/bin/env python
"""Token constants (from "token.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
ERRORTOKEN = 52
N_TOKENS = 53
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
del _name, _value
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
def main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError, err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = tokens.keys()
keys.sort()
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
main()
| gpl-3.0 |
alexis-roche/nipy | nipy/testing/__init__.py | 2 | 1369 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The testing directory contains a small set of imaging files to be
used for doctests only. More thorough tests and example data will be
stored in nipy data packages that you can download separately.
.. note:
We use the ``nose`` testing framework for tests.
Nose is a dependency for the tests, but should not be a dependency
for running the algorithms in the NIPY library. This file should
import without nose being present on the python path.
Examples
--------
>>> from nipy.testing import funcfile
>>> from nipy.io.api import load_image
>>> img = load_image(funcfile)
>>> img.shape
(17, 21, 3, 20)
"""
from __future__ import absolute_import
import os
#__all__ = ['funcfile', 'anatfile']
# Discover directory path
filepath = os.path.abspath(__file__)
basedir = os.path.dirname(filepath)
funcfile = os.path.join(basedir, 'functional.nii.gz')
anatfile = os.path.join(basedir, 'anatomical.nii.gz')
from numpy.testing import *
# Overwrites numpy.testing.Tester
from .nosetester import NipyNoseTester as Tester
test = Tester().test
bench = Tester().bench
from . import decorators as dec
# Allow failed import of nose if not now running tests
try:
from nose.tools import assert_true, assert_false
except ImportError:
pass
| bsd-3-clause |
pgmillon/ansible | lib/ansible/modules/network/meraki/meraki_syslog.py | 10 | 9172 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <kevin.breit@kevinbreit.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_syslog
short_description: Manage syslog server settings in the Meraki cloud.
version_added: "2.8"
description:
- Allows for creation and management of Syslog servers within Meraki.
notes:
    - Changes to existing syslog servers replace the existing configuration. If you need to add to an
      existing configuration, set state to query to gather the existing configuration and then modify or add to it.
options:
auth_key:
description:
- Authentication key provided by the dashboard. Required if environmental variable MERAKI_KEY is not set.
type: str
state:
description:
- Query or edit syslog servers
        - To delete a syslog server, do not include it in the list of servers
choices: [present, query]
default: present
type: str
net_name:
description:
- Name of a network.
aliases: [name, network]
type: str
net_id:
description:
- ID number of a network.
type: str
servers:
description:
- List of syslog server settings
suboptions:
host:
description:
- IP address or hostname of Syslog server.
port:
description:
- Port number Syslog server is listening on.
default: "514"
roles:
description:
- List of applicable Syslog server roles.
choices: ['Wireless event log',
'Appliance event log',
'Switch event log',
'Air Marshal events',
'Flows',
'URLs',
'IDS alerts',
'Security events']
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Query syslog configurations on network named MyNet in the YourOrg organization
meraki_syslog:
auth_key: abc12345
    state: query
org_name: YourOrg
net_name: MyNet
delegate_to: localhost
- name: Add single syslog server with Appliance event log role
meraki_syslog:
auth_key: abc12345
    state: present
org_name: YourOrg
net_name: MyNet
servers:
- host: 192.0.1.2
port: 514
roles:
- Appliance event log
delegate_to: localhost
- name: Add multiple syslog servers
meraki_syslog:
auth_key: abc12345
    state: present
org_name: YourOrg
net_name: MyNet
servers:
- host: 192.0.1.2
port: 514
roles:
- Appliance event log
- host: 192.0.1.3
port: 514
roles:
- Appliance event log
- Flows
delegate_to: localhost
'''
RETURN = r'''
data:
description: Information about the created or manipulated object.
returned: info
type: complex
contains:
host:
description: Hostname or IP address of syslog server.
returned: success
type: string
sample: 192.0.1.1
port:
description: Port number for syslog communication.
returned: success
type: string
sample: 443
roles:
description: List of roles assigned to syslog server.
returned: success
type: list
sample: "Wireless event log, URLs"
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.common.dict_transformations import recursive_diff
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def main():
# define the available arguments/parameters that a user can pass to
# the module
server_arg_spec = dict(host=dict(type='str'),
port=dict(type='int', default="514"),
                           roles=dict(type='list', choices=['Wireless event log',
'Appliance event log',
'Switch event log',
'Air Marshal events',
'Flows',
'URLs',
'IDS alerts',
'Security events',
]),
)
argument_spec = meraki_argument_spec()
argument_spec.update(net_id=dict(type='str'),
                         servers=dict(type='list', elements='dict', options=server_arg_spec),
state=dict(type='str', choices=['present', 'query'], default='present'),
net_name=dict(type='str', aliases=['name', 'network']),
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
meraki = MerakiModule(module, function='syslog')
module.params['follow_redirects'] = 'all'
payload = None
syslog_urls = {'syslog': '/networks/{net_id}/syslogServers'}
meraki.url_catalog['query_update'] = syslog_urls
if not meraki.params['org_name'] and not meraki.params['org_id']:
meraki.fail_json(msg='org_name or org_id parameters are required')
if meraki.params['state'] != 'query':
if not meraki.params['net_name'] and not meraki.params['net_id']:
meraki.fail_json(msg='net_name or net_id is required for present or absent states')
if meraki.params['net_name'] and meraki.params['net_id']:
meraki.fail_json(msg='net_name and net_id are mutually exclusive')
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if not org_id:
org_id = meraki.get_org_id(meraki.params['org_name'])
net_id = meraki.params['net_id']
if net_id is None:
nets = meraki.get_nets(org_id=org_id)
net_id = meraki.get_net_id(net_name=meraki.params['net_name'], data=nets)
if meraki.params['state'] == 'query':
path = meraki.construct_path('query_update', net_id=net_id)
r = meraki.request(path, method='GET')
if meraki.status == 200:
meraki.result['data'] = r
elif meraki.params['state'] == 'present':
# Construct payload
payload = dict()
payload['servers'] = meraki.params['servers']
# Convert port numbers to string for idempotency checks
for server in payload['servers']:
if server['port']:
server['port'] = str(server['port'])
path = meraki.construct_path('query_update', net_id=net_id)
r = meraki.request(path, method='GET')
if meraki.status == 200:
original = dict()
original['servers'] = r
if meraki.is_update_required(original, payload):
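                # In check mode, report the would-be change (diff and merged data)
                # and exit without calling the API.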
if meraki.module.check_mode is True:
diff = recursive_diff(original, payload)
original.update(payload)
meraki.result['diff'] = {'before': diff[0],
'after': diff[1]}
meraki.result['data'] = original
meraki.result['changed'] = True
meraki.exit_json(**meraki.result)
path = meraki.construct_path('query_update', net_id=net_id)
r = meraki.request(path, method='PUT', payload=json.dumps(payload))
if meraki.status == 200:
meraki.result['data'] = r
meraki.result['changed'] = True
else:
if meraki.module.check_mode is True:
meraki.result['data'] = original
meraki.exit_json(**meraki.result)
meraki.result['data'] = original
    # in the event of a successful module execution, you will want to
    # simply call AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
| gpl-3.0 |
exercism/python | exercises/practice/bank-account/bank_account_test.py | 4 | 4133 | import sys
import threading
import time
import unittest
from bank_account import BankAccount
class BankAccountTest(unittest.TestCase):
def test_newly_opened_account_has_zero_balance(self):
account = BankAccount()
account.open()
self.assertEqual(account.get_balance(), 0)
def test_can_deposit_money(self):
account = BankAccount()
account.open()
account.deposit(100)
self.assertEqual(account.get_balance(), 100)
def test_can_deposit_money_sequentially(self):
account = BankAccount()
account.open()
account.deposit(100)
account.deposit(50)
self.assertEqual(account.get_balance(), 150)
def test_can_withdraw_money(self):
account = BankAccount()
account.open()
account.deposit(100)
account.withdraw(50)
self.assertEqual(account.get_balance(), 50)
def test_can_withdraw_money_sequentially(self):
account = BankAccount()
account.open()
account.deposit(100)
account.withdraw(20)
account.withdraw(80)
self.assertEqual(account.get_balance(), 0)
def test_checking_balance_of_closed_account_throws_error(self):
account = BankAccount()
account.open()
account.close()
with self.assertRaisesWithMessage(ValueError):
account.get_balance()
def test_deposit_into_closed_account(self):
account = BankAccount()
account.open()
account.close()
with self.assertRaisesWithMessage(ValueError):
account.deposit(50)
def test_withdraw_from_closed_account(self):
account = BankAccount()
account.open()
account.close()
with self.assertRaisesWithMessage(ValueError):
account.withdraw(50)
def test_close_already_closed_account(self):
account = BankAccount()
with self.assertRaisesWithMessage(ValueError):
account.close()
def test_open_already_opened_account(self):
account = BankAccount()
account.open()
with self.assertRaisesWithMessage(ValueError):
account.open()
def test_reopened_account_does_not_retain_balance(self):
account = BankAccount()
account.open()
account.deposit(50)
account.close()
account.open()
self.assertEqual(account.get_balance(), 0)
def test_cannot_withdraw_more_than_deposited(self):
account = BankAccount()
account.open()
account.deposit(25)
with self.assertRaises(ValueError):
account.withdraw(50)
def test_cannot_withdraw_negative(self):
account = BankAccount()
account.open()
account.deposit(100)
with self.assertRaisesWithMessage(ValueError):
account.withdraw(-50)
def test_cannot_deposit_negative(self):
account = BankAccount()
account.open()
with self.assertRaisesWithMessage(ValueError):
account.deposit(-50)
def test_can_handle_concurrent_transactions(self):
account = BankAccount()
account.open()
account.deposit(1000)
self.adjust_balance_concurrently(account)
self.assertEqual(account.get_balance(), 1000)
def adjust_balance_concurrently(self, account):
def transact():
account.deposit(5)
time.sleep(0.001)
account.withdraw(5)
# Greatly improve the chance of an operation being interrupted
# by thread switch, thus testing synchronization effectively
try:
sys.setswitchinterval(1e-12)
except AttributeError:
# For Python 2 compatibility
sys.setcheckinterval(1)
threads = [threading.Thread(target=transact) for _ in range(1000)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Utility functions
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == '__main__':
unittest.main()
| mit |
openstack/mistral | mistral/api/controllers/v2/execution.py | 1 | 17181 | # Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 Huawei Technologies Co., Ltd.
# Copyright 2016 - Brocade Communications Systems, Inc.
# Copyright 2018 - Extreme Networks, Inc.
# Copyright 2019 - NetCracker Technology Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral.api import access_control as acl
from mistral.api.controllers.v2 import execution_report
from mistral.api.controllers.v2 import resources
from mistral.api.controllers.v2 import sub_execution
from mistral.api.controllers.v2 import task
from mistral.api.controllers.v2 import types
from mistral import context
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models as db_models
from mistral import exceptions as exc
from mistral.rpc import clients as rpc
from mistral.services import workflows as wf_service
from mistral.utils import filter_utils
from mistral.utils import rest_utils
from mistral.workflow import data_flow
from mistral.workflow import states
from mistral_lib.utils import merge_dicts
LOG = logging.getLogger(__name__)
STATE_TYPES = wtypes.Enum(
str,
states.IDLE,
states.RUNNING,
states.SUCCESS,
states.ERROR,
states.PAUSED,
states.CANCELLED
)
def _get_workflow_execution_resource_with_output(wf_ex):
rest_utils.load_deferred_fields(wf_ex, ['params', 'input', 'output'])
return resources.Execution.from_db_model(wf_ex)
def _get_workflow_execution_resource(wf_ex):
rest_utils.load_deferred_fields(wf_ex, ['params', 'input'])
return resources.Execution.from_db_model(wf_ex)
# Use retries to prevent possible failures.
@rest_utils.rest_retry_on_db_error
def _get_workflow_execution(id, must_exist=True):
with db_api.transaction():
if must_exist:
wf_ex = db_api.get_workflow_execution(id)
else:
wf_ex = db_api.load_workflow_execution(id)
return rest_utils.load_deferred_fields(
wf_ex,
['params', 'input', 'output', 'context', 'spec']
)
# TODO(rakhmerov): Make sure to make all needed renaming on public API.
class ExecutionsController(rest.RestController):
tasks = task.ExecutionTasksController()
report = execution_report.ExecutionReportController()
executions = sub_execution.SubExecutionsController()
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(resources.Execution, wtypes.text)
def get(self, id):
"""Return the specified Execution.
:param id: UUID of execution to retrieve.
"""
acl.enforce("executions:get", context.ctx())
LOG.debug("Fetch execution [id=%s]", id)
wf_ex = _get_workflow_execution(id)
resource = resources.Execution.from_db_model(wf_ex)
resource.published_global = (
data_flow.get_workflow_execution_published_global(wf_ex)
)
return resource
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(
resources.Execution,
wtypes.text,
body=resources.Execution
)
def put(self, id, wf_ex):
"""Update the specified workflow execution.
:param id: UUID of execution to update.
:param wf_ex: Execution object.
"""
acl.enforce('executions:update', context.ctx())
LOG.debug('Update execution [id=%s, execution=%s]', id, wf_ex)
@rest_utils.rest_retry_on_db_error
def _compute_delta(wf_ex):
with db_api.transaction():
# ensure that workflow execution exists
db_api.get_workflow_execution(
id,
fields=(db_models.WorkflowExecution.id,)
)
delta = {}
if wf_ex.state:
delta['state'] = wf_ex.state
if wf_ex.description:
delta['description'] = wf_ex.description
if wf_ex.params and wf_ex.params.get('env'):
delta['env'] = wf_ex.params.get('env')
# Currently we can change only state, description, or env.
if len(delta.values()) <= 0:
raise exc.InputException(
'The property state, description, or env '
'is not provided for update.'
)
# Description cannot be updated together with state.
if delta.get('description') and delta.get('state'):
raise exc.InputException(
'The property description must be updated '
'separately from state.'
)
# If state change, environment cannot be updated
# if not RUNNING.
if (delta.get('env') and
delta.get('state') and
delta['state'] != states.RUNNING):
raise exc.InputException(
'The property env can only be updated when workflow '
'execution is not running or on resume from pause.'
)
if delta.get('description'):
wf_ex = db_api.update_workflow_execution(
id,
{'description': delta['description']}
)
if not delta.get('state') and delta.get('env'):
wf_ex = db_api.get_workflow_execution(id)
wf_ex = wf_service.update_workflow_execution_env(
wf_ex,
delta.get('env')
)
return delta, wf_ex
delta, wf_ex = _compute_delta(wf_ex)
if delta.get('state'):
if states.is_paused(delta.get('state')):
wf_ex = rpc.get_engine_client().pause_workflow(id)
elif delta.get('state') == states.RUNNING:
wf_ex = rpc.get_engine_client().resume_workflow(
id,
env=delta.get('env')
)
elif states.is_completed(delta.get('state')):
msg = wf_ex.state_info if wf_ex.state_info else None
wf_ex = rpc.get_engine_client().stop_workflow(
id,
delta.get('state'),
msg
)
else:
# To prevent changing state in other cases throw a message.
                raise exc.InputException(
                    "Cannot change state to %s. Allowed states are: '%s'" % (
                        wf_ex.state,
', '.join([
states.RUNNING,
states.PAUSED,
states.SUCCESS,
states.ERROR,
states.CANCELLED
])
)
)
return resources.Execution.from_dict(
wf_ex if isinstance(wf_ex, dict) else wf_ex.to_dict()
)
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(
resources.Execution,
body=resources.Execution,
status_code=201
)
def post(self, wf_ex):
"""Create a new Execution.
:param wf_ex: Execution object with input content.
"""
acl.enforce('executions:create', context.ctx())
LOG.debug("Create execution [execution=%s]", wf_ex)
exec_dict = wf_ex.to_dict()
exec_id = exec_dict.get('id')
if not exec_id:
exec_id = uuidutils.generate_uuid()
LOG.debug("Generated execution id [exec_id=%s]", exec_id)
exec_dict.update({'id': exec_id})
wf_ex = None
else:
# If ID is present we need to check if such execution exists.
# If yes, the method just returns the object. If not, the ID
# will be used to create a new execution.
wf_ex = _get_workflow_execution(exec_id, must_exist=False)
if wf_ex:
return resources.Execution.from_db_model(wf_ex)
source_execution_id = exec_dict.get('source_execution_id')
source_exec_dict = None
if source_execution_id:
# If source execution is present we will perform a lookup for
# previous workflow execution model and the information to start
# a new workflow based on that information.
source_exec_dict = db_api.get_workflow_execution(
source_execution_id).to_dict()
exec_dict['description'] = "{} Based on the execution '{}'".format(
exec_dict['description'],
source_execution_id
)
exec_dict['description'] = exec_dict['description'].strip()
result_exec_dict = merge_dicts(source_exec_dict, exec_dict)
if not (result_exec_dict.get('workflow_id') or
result_exec_dict.get('workflow_name')):
raise exc.WorkflowException(
"Workflow ID or workflow name must be provided. Workflow ID is"
" recommended."
)
engine = rpc.get_engine_client()
result = engine.start_workflow(
result_exec_dict.get(
'workflow_id',
result_exec_dict.get('workflow_name')
),
result_exec_dict.get('workflow_namespace', ''),
result_exec_dict.get('id'),
result_exec_dict.get('input'),
description=result_exec_dict.get('description', ''),
**result_exec_dict.get('params') or {}
)
return resources.Execution.from_dict(result)
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(None, wtypes.text, bool, status_code=204)
def delete(self, id, force=False):
"""Delete the specified Execution.
:param id: UUID of execution to delete.
:param force: Optional. Force the deletion of unfinished executions.
                      Default: false. While the API is backward compatible,
                      the behaviour is not the same: the new default is the
                      safer option.
"""
acl.enforce('executions:delete', context.ctx())
LOG.debug("Delete execution [id=%s]", id)
if not force:
state = db_api.get_workflow_execution(
id,
fields=(db_models.WorkflowExecution.state,)
)[0]
if not states.is_completed(state):
raise exc.NotAllowedException(
"Only completed executions can be deleted. "
"Use --force to override this. "
"Execution {} is in {} state".format(id, state)
)
return rest_utils.rest_retry_on_db_error(
db_api.delete_workflow_execution
)(id)
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(resources.Executions, types.uuid, int,
types.uniquelist, types.list, types.uniquelist,
wtypes.text, types.uuid, wtypes.text,
types.uniquelist, types.jsontype, types.uuid,
types.uuid, STATE_TYPES, wtypes.text,
types.jsontype, types.jsontype, wtypes.text,
wtypes.text, bool, types.uuid,
bool, types.list)
def get_all(self, marker=None, limit=None,
sort_keys='created_at', sort_dirs='asc', fields='',
workflow_name=None, workflow_id=None, description=None,
tags=None, params=None, task_execution_id=None,
root_execution_id=None, state=None, state_info=None,
input=None, output=None, created_at=None,
updated_at=None, include_output=None, project_id=None,
all_projects=False, nulls=''):
"""Return all Executions.
:param marker: Optional. Pagination marker for large data sets.
:param limit: Optional. Maximum number of resources to return in a
single result. Default value is None for backward
compatibility.
:param sort_keys: Optional. Columns to sort results by.
Default: created_at, which is backward compatible.
:param sort_dirs: Optional. Directions to sort corresponding to
sort_keys, "asc" or "desc" can be chosen.
                          Default: asc. The length of sort_dirs can be equal
or less than that of sort_keys.
:param fields: Optional. A specified list of fields of the resource to
be returned. 'id' will be included automatically in
fields if it's provided, since it will be used when
constructing 'next' link.
:param workflow_name: Optional. Keep only resources with a specific
workflow name.
:param workflow_id: Optional. Keep only resources with a specific
workflow ID.
:param description: Optional. Keep only resources with a specific
description.
:param tags: Optional. Keep only resources containing specific tags.
:param params: Optional. Keep only resources with specific parameters.
:param task_execution_id: Optional. Keep only resources with a
specific task execution ID.
:param root_execution_id: Optional. Keep only resources with a
specific root execution ID.
:param state: Optional. Keep only resources with a specific state.
:param state_info: Optional. Keep only resources with specific
state information.
:param input: Optional. Keep only resources with a specific input.
:param output: Optional. Keep only resources with a specific output.
:param created_at: Optional. Keep only resources created at a specific
time and date.
:param updated_at: Optional. Keep only resources with specific latest
update time and date.
:param include_output: Optional. Include the output for all executions
in the list.
        :param project_id: Optional. Only get executions belonging to the project.
Admin required.
:param all_projects: Optional. Get resources of all projects. Admin
required.
:param nulls: Optional. The names of the columns with null value in
the query.
"""
acl.enforce('executions:list', context.ctx())
db_models.WorkflowExecution.check_allowed_none_values(nulls)
if all_projects or project_id:
acl.enforce('executions:list:all_projects', context.ctx())
filters = filter_utils.create_filters_from_request_params(
none_values=nulls,
created_at=created_at,
workflow_name=workflow_name,
workflow_id=workflow_id,
tags=tags,
params=params,
task_execution_id=task_execution_id,
state=state,
state_info=state_info,
input=input,
output=output,
updated_at=updated_at,
description=description,
project_id=project_id,
root_execution_id=root_execution_id,
)
LOG.debug(
"Fetch executions. marker=%s, limit=%s, sort_keys=%s, "
"sort_dirs=%s, filters=%s, all_projects=%s", marker, limit,
sort_keys, sort_dirs, filters, all_projects
)
if include_output:
resource_function = _get_workflow_execution_resource_with_output
else:
resource_function = _get_workflow_execution_resource
return rest_utils.get_all(
resources.Executions,
resources.Execution,
db_api.get_workflow_executions,
db_api.get_workflow_execution,
resource_function=resource_function,
marker=marker,
limit=limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
fields=fields,
all_projects=all_projects,
**filters
)
| apache-2.0 |
zeroc0d3lab/centos-ruby | rootfs/usr/lib/python2.7/dist-packages/powerline/ipython.py | 14 | 1687 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline import Powerline
from powerline.lib.dict import mergedicts
from powerline.lib.unicode import string
class IPythonInfo(object):
def __init__(self, shell):
self._shell = shell
@property
def prompt_count(self):
return self._shell.execution_count
# HACK: ipython tries to only leave us with plain ASCII
class RewriteResult(object):
def __init__(self, prompt):
self.prompt = string(prompt)
def __str__(self):
return self.prompt
def __add__(self, s):
if type(s) is not str:
try:
s = s.encode('utf-8')
except AttributeError:
raise NotImplementedError
return RewriteResult(self.prompt + s)
class IPythonPowerline(Powerline):
def init(self, **kwargs):
super(IPythonPowerline, self).init(
'ipython',
use_daemon_threads=True,
**kwargs
)
def get_config_paths(self):
if self.config_paths:
return self.config_paths
else:
return super(IPythonPowerline, self).get_config_paths()
def get_local_themes(self, local_themes):
return dict(((type, {'config': self.load_theme_config(name)}) for type, name in local_themes.items()))
def load_main_config(self):
r = super(IPythonPowerline, self).load_main_config()
if self.config_overrides:
mergedicts(r, self.config_overrides)
return r
def load_theme_config(self, name):
r = super(IPythonPowerline, self).load_theme_config(name)
if name in self.theme_overrides:
mergedicts(r, self.theme_overrides[name])
return r
def do_setup(self, wrefs):
for wref in wrefs:
obj = wref()
if obj is not None:
setattr(obj, 'powerline', self)
| gpl-2.0 |
wwj718/edx-platform | lms/djangoapps/teams/management/commands/tests/test_reindex_course_team.py | 33 | 3091 | """ Tests for course_team reindex command """
import ddt
import mock
from mock import patch
from django.core.management import call_command, CommandError
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from opaque_keys.edx.keys import CourseKey
from ....tests.factories import CourseTeamFactory
from ....search_indexes import CourseTeamIndexer
from search.search_engine_base import SearchEngine
COURSE_KEY1 = CourseKey.from_string('edx/history/1')
@ddt.ddt
class ReindexCourseTeamTest(SharedModuleStoreTestCase):
"""Tests for the ReindexCourseTeam command"""
def setUp(self):
"""
Set up tests.
"""
super(ReindexCourseTeamTest, self).setUp()
self.team1 = CourseTeamFactory(course_id=COURSE_KEY1, team_id='team1')
self.team2 = CourseTeamFactory(course_id=COURSE_KEY1, team_id='team2')
self.team3 = CourseTeamFactory(course_id=COURSE_KEY1, team_id='team3')
self.search_engine = SearchEngine.get_search_engine(index='index_course_team')
def test_given_no_arguments_raises_command_error(self):
""" Test that raises CommandError for incorrect arguments. """
with self.assertRaisesRegexp(CommandError, ".* requires one or more arguments.*"):
call_command('reindex_course_team')
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_TEAMS': False})
def test_teams_search_flag_disabled_raises_command_error(self):
""" Test that raises CommandError for disabled feature flag. """
with self.assertRaisesRegexp(CommandError, ".*ENABLE_TEAMS must be enabled.*"):
call_command('reindex_course_team', self.team1.team_id)
def test_given_invalid_team_id_raises_command_error(self):
""" Test that raises CommandError for invalid team id. """
team_id = u'team4'
error_str = 'Argument {0} is not a course_team team_id'.format(team_id)
with self.assertRaisesRegexp(CommandError, error_str):
call_command('reindex_course_team', team_id)
@patch.object(CourseTeamIndexer, 'index')
def test_single_team_id(self, mock_index):
""" Test that command indexes a single passed team. """
call_command('reindex_course_team', self.team1.team_id)
mock_index.assert_called_once_with(self.team1)
mock_index.reset_mock()
@patch.object(CourseTeamIndexer, 'index')
def test_multiple_team_id(self, mock_index):
""" Test that command indexes multiple passed teams. """
call_command('reindex_course_team', self.team1.team_id, self.team2.team_id)
mock_index.assert_any_call(self.team1)
mock_index.assert_any_call(self.team2)
mock_index.reset_mock()
@patch.object(CourseTeamIndexer, 'index')
def test_all_teams(self, mock_index):
""" Test that command indexes all teams. """
call_command('reindex_course_team', all=True)
mock_index.assert_any_call(self.team1)
mock_index.assert_any_call(self.team2)
mock_index.assert_any_call(self.team3)
mock_index.reset_mock()
| agpl-3.0 |
DinoCow/airflow | tests/providers/google/cloud/operators/test_cloud_build_system.py | 10 | 2023 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Build operators"""
import pytest
from tests.providers.google.cloud.operators.test_cloud_build_system_helper import GCPCloudBuildTestHelper
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_CLOUD_BUILD_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_CLOUD_BUILD_KEY)
class CloudBuildExampleDagsSystemTest(GoogleSystemTest):
"""
System tests for Google Cloud Build operators
It use a real service.
"""
helper = GCPCloudBuildTestHelper()
@provide_gcp_context(GCP_CLOUD_BUILD_KEY, project_id=GoogleSystemTest._project_id())
def setUp(self):
super().setUp()
self.helper.create_repository_and_bucket()
@provide_gcp_context(GCP_CLOUD_BUILD_KEY)
def test_run_example_dag(self):
self.run_dag("example_gcp_cloud_build", CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_CLOUD_BUILD_KEY, project_id=GoogleSystemTest._project_id())
def tearDown(self):
self.helper.delete_bucket()
self.helper.delete_docker_images()
self.helper.delete_repo()
super().tearDown()
| apache-2.0 |
fjferrer/angr | angr/analyses/vsa_ddg.py | 9 | 6157 | from .datagraph_meta import DataGraphMeta, DataGraphError
import logging
import networkx
import collections
import simuvex
l = logging.getLogger(name="angr.analyses.vsa_ddg")
class VSA_DDG(DataGraphMeta):
"""
A Data dependency graph based on VSA states.
That means we don't (and shouldn't) expect any symbolic expressions.
"""
def __init__(self, start_addr, interfunction_level=0,
context_sensitivity_level=2, keep_addrs=False):
"""
@start_addr: the address where to start the analysis (typically, a
function's entry point)
@interfunction_level and @context_sensitivity_level have the same
meaning as in the VFG analysis.
        @keep_addrs: whether we keep sets of addresses as edge labels in the graph, or
        just the cardinality of each set, which can be used as a "weight".
Returns: a NetworkX graph representing data dependencies.
"""
self._startnode = None # entry point of the analyzed function
self._vfg = self._p.analyses.VFG(function_start=start_addr,
interfunction_level=interfunction_level,
context_sensitivity_level=context_sensitivity_level)
self.graph = networkx.DiGraph()
self.keep_addrs = keep_addrs
self._simproc_map = {}
self._imarks = {}
# Get the first node
self._startnode = self._vfg_node(start_addr)
if self._startnode is None:
raise DataGraphError ("No start node :(")
# We explore one path at a time
self._branch({}, self._startnode)
def _make_block(self, irsb, live_defs):
return Block(irsb, live_defs, self.graph, self.keep_addrs)
class Block(object):
"""
Defs and uses in a block.
"""
def __init__(self, irsb, live_defs, graph, keep_addrs):
"""
@irsb: a SimIRSB object
@live_defs: a dict {addr:stmt} containing the definitions from previous
blocks that are still live at this point, where addr is a tuple
representing a normalized addr (see simuvex/plugins/abstract_memory.py for more)
@keep_addrs: edges in the graph may either be labelled with the
cardinality of the set of addresses involved, or the set of addresses itself.
"""
self.irsb = irsb
self.live_defs= live_defs
self.graph = graph
self.keep_addrs = keep_addrs
self._imarks = {}
# A repeating block is a block creating an already existing edge in the
# graph, which is where we want to stop analyzing a specific path
self._read_edge = False # The block causes a read edge in the graph
self._new = False # There is at least one new read edge
if isinstance(irsb, simuvex.SimProcedure):
# TODO: track reads and writes in SimProcedures
self._track_actions(-1, irsb.successors[0].log.actions)
else:
for st in self.irsb.statements:
self._imarks[(self.irsb.addr, st.stmt_idx)] = st.imark.addr
self._track_actions(st.stmt_idx, st.actions)
def _track_actions(self, stmt_idx, a_list):
for a in a_list:
if a.type == "mem":
addr_list = set(self.irsb.initial_state.memory.normalize_address(a.addr.ast))
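                # Under VSA a memory address may resolve to several concrete targets,
                # so normalize_address can return more than one; track them as a set.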
node = (self.irsb.addr, stmt_idx)
prevdefs = self._def_lookup(addr_list)
if a.action == "read":
for prev_node, label in prevdefs.iteritems():
self._read_edge = True
self._add_edge(prev_node, node, label)
if a.action == "write":
self._kill(addr_list, node)
def _def_lookup(self, addr_list):
"""
This is a backward lookup in the previous defs.
@addr_list is a list of normalized addresses.
Note that, as we are using VSA, it is possible that @a is affected by
several definitions.
Returns: a dict {stmt:label} where label is the number of individual
addresses of @addr_list (or the actual set of addresses depending on the
keep_addrs flag) that are definted by stmt.
"""
if self.keep_addrs is True:
prevdefs = collections.defaultdict(set)
else:
prevdefs = collections.defaultdict(int) # default value of int is 0
for addr in addr_list:
if addr in self.live_defs.keys():
stmt = self.live_defs[addr]
# Label edges with cardinality or actual sets of addresses
if self.keep_addrs is True:
prevdefs[stmt].add(addr)
else:
prevdefs[stmt] = prevdefs[stmt] + 1
return prevdefs
def _kill(self, addr_list, stmt):
"""
Kill previous defs. @addr_list is a list of normalized addresses
"""
# Case 1: address perfectly match, we kill
# Case 2: a is a subset of the original address
# Case 3: a is a superset of the original address
for addr in addr_list:
self.live_defs[addr] = stmt
l.debug("XX Stmt (0x%x, %d) kills addr %s" % (stmt[0], stmt[1], repr(addr)))
def _add_edge(self, s_a, s_b, label):
"""
Add an edge in the graph from @s_a to statment @s_b, where @s_a and
@s_b are tuples of statements of the form (irsb_addr, stmt_idx)
"""
# Is that edge already in the graph ?
# If at least one is new, then we are not redoing the same path again
if (s_a, s_b) not in self.graph.edges():
self.graph.add_edge(s_a, s_b, label=label)
self._new = True
l.info("New edge from (0x%x, %d) --> (0x%x, %d)" %
(s_a[0], s_a[1],
s_b[0], s_b[1]))
@property
def stop(self):
"""
If this block contains a read that is not creating new edges in the graph,
then we are looping and we should stop the analysis.
"""
return self._read_edge and not self._new
| bsd-2-clause |
madrang/pyTSon-AudioBot | requests/utils.py | 17 | 27595 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import contextlib
import io
import os
import platform
import re
import socket
import struct
import warnings
from . import __version__
from . import certs
# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import to_native_string
from .compat import parse_http_list as _parse_list_header
from .compat import (
quote, urlparse, bytes, str, OrderedDict, unquote, getproxies,
proxy_bypass, urlunparse, basestring, integer_types, is_py3,
proxy_bypass_environment, getproxies_environment)
from .cookies import cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import (
InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
if platform.system() == 'Windows':
# provide a proxy_bypass version on Windows without DNS lookups
def proxy_bypass_registry(host):
if is_py3:
import winreg
else:
import _winreg as winreg
try:
internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
proxyEnable = winreg.QueryValueEx(internetSettings,
'ProxyEnable')[0]
proxyOverride = winreg.QueryValueEx(internetSettings,
'ProxyOverride')[0]
except OSError:
return False
if not proxyEnable or not proxyOverride:
return False
# make a check value list from the registry entry: replace the
# '<local>' string by the localhost entry and the corresponding
# canonical entry.
proxyOverride = proxyOverride.split(';')
# now check if we match one of the registry values.
for test in proxyOverride:
if test == '<local>':
if '.' not in host:
return True
test = test.replace(".", r"\.") # mask dots
test = test.replace("*", r".*") # change glob sequence
test = test.replace("?", r".") # change glob char
if re.match(test, host, re.I):
return True
return False
def proxy_bypass(host): # noqa
"""Return True, if the host should be bypassed.
Checks proxy settings gathered from the environment, if specified,
or the registry.
"""
if getproxies_environment():
return proxy_bypass_environment(host)
else:
return proxy_bypass_registry(host)
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
total_length = None
current_position = 0
if hasattr(o, '__len__'):
total_length = len(o)
elif hasattr(o, 'len'):
total_length = o.len
elif hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if 'b' not in o.mode:
warnings.warn((
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."),
FileModeWarning
)
if hasattr(o, 'tell'):
try:
current_position = o.tell()
except (OSError, IOError):
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
if total_length is not None:
current_position = total_length
else:
if hasattr(o, 'seek') and total_length is None:
            # StringIO and BytesIO have seek but no usable fileno
try:
# seek to end of file
o.seek(0, 2)
total_length = o.tell()
# seek back to current position to support
# partially read file-like objects
o.seek(current_position or 0)
except (OSError, IOError):
total_length = 0
if total_length is None:
total_length = 0
return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
        # Strip port numbers from netloc. This weird ``if...encode`` dance is
# used for Python 3.2, which doesn't support unicode literals.
splitstr = b':'
if isinstance(url, str):
splitstr = splitstr.decode('ascii')
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
:rtype: list
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
:rtype: dict
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:rtype: CookieJar
"""
return cookiejar_from_dict(cookie_dict, cj)
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:rtype: str
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
:rtype: str
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split('%')
for i in range(1, len(parts)):
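        # Every part after the first follows a '%' in the original URI, so its
        # first two characters should be a hex escape; decode it only if it maps
        # to an unreserved character.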
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
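# Illustrative sketch (not part of the original module): the examples already given
# in the docstrings above, written out as executable checks.
def _demo_netmask_helpers():
    assert dotted_netmask(24) == '255.255.255.0'
    assert dotted_netmask(16) == '255.255.0.0'
    assert address_in_network('192.168.1.1', '192.168.1.0/24') is True
    assert address_in_network('192.168.1.1', '192.168.100.0/24') is False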
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""
Very simple check of the cidr format in no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
@contextlib.contextmanager
def set_environ(env_name, value):
"""Set the environment variable 'env_name' to 'value'
Save previous value, yield, and then restore the previous value stored in
the environment variable 'env_name'.
If 'value' is None, do nothing"""
if value is not None:
old_value = os.environ.get(env_name)
os.environ[env_name] = value
try:
yield
finally:
if value is None:
return
if old_value is None:
del os.environ[env_name]
else:
os.environ[env_name] = old_value
def should_bypass_proxies(url, no_proxy):
"""
Returns whether we should bypass proxies or not.
:rtype: bool
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy_arg = no_proxy
if no_proxy is None:
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = (
host for host in no_proxy.replace(' ', '').split(',') if host
)
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
elif ip == proxy_ip:
# If no_proxy ip was defined in plain IP notation instead of cidr notation &
# matches the IP of the index
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
with set_environ('no_proxy', no_proxy_arg):
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
def get_environ_proxies(url, no_proxy):
"""
Return a dict of environment proxies.
:rtype: dict
"""
if should_bypass_proxies(url, no_proxy=no_proxy):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
    :param url: The url of the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get(urlparts.scheme, proxies.get('all'))
proxy_keys = [
urlparts.scheme + '://' + urlparts.hostname,
urlparts.scheme,
'all://' + urlparts.hostname,
'all',
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy
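# Illustrative sketch (not part of the original module): select_proxy walks proxy_keys
# from most to least specific. The proxy URLs and host names are made up.
def _demo_select_proxy():
    proxies = {'http://example.com': 'http://proxy-a:8080', 'http': 'http://proxy-b:8080'}
    # a scheme+host key wins over the bare scheme key
    assert select_proxy('http://example.com/page', proxies) == 'http://proxy-a:8080'
    # other hosts fall back to the scheme-wide entry
    assert select_proxy('http://other.example.org/', proxies) == 'http://proxy-b:8080'
    # no matching key at all yields None
    assert select_proxy('ftp://example.com/', proxies) is None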
def default_user_agent(name="python-requests"):
"""
Return a string representing the default user agent.
:rtype: str
"""
return '%s/%s' % (name, __version__)
def default_headers():
"""
:rtype: requests.structures.CaseInsensitiveDict
"""
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
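# Illustrative sketch (not part of the original module): parsing a typical paginated
# Link header of the kind shown in the docstring. The URLs are made up.
def _demo_parse_header_links():
    value = '<http://example.com/?page=2>; rel="next", <http://example.com/?page=5>; rel="last"'
    assert parse_header_links(value) == [
        {'url': 'http://example.com/?page=2', 'rel': 'next'},
        {'url': 'http://example.com/?page=5', 'rel': 'last'},
    ]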
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
"""
:rtype: str
"""
    # JSON always starts with two ASCII characters, so detection is as
    # easy as counting the nulls; their location and count determine the
    # encoding. Also detect a BOM, if present.
sample = data[:4]
if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
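# Illustrative sketch (not part of the original module): guess_json_utf only inspects
# the first four bytes, so tiny hand-made payloads are enough to exercise it.
def _demo_guess_json_utf():
    assert guess_json_utf(u'{"a": 1}'.encode('utf-8')) == 'utf-8'
    assert guess_json_utf(u'{"a": 1}'.encode('utf-16-le')) == 'utf-16-le'
    assert guess_json_utf(u'{"a": 1}'.encode('utf-32-be')) == 'utf-32-be'
    # an explicit BOM takes precedence over the null-byte heuristic
    assert guess_json_utf(codecs.BOM_UTF16_LE + u'{}'.encode('utf-16-le')) == 'utf-16'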
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password.
:rtype: (str,str)
"""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
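# Illustrative sketch (not part of the original module): pulling the userinfo component
# out of a URL. The credentials and hosts are made up.
def _demo_get_auth_from_url():
    assert get_auth_from_url('http://alice:s3cret@example.com/path') == ('alice', 's3cret')
    # percent-escapes in the userinfo are unquoted
    assert get_auth_from_url('http://alice:p%40ss@example.com/') == ('alice', 'p@ss')
    # URLs without credentials yield a pair of empty strings
    assert get_auth_from_url('http://example.com/') == ('', '')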
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
def check_header_validity(header):
"""Verifies that header value is a string which doesn't contain
leading whitespace or return characters. This prevents unintended
header injection.
:param header: tuple, in the format (name, value).
"""
name, value = header
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise InvalidHeader("Header value %s must be of type str or bytes, "
"not %s" % (value, type(value)))
def urldefragauth(url):
"""
    Given a url, remove the fragment and the authentication part.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
def rewind_body(prepared_request):
"""Move file pointer back to its recorded starting position
so it can be read again on redirect.
"""
body_seek = getattr(prepared_request.body, 'seek', None)
if body_seek is not None and isinstance(prepared_request._body_position, integer_types):
try:
body_seek(prepared_request._body_position)
except (IOError, OSError):
raise UnrewindableBodyError("An error occurred when rewinding request "
"body for redirect.")
else:
raise UnrewindableBodyError("Unable to rewind request body for redirect.")
| gpl-3.0 |
mozilla/inventory | truth/models.py | 3 | 1262 | from django.db import models
from systems.models import ScheduledTask
from django.db import IntegrityError
import pdb
# Create your models here.
class Truth(models.Model):
name = models.CharField(max_length=128)
description = models.CharField(max_length=128)
def __unicode__(self):
return self.name
class Meta:
db_table = u'truth'
class ApiManager(models.Manager):
def get_query_set(self):
results = super(ApiManager, self).get_query_set()
return results
class KeyValue(models.Model):
truth = models.ForeignKey('Truth')
key = models.CharField(max_length=255, blank=True, null=True)
value = models.CharField(max_length=255, blank=True, null=True)
objects = models.Manager()
expanded_objects = ApiManager()
def __unicode__(self):
return self.key
class Meta:
db_table = u'truth_key_value'
def save(self, *args, **kwargs):
if self.truth.name == "ip_to_vlan_mapping":
try:
ScheduledTask(type='dns', task=self.key).save()
except IntegrityError, e:
print ("Key {0} and Value {1} already existsed in "
"table".format(self.key, self.value))
super(KeyValue, self).save(*args, **kwargs)
| bsd-3-clause |
kgullikson88/TS23-Scripts | CheckSyntheticTemperature.py | 1 | 14868 | import os
import re
from collections import defaultdict
from operator import itemgetter
import logging
import pandas
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from george import kernels
import matplotlib.pyplot as plt
import numpy as np
import george
import emcee
import StarData
import SpectralTypeRelations
def classify_filename(fname, type='bright'):
"""
    Given a CCF filename, classify the star combination, vsini, temperature, logg, and metallicity
    :param fname: the CCF filename to parse
    :return: tuple of (star1, star2, vsini, temperature, logg, metallicity)
"""
# First, remove any leading directories
fname = fname.split('/')[-1]
# Star combination
m1 = re.search('\.[0-9]+kps', fname)
stars = fname[:m1.start()]
star1 = stars.split('+')[0].replace('_', ' ')
star2 = stars.split('+')[1].split('_{}'.format(type))[0].replace('_', ' ')
# secondary star vsini
vsini = float(fname[m1.start() + 1:].split('kps')[0])
# Temperature
m2 = re.search('[0-9]+\.0K', fname)
temp = float(m2.group()[:-1])
# logg
m3 = re.search('K\+[0-9]\.[0-9]', fname)
logg = float(m3.group()[1:])
# metallicity
metal = float(fname.split(str(logg))[-1])
return star1, star2, vsini, temp, logg, metal
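# Illustrative sketch (not part of the original script): how classify_filename decomposes
# a CCF filename. The filename below is hypothetical -- it merely follows the naming
# convention implied by the regular expressions above (primary+secondary stars, '_bright',
# vsini before 'kps', temperature ending in '.0K', '+logg', then metallicity) and is not
# a real file from the survey.
def _demo_classify_filename():
    fname = 'HIP_1234+HIP_5678_bright.20kps_5300.0K+4.5-0.5'
    star1, star2, vsini, temp, logg, metal = classify_filename(fname, type='bright')
    assert (star1, star2) == ('HIP 1234', 'HIP 5678')
    assert (vsini, temp, logg, metal) == (20.0, 5300.0, 4.5, -0.5)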
def get_ccf_data(basedir, primary_name=None, secondary_name=None, vel_arr=np.arange(-900.0, 900.0, 0.1), type='bright'):
"""
Searches the given directory for CCF files, and classifies
by star, temperature, metallicity, and vsini
:param basedir: The directory to search for CCF files
:keyword primary_name: Optional keyword. If given, it will only get the requested primary star data
:keyword secondary_name: Same as primary_name, but only reads ccfs for the given secondary
:keyword vel_arr: The velocities to interpolate each ccf at
:return: pandas DataFrame
"""
if not basedir.endswith('/'):
basedir += '/'
all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
primary = []
secondary = []
vsini_values = []
temperature = []
gravity = []
metallicity = []
ccf = []
for fname in all_files:
star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
if primary_name is not None and star1.lower() != primary_name.lower():
continue
if secondary_name is not None and star2.lower() != secondary_name.lower():
continue
vel, corr = np.loadtxt(fname, unpack=True)
fcn = spline(vel, corr)
ccf.append(fcn(vel_arr))
primary.append(star1)
secondary.append(star2)
vsini_values.append(vsini)
temperature.append(temp)
gravity.append(logg)
metallicity.append(metal)
# Make a pandas dataframe with all this data
df = pandas.DataFrame(data={'Primary': primary, 'Secondary': secondary, 'Temperature': temperature,
'vsini': vsini_values, 'logg': gravity, '[Fe/H]': metallicity, 'CCF': ccf})
return df
def get_ccf_summary(basedir, vel_arr=np.arange(-900.0, 900.0, 0.1), velocity='highest', type='bright'):
"""
Very similar to get_ccf_data, but does it in a way that is more memory efficient
:param basedir: The directory to search for CCF files
:keyword velocity: The velocity to measure the CCF at. The default is 'highest', and uses the maximum of the ccf
:keyword vel_arr: The velocities to interpolate each ccf at
:return: pandas DataFrame
"""
if not basedir.endswith('/'):
basedir += '/'
all_files = ['{}{}'.format(basedir, f) for f in os.listdir(basedir) if type in f.lower()]
file_dict = defaultdict(lambda: defaultdict(list))
for fname in all_files:
star1, star2, vsini, temp, logg, metal = classify_filename(fname, type=type)
file_dict[star1][star2].append(fname)
# Now, read the ccfs for each primary/secondary combo, and find the best combination
summary_dfs = []
for primary in file_dict.keys():
for secondary in file_dict[primary].keys():
data = get_ccf_data(basedir, primary_name=primary, secondary_name=secondary,
vel_arr=vel_arr, type=type)
summary_dfs.append(find_best_pars(data, velocity=velocity, vel_arr=vel_arr))
return pandas.concat(summary_dfs, ignore_index=True)
def find_best_pars(df, velocity='highest', vel_arr=np.arange(-900.0, 900.0, 0.1)):
"""
Find the 'best-fit' parameters for each combination of primary and secondary star
:param df: the dataframe to search in
:keyword velocity: The velocity to measure the CCF at. The default is 'highest', and uses the maximum of the ccf
:keyword vel_arr: The velocities to interpolate each ccf at
:return: a dataframe with keys of primary, secondary, and the parameters
"""
# Get the names of the primary and secondary stars
primary_names = pandas.unique(df.Primary)
secondary_names = pandas.unique(df.Secondary)
# Find the ccf value at the given velocity
if velocity == 'highest':
fcn = lambda row: (np.max(row), vel_arr[np.argmax(row)])
vals = df['CCF'].map(fcn)
df['ccf_max'] = vals.map(lambda l: l[0])
df['rv'] = vals.map(lambda l: l[1])
# df['ccf_max'] = df['CCF'].map(np.max)
else:
df['ccf_max'] = df['CCF'].map(lambda arr: arr[np.argmin(np.abs(vel_arr - velocity))])
# Find the best parameter for each combination
d = defaultdict(list)
for primary in primary_names:
for secondary in secondary_names:
good = df.loc[(df.Primary == primary) & (df.Secondary == secondary)]
best = good.loc[good.ccf_max == good.ccf_max.max()]
d['Primary'].append(primary)
d['Secondary'].append(secondary)
d['Temperature'].append(best['Temperature'].item())
d['vsini'].append(best['vsini'].item())
d['logg'].append(best['logg'].item())
d['[Fe/H]'].append(best['[Fe/H]'].item())
d['rv'].append(best['rv'].item())
return pandas.DataFrame(data=d)
def get_detected_objects(df, tol=1.0):
"""
Takes a summary dataframe with RV information. Finds the median rv for each star,
and removes objects that are 'tol' km/s from the median value
:param df: A summary dataframe, such as created by find_best_pars
:param tol: The tolerance, in km/s, to accept an observation as detected
:return: a dataframe containing only detected companions
"""
secondary_names = pandas.unique(df.Secondary)
secondary_to_rv = defaultdict(float)
for secondary in secondary_names:
rv = df.loc[df.Secondary == secondary]['rv'].median()
secondary_to_rv[secondary] = rv
print secondary, rv
keys = df.Secondary.values
good = df.loc[abs(df.rv.values - np.array(itemgetter(*keys)(secondary_to_rv))) < tol]
return good
def add_actual_temperature(df, method='spt'):
"""
Add the actual temperature to a given summary dataframe
:param df: The dataframe to which we will add the actual secondary star temperature
:param method: How to get the actual temperature. Options are:
- 'spt': Use main-sequence relationships to go from spectral type --> temperature
- 'excel': Use tabulated data, available in the file 'SecondaryStar_Temperatures.xls'
    :return: None; the dataframe is modified in place, gaining 'Tactual' and 'Tact_err' columns
"""
# First, get a list of the secondary stars in the data
secondary_names = pandas.unique(df.Secondary)
secondary_to_temperature = defaultdict(float)
secondary_to_error = defaultdict(float)
if method.lower() == 'spt':
MS = SpectralTypeRelations.MainSequence()
for secondary in secondary_names:
star_data = StarData.GetData(secondary)
spt = star_data.spectype[0] + re.search('[0-9]\.*[0-9]*', star_data.spectype).group()
T_sec = MS.Interpolate(MS.Temperature, spt)
secondary_to_temperature[secondary] = T_sec
elif method.lower() == 'excel':
table = pandas.read_excel('SecondaryStar_Temperatures.xls', 0)
for secondary in secondary_names:
T_sec = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())]['Literature_Temp'].item()
T_error = table.loc[table.Star.str.lower().str.contains(secondary.strip().lower())][
'Literature_error'].item()
secondary_to_temperature[secondary] = T_sec
secondary_to_error[secondary] = T_error
df['Tactual'] = df['Secondary'].map(lambda s: secondary_to_temperature[s])
df['Tact_err'] = df['Secondary'].map(lambda s: secondary_to_error[s])
return
def make_gaussian_process_samples(df):
"""
Make a gaussian process fitting the Tactual-Tmeasured relationship
:param df: pandas DataFrame with columns 'Temperature' (with the measured temperature)
and 'Tactual' (for the actual temperature)
:return: emcee sampler instance
"""
# First, find the uncertainties at each actual temperature
# Tactual = df['Tactual'].values
#Tmeasured = df['Temperature'].values
#error = df['Tact_err'].values
temp = df.groupby('Temperature').mean()['Tactual']
Tmeasured = temp.keys().values
Tactual = temp.values
error = np.nan_to_num(df.groupby('Temperature').std(ddof=1)['Tactual'].values)
default = np.median(error[error > 1])
error = np.maximum(error, np.ones(error.size) * default)
for Tm, Ta, e in zip(Tmeasured, Tactual, error):
print Tm, Ta, e
plt.figure(1)
plt.errorbar(Tmeasured, Tactual, yerr=error, fmt='.k', capsize=0)
plt.plot(Tmeasured, Tmeasured, 'r--')
plt.xlim((min(Tmeasured) - 100, max(Tmeasured) + 100))
plt.xlabel('Measured Temperature')
plt.ylabel('Actual Temperature')
plt.show(block=False)
# Define some functions to use in the GP fit
def model(pars, T):
#polypars = pars[2:]
#return np.poly1d(polypars)(T)
return T
def lnlike(pars, Tact, Tmeas, Terr):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeas, Terr)
return gp.lnlikelihood(Tact - model(pars, Tmeas))
def lnprior(pars):
lna, lntau = pars[:2]
polypars = pars[2:]
if -20 < lna < 20 and 4 < lntau < 20:
return 0.0
return -np.inf
def lnprob(pars, x, y, yerr):
lp = lnprior(pars)
return lp + lnlike(pars, x, y, yerr) if np.isfinite(lp) else -np.inf
# Set up the emcee fitter
initial = np.array([0, 6])#, 1.0, 0.0])
ndim = len(initial)
nwalkers = 100
p0 = [np.array(initial) + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(Tactual, Tmeasured, error))
print 'Running first burn-in'
p1, lnp, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print "Running second burn-in..."
p_best = p1[np.argmax(lnp)]
p2 = [p_best + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
p3, _, _ = sampler.run_mcmc(p2, 250)
sampler.reset()
print "Running production..."
sampler.run_mcmc(p3, 1000)
# Plot a bunch of the fits
print "Plotting..."
N = 100
Tvalues = np.arange(3300, 7000, 20)
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
for i, pars in enumerate(par_vals):
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
plt.plot(Tvalues, s, 'b-', alpha=0.1)
plt.draw()
# Finally, get posterior samples at all the possibly measured temperatures
print 'Generating posterior samples at all temperatures...'
N = 10000 # This is 1/10th of the total number of samples!
idx = np.argsort(-sampler.lnprobability.flatten())[:N] # Get N 'best' curves
par_vals = sampler.flatchain[idx]
Tvalues = np.arange(3000, 6900, 100)
gp_posterior = []
for pars in par_vals:
a, tau = np.exp(pars[:2])
gp = george.GP(a * kernels.ExpSquaredKernel(tau))
gp.compute(Tmeasured, error)
s = gp.sample_conditional(Tactual - model(pars, Tmeasured), Tvalues) + model(pars, Tvalues)
gp_posterior.append(s)
# Finally, make confidence intervals for the actual temperatures
gp_posterior = np.array(gp_posterior)
l, m, h = np.percentile(gp_posterior, [16.0, 50.0, 84.0], axis=0)
conf = pandas.DataFrame(data={'Measured Temperature': Tvalues, 'Actual Temperature': m,
'Lower Bound': l, 'Upper bound': h})
conf.to_csv('Confidence_Intervals.csv', index=False)
return sampler, np.array(gp_posterior)
def check_posterior(df, posterior, Tvalues):
"""
Checks the posterior samples: Are 95% of the measurements within 2-sigma of the prediction?
:param df: The summary dataframe
:param posterior: The MCMC predicted values
:param Tvalues: The measured temperatures the posterior was made with
:return: boolean, as well as some warning messages if applicable
"""
# First, make 2-sigma confidence intervals
l, m, h = np.percentile(posterior, [5.0, 50.0, 95.0], axis=0)
# Save the confidence intervals
# conf = pandas.DataFrame(data={'Measured Temperature': Tvalues, 'Actual Temperature': m,
# 'Lower Bound': l, 'Upper bound': h})
#conf.to_csv('Confidence_Intervals.csv', index=False)
Ntot = [] # The total number of observations with the given measured temperature
Nacc = [] # The number that have actual temperatures within the confidence interval
g = df.groupby('Temperature')
for i, T in enumerate(Tvalues):
if T in g.groups.keys():
Ta = g.get_group(T)['Tactual']
low, high = l[i], h[i]
Ntot.append(len(Ta))
Nacc.append(len(Ta.loc[(Ta >= low) & (Ta <= high)]))
p = float(Nacc[-1]) / float(Ntot[-1])
if p < 0.95:
logging.warn(
'Only {}/{} of the samples ({:.2f}%) were accepted for T = {} K'.format(Nacc[-1], Ntot[-1], p * 100,
T))
print low, high
print sorted(Ta)
else:
Ntot.append(0)
Nacc.append(0)
p = float(sum(Nacc)) / float(sum(Ntot))
if p < 0.95:
logging.warn('Only {:.2f}% of the total samples were accepted!'.format(p * 100))
return False
return True
if __name__ == '__main__':
pass
| gpl-3.0 |
codester2/devide.johannes | install_packages/ip_matplotlib.py | 5 | 5932 | # Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
from install_package import InstallPackage
import os
import shutil
import sys
import utils
from distutils import sysconfig
MPL_VER = "1.1.0"
if os.name == 'posix':
MPL_ARCHIVE = "matplotlib-%s.tar.gz" % (MPL_VER,)
MPL_URL = "http://surfnet.dl.sourceforge.net/sourceforge/matplotlib/%s" % \
(MPL_ARCHIVE,)
elif os.name == 'nt':
if config.WINARCH_STR == 'x64':
WINTHINGY = 'win-amd64'
else:
WINTHINGY = 'win32'
MPL_ARCHIVE = "matplotlib-%s.%s-py2.7.exe" % (MPL_VER, WINTHINGY)
MPL_URL = "http://graphics.tudelft.nl/~cpbotha/files/devide/johannes_support/gohlke/%s" % (MPL_ARCHIVE,)
MPL_DIRBASE = "matplotlib-%s" % (MPL_VER,)
# I prefer that this be built with numpy, but it is not a dependency
# per se
dependencies = []
class matplotlib(InstallPackage):
def __init__(self):
self.tbfilename = os.path.join(config.archive_dir, MPL_ARCHIVE)
self.build_dir = os.path.join(config.build_dir, MPL_DIRBASE)
self.inst_dir = os.path.join(config.inst_dir, 'matplotlib')
def get(self):
if os.path.exists(self.tbfilename):
utils.output("%s already present, not downloading." %
(MPL_ARCHIVE,))
else:
utils.goto_archive()
utils.urlget(MPL_URL)
def unpack(self):
if os.path.isdir(self.build_dir):
utils.output("MATPLOTLIB source already unpacked, not redoing.")
else:
if os.name == 'posix':
utils.output("Unpacking MATPLOTLIB source.")
utils.unpack_build(self.tbfilename)
else:
utils.output("Unpacking MATPLOTLIB binaries.")
os.mkdir(self.build_dir)
os.chdir(self.build_dir)
utils.unpack(self.tbfilename)
def configure(self):
if os.name == 'nt':
utils.output("Skipping configure (WINDOWS).")
return
# pre-configure setup.py and setupext.py so that everything is
# found and configured as we want it.
os.chdir(self.build_dir)
if os.path.exists('setup.py.new'):
utils.output('matplotlib already configured. Skipping step.')
else:
# pre-filter setup.py
repls = [("(BUILD_GTKAGG\s*=\s*).*", "\\1 0"),
("(BUILD_GTK\s*=\s*).*", "\\1 0"),
("(BUILD_TKAGG\s*=\s*).*", "\\1 0"),
("(BUILD_WXAGG\s*=\s*).*", "\\1 1"),
("(rc\s*=\s*dict\().*",
"\\1 {'backend':'PS', 'numerix':'numpy'} )")]
utils.re_sub_filter_file(repls, 'setup.py')
def build(self):
if os.name == 'nt':
utils.output("Skipping build (WINDOWS).")
return
os.chdir(self.build_dir)
# weak test... there are .so files deeper, but they're in platform
# specific directories
if os.path.exists('build'):
utils.output('matplotlib already built. Skipping step.')
else:
# add wx bin to path so that wx-config can be found
os.environ['PATH'] = "%s%s%s" % (config.WX_BIN_PATH,
os.pathsep, os.environ['PATH'])
ret = os.system('%s setup.py build' % (sys.executable,))
if ret != 0:
utils.error('matplotlib build failed. Please fix and try again.')
def install(self):
# to test for install, just do python -c "import matplotlib"
# and test the result (we could just import directly, but that would
# only work once our invoking python has been stopped and started
# again)
os.chdir(config.archive_dir) # we need to be elsewhere!
ret = os.system('%s -c "import matplotlib"' % (sys.executable,))
if ret == 0:
utils.output('matplotlib already installed. Skipping step.')
else:
utils.output('ImportError test shows that matplotlib is not '
'installed. Installing...')
if os.name == 'nt':
self.install_nt()
else:
self.install_posix()
# make sure the backend is set to WXAgg
# and that interactive is set to True
rcfn = os.path.join(
config.PYTHON_SITE_PACKAGES,
'matplotlib', 'mpl-data', 'matplotlibrc')
utils.re_sub_filter_file(
[("(\s*backend\s*\:).*", "\\1 WXAgg"),
("#*(\s*interactive\s:).*","\\1 True")], rcfn)
def install_nt(self):
sp_dir = sysconfig.get_python_lib()
utils.copy_glob(os.path.join(self.build_dir, 'PLATLIB', '*'), sp_dir)
def install_posix(self):
os.chdir(self.build_dir)
# add wx bin to path so that wx-config can be found
os.environ['PATH'] = "%s%s%s" % (config.WX_BIN_PATH,
os.pathsep, os.environ['PATH'])
ret = os.system('%s setup.py install' % (sys.executable,))
if ret != 0:
utils.error(
'matplotlib install failed. Please fix and try again.')
def clean_build(self):
utils.output("Removing build and install directories.")
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
from distutils import sysconfig
matplotlib_instdir = os.path.join(sysconfig.get_python_lib(),
'matplotlib')
if os.path.exists(matplotlib_instdir):
shutil.rmtree(matplotlib_instdir)
def get_installed_version(self):
import matplotlib
return matplotlib.__version__
| bsd-3-clause |
laiqiqi886/kbengine | kbe/src/lib/python/Lib/test/test_ossaudiodev.py | 97 | 7216 | from test import support
support.requires('audio')
from test.support import findfile
ossaudiodev = support.import_module('ossaudiodev')
import errno
import sys
import sunau
import time
import audioop
import unittest
# Arggh, AFMT_S16_NE not defined on all platforms -- seems to be a
# fairly recent addition to OSS.
try:
from ossaudiodev import AFMT_S16_NE
except ImportError:
if sys.byteorder == "little":
AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
else:
AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
def read_sound_file(path):
with open(path, 'rb') as fp:
au = sunau.open(fp)
rate = au.getframerate()
nchannels = au.getnchannels()
encoding = au._encoding
fp.seek(0)
data = fp.read()
if encoding != sunau.AUDIO_FILE_ENCODING_MULAW_8:
raise RuntimeError("Expect .au file with 8-bit mu-law samples")
# Convert the data to 16-bit signed.
data = audioop.ulaw2lin(data, 2)
return (data, rate, 16, nchannels)
class OSSAudioDevTests(unittest.TestCase):
def play_sound_file(self, data, rate, ssize, nchannels):
try:
dsp = ossaudiodev.open('w')
except OSError as msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT,
errno.ENODEV, errno.EBUSY):
raise unittest.SkipTest(msg)
raise
# at least check that these methods can be invoked
dsp.bufsize()
dsp.obufcount()
dsp.obuffree()
dsp.getptr()
dsp.fileno()
# Make sure the read-only attributes work.
self.assertFalse(dsp.closed)
self.assertEqual(dsp.name, "/dev/dsp")
self.assertEqual(dsp.mode, "w", "bad dsp.mode: %r" % dsp.mode)
# And make sure they're really read-only.
for attr in ('closed', 'name', 'mode'):
try:
setattr(dsp, attr, 42)
except (TypeError, AttributeError):
pass
else:
self.fail("dsp.%s not read-only" % attr)
# Compute expected running time of sound sample (in seconds).
expected_time = float(len(data)) / (ssize/8) / nchannels / rate
# set parameters based on .au file headers
dsp.setparameters(AFMT_S16_NE, nchannels, rate)
self.assertTrue(abs(expected_time - 3.51) < 1e-2, expected_time)
t1 = time.time()
dsp.write(data)
dsp.close()
t2 = time.time()
elapsed_time = t2 - t1
percent_diff = (abs(elapsed_time - expected_time) / expected_time) * 100
self.assertTrue(percent_diff <= 10.0,
"elapsed time (%s) > 10%% off of expected time (%s)" %
(elapsed_time, expected_time))
def set_parameters(self, dsp):
# Two configurations for testing:
# config1 (8-bit, mono, 8 kHz) should work on even the most
# ancient and crufty sound card, but maybe not on special-
# purpose high-end hardware
# config2 (16-bit, stereo, 44.1kHz) should work on all but the
# most ancient and crufty hardware
config1 = (ossaudiodev.AFMT_U8, 1, 8000)
config2 = (AFMT_S16_NE, 2, 44100)
for config in [config1, config2]:
(fmt, channels, rate) = config
if (dsp.setfmt(fmt) == fmt and
dsp.channels(channels) == channels and
dsp.speed(rate) == rate):
break
else:
raise RuntimeError("unable to set audio sampling parameters: "
"you must have really weird audio hardware")
# setparameters() should be able to set this configuration in
# either strict or non-strict mode.
result = dsp.setparameters(fmt, channels, rate, False)
self.assertEqual(result, (fmt, channels, rate),
"setparameters%r: returned %r" % (config, result))
result = dsp.setparameters(fmt, channels, rate, True)
self.assertEqual(result, (fmt, channels, rate),
"setparameters%r: returned %r" % (config, result))
def set_bad_parameters(self, dsp):
# Now try some configurations that are presumably bogus: eg. 300
# channels currently exceeds even Hollywood's ambitions, and
# negative sampling rate is utter nonsense. setparameters() should
# accept these in non-strict mode, returning something other than
# was requested, but should barf in strict mode.
fmt = AFMT_S16_NE
rate = 44100
channels = 2
for config in [(fmt, 300, rate), # ridiculous nchannels
(fmt, -5, rate), # impossible nchannels
(fmt, channels, -50), # impossible rate
]:
(fmt, channels, rate) = config
result = dsp.setparameters(fmt, channels, rate, False)
self.assertNotEqual(result, config,
"unexpectedly got requested configuration")
try:
result = dsp.setparameters(fmt, channels, rate, True)
except ossaudiodev.OSSAudioError as err:
pass
else:
self.fail("expected OSSAudioError")
def test_playback(self):
sound_info = read_sound_file(findfile('audiotest.au'))
self.play_sound_file(*sound_info)
def test_set_parameters(self):
dsp = ossaudiodev.open("w")
try:
self.set_parameters(dsp)
# Disabled because it fails under Linux 2.6 with ALSA's OSS
# emulation layer.
#self.set_bad_parameters(dsp)
finally:
dsp.close()
self.assertTrue(dsp.closed)
def test_mixer_methods(self):
# Issue #8139: ossaudiodev didn't initialize its types properly,
# therefore some methods were unavailable.
with ossaudiodev.openmixer() as mixer:
self.assertGreaterEqual(mixer.fileno(), 0)
def test_with(self):
with ossaudiodev.open('w') as dsp:
pass
self.assertTrue(dsp.closed)
def test_on_closed(self):
dsp = ossaudiodev.open('w')
dsp.close()
self.assertRaises(ValueError, dsp.fileno)
self.assertRaises(ValueError, dsp.read, 1)
self.assertRaises(ValueError, dsp.write, b'x')
self.assertRaises(ValueError, dsp.writeall, b'x')
self.assertRaises(ValueError, dsp.bufsize)
self.assertRaises(ValueError, dsp.obufcount)
self.assertRaises(ValueError, dsp.obufcount)
self.assertRaises(ValueError, dsp.obuffree)
self.assertRaises(ValueError, dsp.getptr)
mixer = ossaudiodev.openmixer()
mixer.close()
self.assertRaises(ValueError, mixer.fileno)
def test_main():
try:
dsp = ossaudiodev.open('w')
except (ossaudiodev.error, OSError) as msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT,
errno.ENODEV, errno.EBUSY):
raise unittest.SkipTest(msg)
raise
dsp.close()
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
jhd/spunout | flask/lib/python2.7/site-packages/pip/_vendor/requests/auth.py | 294 | 6173 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import logging
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header
log = logging.getLogger(__name__)
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
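# Illustrative sketch (not part of the original module): the Basic auth header is just
# 'Basic ' plus the base64 of 'username:password'. The credentials are made up.
def _demo_basic_auth_str():
    # base64('user:pass') == 'dXNlcjpwYXNz'
    assert _basic_auth_str('user', 'pass') == 'Basic dXNlcjpwYXNz'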
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        hash_utf8 = None  # stays None for unrecognized algorithms; checked below
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
setattr(self, 'num_401_calls', num_401_calls + 1)
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
setattr(self, 'num_401_calls', 1)
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
pass
r.register_hook('response', self.handle_401)
return r
| gpl-3.0 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/oauth2client/oauth2client/gce.py | 12 | 3515 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google Compute Engine
Utilities for making it easier to use OAuth 2.0 on Google Compute Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import json
import logging
import urllib
from oauth2client import util
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import AssertionCredentials
logger = logging.getLogger(__name__)
# URI Template for the endpoint that returns access_tokens.
META = ('http://metadata.google.internal/0.1/meta-data/service-accounts/'
'default/acquire{?scope}')
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for Compute Engine Assertion Grants
This object will allow a Compute Engine instance to identify itself to
Google and other OAuth 2.0 servers that can verify assertions. It can be used
for the purpose of accessing data stored under an account assigned to the
Compute Engine instance itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
@util.positional(2)
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or iterable of strings, scope(s) of the credentials being
requested.
"""
self.scope = util.scopes_to_string(scope)
self.kwargs = kwargs
# Assertion type is no longer used, but still in the parent class signature.
super(AppAssertionCredentials, self).__init__(None)
@classmethod
def from_json(cls, json_data):
data = json.loads(json_data)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Skip all the storage hoops and just refresh using the API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
query = '?scope=%s' % urllib.quote(self.scope, '')
uri = META.replace('{?scope}', query)
response, content = http_request(uri)
if response.status == 200:
try:
d = json.loads(content)
except StandardError as e:
raise AccessTokenRefreshError(str(e))
self.access_token = d['accessToken']
else:
if response.status == 404:
content += (' This can occur if a VM was created'
' with no service account or scopes.')
raise AccessTokenRefreshError(content)
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
def create_scoped_required(self):
return not self.scope
def create_scoped(self, scopes):
return AppAssertionCredentials(scopes, **self.kwargs)
| bsd-3-clause |
eriser/marsyas | scripts/large-evaluators/eval_manager.py | 5 | 1222 | #!/usr/bin/env python
import multiprocessing
class EvalManager():
def __init__(self, num_processes=4):
""" Sets up infrastructure with the desired number of
processes."""
self.pool = multiprocessing.Pool(processes=num_processes)
self.manager = multiprocessing.Manager()
self.queue = self.manager.Queue()
def task(self, function, datalist):
""" Runs the function on datalist. Result are available
via get_results()."""
for datum in datalist:
self.pool.apply_async(function, args=(self.queue, datum))
def task_block(self, function, datalist):
""" Runs the function on datalist, but blocks. This is
very useful for debugging the function. Result are
available via get_results()."""
for datum in datalist:
apply(function, (self.queue, datum))
def get_results(self):
""" Blocks until computation is finished, then returns a
list of results."""
self.pool.close()
self.pool.join()
results_list = []
while not self.queue.empty():
result = self.queue.get()
results_list.append(result)
return results_list
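# Illustrative usage sketch (not part of the original script). The worker must be a
# picklable module-level function taking the shared queue as its first argument, which
# matches how task()/task_block() invoke it; _square is a made-up example.
def _square(queue, x):
    queue.put(x * x)
if __name__ == '__main__':
    manager = EvalManager(num_processes=2)
    manager.task(_square, [1, 2, 3, 4])
    # get_results() blocks until the pool drains, then returns the queued values
    print(manager.get_results())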
| gpl-2.0 |
fjbatresv/odoo | addons/account/edi/__init__.py | 450 | 1062 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wong2/sentry | src/sentry/event_manager.py | 2 | 19299 | """
sentry.event_manager
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import math
import six
from datetime import datetime, timedelta
from django.conf import settings
from django.db import IntegrityError, transaction
from django.utils import timezone
from hashlib import md5
from raven.utils.encoding import to_string
from uuid import uuid4
from sentry.app import buffer, tsdb
from sentry.constants import (
CLIENT_RESERVED_ATTRS, LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH,
MAX_TAG_VALUE_LENGTH
)
from sentry.interfaces.base import get_interface
from sentry.models import (
Activity, Event, EventMapping, Group, GroupHash, GroupStatus, Project,
Release, UserReport
)
from sentry.plugins import plugins
from sentry.signals import regression_signal
from sentry.utils.logging import suppress_exceptions
from sentry.tasks.index import index_event
from sentry.tasks.merge import merge_group
from sentry.tasks.post_process import post_process_group
from sentry.utils.db import get_db_engine
from sentry.utils.safe import safe_execute, trim, trim_dict
def count_limit(count):
# TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
# ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
for amount, sample_rate in settings.SENTRY_SAMPLE_RATES:
if count <= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_RATE
def time_limit(silence): # ~ 3600 per hour
for amount, sample_rate in settings.SENTRY_SAMPLE_TIMES:
if silence >= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_TIME
def md5_from_hash(hash_bits):
result = md5()
for bit in hash_bits:
result.update(to_string(bit))
return result.hexdigest()
def get_hashes_for_event(event):
interfaces = event.get_interfaces()
for interface in interfaces.itervalues():
result = interface.compute_hashes(event.platform)
if not result:
continue
return result
return [[event.message]]
def get_hashes_from_fingerprint(event, fingerprint):
default_values = set(['{{ default }}', '{{default}}'])
if any(d in fingerprint for d in default_values):
default_hashes = get_hashes_for_event(event)
hash_count = len(default_hashes)
else:
hash_count = 1
hashes = []
for idx in xrange(hash_count):
result = []
for bit in fingerprint:
if bit in default_values:
result.extend(default_hashes[idx])
else:
result.append(bit)
hashes.append(result)
return hashes
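# Illustrative sketch (not part of the original module): a client-supplied fingerprint
# without '{{ default }}' is used verbatim as a single hash list (the event itself is
# never consulted in that branch), and md5_from_hash reduces the list to the hex digest
# stored via GroupHash. The fingerprint values are made up.
def _demo_fingerprint_hashing():
    assert get_hashes_from_fingerprint(None, ['my-view', 'DatabaseError']) == [['my-view', 'DatabaseError']]
    # hashing the list is equivalent to hashing the concatenation of its parts
    assert md5_from_hash(['my-view', 'DatabaseError']) == md5('my-viewDatabaseError').hexdigest()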
if not settings.SENTRY_SAMPLE_DATA:
def should_sample(current_datetime, last_seen, times_seen):
return False
else:
def should_sample(current_datetime, last_seen, times_seen):
silence_timedelta = current_datetime - last_seen
silence = silence_timedelta.days * 86400 + silence_timedelta.seconds
if times_seen % count_limit(times_seen) == 0:
return False
if times_seen % time_limit(silence) == 0:
return False
return True
def plugin_is_regression(group, event):
project = event.project
for plugin in plugins.for_project(project):
result = safe_execute(plugin.is_regression, group, event,
version=1, _with_transaction=False)
if result is not None:
return result
return True
class ScoreClause(object):
def __init__(self, group):
self.group = group
def __int__(self):
# Calculate the score manually when coercing to an int.
# This is used within create_or_update and friends
return self.group.get_score()
def prepare_database_save(self, unused):
return self
def prepare(self, evaluator, query, allow_joins):
return
def evaluate(self, node, qn, connection):
engine = get_db_engine(getattr(connection, 'alias', 'default'))
if engine.startswith('postgresql'):
sql = 'log(times_seen) * 600 + last_seen::abstime::int'
elif engine.startswith('mysql'):
sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
else:
# XXX: if we cant do it atomically let's do it the best we can
sql = int(self)
return (sql, [])
@classmethod
def calculate(self, times_seen, last_seen):
return math.log(times_seen) * 600 + float(last_seen.strftime('%s'))
class EventManager(object):
logger = logging.getLogger('sentry.events')
def __init__(self, data, version='5'):
self.data = data
self.version = version
def normalize(self):
# TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
# First we pull out our top-level (non-data attr) kwargs
data = self.data
if not isinstance(data.get('level'), (six.string_types, int)):
data['level'] = logging.ERROR
elif data['level'] not in LOG_LEVELS:
data['level'] = logging.ERROR
if not data.get('logger'):
data['logger'] = DEFAULT_LOGGER_NAME
else:
data['logger'] = trim(data['logger'], 64)
if data.get('platform'):
data['platform'] = trim(data['platform'], 64)
timestamp = data.get('timestamp')
if not timestamp:
timestamp = timezone.now()
if isinstance(timestamp, datetime):
# We must convert date to local time so Django doesn't mess it up
# based on TIME_ZONE
if settings.TIME_ZONE:
if not timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=timezone.utc)
elif timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=None)
timestamp = float(timestamp.strftime('%s'))
data['timestamp'] = timestamp
if not data.get('event_id'):
data['event_id'] = uuid4().hex
data.setdefault('message', None)
data.setdefault('culprit', None)
data.setdefault('time_spent', None)
data.setdefault('server_name', None)
data.setdefault('site', None)
data.setdefault('checksum', None)
data.setdefault('fingerprint', None)
data.setdefault('platform', None)
data.setdefault('extra', {})
data.setdefault('errors', [])
tags = data.get('tags')
if not tags:
tags = []
# full support for dict syntax
elif isinstance(tags, dict):
tags = tags.items()
# prevent [tag, tag, tag] (invalid) syntax
elif not all(len(t) == 2 for t in tags):
tags = []
else:
tags = list(tags)
data['tags'] = []
for key, value in tags:
key = six.text_type(key).strip()
value = six.text_type(value).strip()
if not (key and value):
continue
if len(value) > MAX_TAG_VALUE_LENGTH:
continue
data['tags'].append((key, value))
if not isinstance(data['extra'], dict):
# throw it away
data['extra'] = {}
trim_dict(
data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE)
# TODO(dcramer): more of validate data needs stuffed into the manager
for key in data.keys():
if key in CLIENT_RESERVED_ATTRS:
continue
value = data.pop(key)
try:
interface = get_interface(key)()
except ValueError:
continue
try:
inst = interface.to_python(value)
data[inst.get_path()] = inst.to_json()
except Exception:
pass
data['version'] = self.version
# TODO(dcramer): find a better place for this logic
exception = data.get('sentry.interfaces.Exception')
stacktrace = data.get('sentry.interfaces.Stacktrace')
if exception and len(exception['values']) == 1 and stacktrace:
exception['values'][0]['stacktrace'] = stacktrace
del data['sentry.interfaces.Stacktrace']
if 'sentry.interfaces.Http' in data:
# default the culprit to the url
if not data['culprit']:
data['culprit'] = data['sentry.interfaces.Http']['url']
if data['time_spent']:
data['time_spent'] = int(data['time_spent'])
if data['culprit']:
data['culprit'] = trim(data['culprit'], MAX_CULPRIT_LENGTH)
if data['message']:
data['message'] = trim(
data['message'], settings.SENTRY_MAX_MESSAGE_LENGTH)
return data
@suppress_exceptions
def save(self, project, raw=False):
# TODO: culprit should default to "most recent" frame in stacktraces when
# it's not provided.
project = Project.objects.get_from_cache(id=project)
data = self.data.copy()
# First we pull out our top-level (non-data attr) kwargs
event_id = data.pop('event_id')
message = data.pop('message')
level = data.pop('level')
culprit = data.pop('culprit', None) or ''
time_spent = data.pop('time_spent', None)
logger_name = data.pop('logger', None)
server_name = data.pop('server_name', None)
site = data.pop('site', None)
checksum = data.pop('checksum', None)
fingerprint = data.pop('fingerprint', None)
platform = data.pop('platform', None)
release = data.pop('release', None)
date = datetime.fromtimestamp(data.pop('timestamp'))
date = date.replace(tzinfo=timezone.utc)
kwargs = {
'message': message,
'platform': platform,
}
event = Event(
project=project,
event_id=event_id,
data=data,
time_spent=time_spent,
datetime=date,
**kwargs
)
tags = data.get('tags') or []
tags.append(('level', LOG_LEVELS[level]))
if logger_name:
tags.append(('logger', logger_name))
if server_name:
tags.append(('server_name', server_name))
if site:
tags.append(('site', site))
if release:
# TODO(dcramer): we should ensure we create Release objects
tags.append(('sentry:release', release))
for plugin in plugins.for_project(project, version=None):
added_tags = safe_execute(plugin.get_tags, event,
_with_transaction=False)
if added_tags:
tags.extend(added_tags)
# XXX(dcramer): we're relying on mutation of the data object to ensure
# this propagates into Event
data['tags'] = tags
# prioritize fingerprint over checksum as its likely the client defaulted
# a checksum whereas the fingerprint was explicit
if fingerprint:
hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
elif checksum:
hashes = [checksum]
else:
hashes = map(md5_from_hash, get_hashes_for_event(event))
group_kwargs = kwargs.copy()
group_kwargs.update({
'culprit': culprit,
'logger': logger_name,
'level': level,
'last_seen': date,
'first_seen': date,
'time_spent_total': time_spent or 0,
'time_spent_count': time_spent and 1 or 0,
})
if release:
release = Release.get_or_create(
project=project,
version=release,
date_added=date,
)
group_kwargs['first_release'] = release
Activity.objects.create(
type=Activity.RELEASE,
project=project,
ident=release,
data={'version': release},
datetime=date,
)
group, is_new, is_regression, is_sample = safe_execute(
self._save_aggregate,
event=event,
hashes=hashes,
**group_kwargs
)
using = group._state.db
event.group = group
event.group_id = group.id
# store a reference to the group id to guarantee validation of isolation
event.data.bind_ref(event)
try:
with transaction.atomic():
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
return event
UserReport.objects.filter(
project=project, event_id=event_id,
).update(group=group)
# save the event unless its been sampled
if not is_sample:
try:
with transaction.atomic():
event.save()
except IntegrityError:
self.logger.info('Duplicate Event found for event_id=%s', event_id)
return event
if is_new and release:
buffer.incr(Release, {'new_groups': 1}, {
'id': release.id,
})
safe_execute(Group.objects.add_tags, group, tags,
_with_transaction=False)
if not raw:
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
)
else:
self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)
index_event.delay(event)
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event
def _find_hashes(self, project, hash_list):
matches = []
for hash in hash_list:
ghash, _ = GroupHash.objects.get_or_create(
project=project,
hash=hash,
)
matches.append((ghash.group_id, ghash.hash))
return matches
def _ensure_hashes_merged(self, group, hash_list):
# TODO(dcramer): there is a race condition with selecting/updating
# in that another group could take ownership of the hash
bad_hashes = GroupHash.objects.filter(
project=group.project,
hash__in=hash_list,
).exclude(
group=group,
)
if not bad_hashes:
return
for hash in bad_hashes:
merge_group.delay(
from_group_id=hash.group_id,
to_group_id=group.id,
)
return GroupHash.objects.filter(
project=group.project,
hash__in=bad_hashes,
).update(
group=group,
)
def _save_aggregate(self, event, hashes, **kwargs):
time_spent = event.time_spent
project = event.project
# attempt to find a matching hash
all_hashes = self._find_hashes(project, hashes)
try:
existing_group_id = (h[0] for h in all_hashes if h[0]).next()
except StopIteration:
existing_group_id = None
# XXX(dcramer): this has the opportunity to create duplicate groups
# it should be resolved by the hash merging function later but this
# should be better tested/reviewed
if existing_group_id is None:
kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen'])
group, group_is_new = Group.objects.create(
project=project,
**kwargs
), True
else:
group = Group.objects.get(id=existing_group_id)
group_is_new = False
# If all hashes are brand new we treat this event as new
is_new = False
new_hashes = [h[1] for h in all_hashes if h[0] is None]
if new_hashes:
affected = GroupHash.objects.filter(
project=project,
hash__in=new_hashes,
group__isnull=True,
).update(
group=group,
)
if affected != len(new_hashes):
self._ensure_hashes_merged(group, new_hashes)
elif group_is_new and len(new_hashes) == len(all_hashes):
is_new = True
# XXX(dcramer): it's important this gets called **before** the aggregate
# is processed as otherwise values like last_seen will get mutated
can_sample = should_sample(event.datetime, group.last_seen, group.times_seen)
if not is_new:
is_regression = self._process_existing_aggregate(group, event, kwargs)
else:
is_regression = False
# Determine if we've sampled enough data to store this event
if is_new or is_regression:
is_sample = False
else:
is_sample = can_sample
tsdb.incr_multi([
(tsdb.models.group, group.id),
(tsdb.models.project, project.id),
])
return group, is_new, is_regression, is_sample
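# Note on the return value above: the caller stores the event only when
# 'is_sample' is False (new events and regressions force is_sample to False a
# few lines up), so sampled events are counted in tsdb/buffer stats but never
# saved individually.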
def _process_existing_aggregate(self, group, event, data):
date = max(event.datetime, group.last_seen)
extra = {
'last_seen': date,
'score': ScoreClause(group),
}
if event.message and event.message != group.message:
extra['message'] = event.message
if group.level != data['level']:
extra['level'] = data['level']
if group.culprit != data['culprit']:
extra['culprit'] = data['culprit']
is_regression = False
if group.is_resolved() and plugin_is_regression(group, event):
is_regression = bool(Group.objects.filter(
id=group.id,
# ensure we can't update things if the status has been set to
# muted
status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED],
).exclude(
# add to the regression window to account for races here
active_at__gte=date - timedelta(seconds=5),
).update(
active_at=date,
# explicitly set last_seen here as ``is_resolved()`` looks
# at the value
last_seen=date,
status=GroupStatus.UNRESOLVED
))
group.active_at = date
group.status = GroupStatus.UNRESOLVED
group.last_seen = extra['last_seen']
update_kwargs = {
'times_seen': 1,
}
if event.time_spent:
update_kwargs.update({
'time_spent_total': event.time_spent,
'time_spent_count': 1,
})
buffer.incr(Group, update_kwargs, {
'id': group.id,
}, extra)
return is_regression
| bsd-3-clause |
MattDevo/edk2 | BaseTools/Source/Python/Workspace/MetaFileTable.py | 1 | 16975 | ## @file
# This file is used to create/update/query/erase a meta file table
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import uuid
import Common.EdkLogger as EdkLogger
from Common.BuildToolError import FORMAT_INVALID
from CommonDataClass.DataClass import MODEL_FILE_DSC, MODEL_FILE_DEC, MODEL_FILE_INF, \
MODEL_FILE_OTHERS
from Common.DataType import *
class MetaFileTable():
# TRICK: use file ID as the part before '.'
_ID_STEP_ = 1
_ID_MAX_ = 99999999
## Constructor
def __init__(self, DB, MetaFile, FileType, Temporary, FromItem=None):
self.MetaFile = MetaFile
self.TableName = ""
self.DB = DB
self._NumpyTab = None
self.CurrentContent = []
DB.TblFile.append([MetaFile.Name,
MetaFile.Ext,
MetaFile.Dir,
MetaFile.Path,
FileType,
MetaFile.TimeStamp,
FromItem])
self.FileId = len(DB.TblFile)
self.ID = self.FileId * 10**8
if Temporary:
self.TableName = "_%s_%s_%s" % (FileType, len(DB.TblFile), uuid.uuid4().hex)
else:
self.TableName = "_%s_%s" % (FileType, len(DB.TblFile))
def IsIntegrity(self):
try:
TimeStamp = self.MetaFile.TimeStamp
if not self.CurrentContent:
Result = False
else:
Result = self.CurrentContent[-1][0] < 0
if not Result:
# update the timestamp in database
self.DB.SetFileTimeStamp(self.FileId, TimeStamp)
return False
if TimeStamp != self.DB.GetFileTimeStamp(self.FileId):
# update the timestamp in database
self.DB.SetFileTimeStamp(self.FileId, TimeStamp)
return False
except Exception as Exc:
EdkLogger.debug(EdkLogger.DEBUG_5, str(Exc))
return False
return True
def SetEndFlag(self):
self.CurrentContent.append(self._DUMMY_)
def GetAll(self):
return [item for item in self.CurrentContent if item[0] >= 0 ]
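# Note: each table owns a file-scoped ID space. __init__ above seeds self.ID at
# FileId * 10**8, and the per-table Insert() implementations below advance it by
# _ID_STEP_ for every row appended to CurrentContent (clamping against _ID_MAX_
# where they check it), so a record's ID identifies the meta file it came from.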
## Python class representation of table storing module data
class ModuleTable(MetaFileTable):
_COLUMN_ = '''
ID REAL PRIMARY KEY,
Model INTEGER NOT NULL,
Value1 TEXT NOT NULL,
Value2 TEXT,
Value3 TEXT,
Scope1 TEXT,
Scope2 TEXT,
BelongsToItem REAL NOT NULL,
StartLine INTEGER NOT NULL,
StartColumn INTEGER NOT NULL,
EndLine INTEGER NOT NULL,
EndColumn INTEGER NOT NULL,
Enabled INTEGER DEFAULT 0
'''
# used as table end flag, in case the changes to the database are not committed to the db file
_DUMMY_ = [-1, -1, '====', '====', '====', '====', '====', -1, -1, -1, -1, -1, -1]
## Constructor
def __init__(self, Db, MetaFile, Temporary):
MetaFileTable.__init__(self, Db, MetaFile, MODEL_FILE_INF, Temporary)
## Insert a record into table Inf
#
# @param Model: Model of an Inf item
# @param Value1: Value1 of an Inf item
# @param Value2: Value2 of an Inf item
# @param Value3: Value3 of an Inf item
# @param Scope1: Arch of an Inf item
# @param Scope2: Platform of an Inf item
# @param BelongsToItem: The item belongs to which another item
# @param StartLine: StartLine of an Inf item
# @param StartColumn: StartColumn of an Inf item
# @param EndLine: EndLine of an Inf item
# @param EndColumn: EndColumn of an Inf item
# @param Enabled: If this item enabled
#
def Insert(self, Model, Value1, Value2, Value3, Scope1=TAB_ARCH_COMMON, Scope2=TAB_COMMON,
BelongsToItem=-1, StartLine=-1, StartColumn=-1, EndLine=-1, EndColumn=-1, Enabled=0):
(Value1, Value2, Value3, Scope1, Scope2) = (Value1.strip(), Value2.strip(), Value3.strip(), Scope1.strip(), Scope2.strip())
self.ID = self.ID + self._ID_STEP_
if self.ID >= (MODEL_FILE_INF + self._ID_MAX_):
self.ID = MODEL_FILE_INF + self._ID_STEP_
row = [ self.ID,
Model,
Value1,
Value2,
Value3,
Scope1,
Scope2,
BelongsToItem,
StartLine,
StartColumn,
EndLine,
EndColumn,
Enabled
]
self.CurrentContent.append(row)
return self.ID
## Query table
#
# @param Model: The Model of Record
# @param Arch: The Arch attribute of Record
# @param Platform: The Platform attribute of Record
#
# @retval: A recordSet of all found records
#
def Query(self, Model, Arch=None, Platform=None, BelongsToItem=None):
QueryTab = self.CurrentContent
result = [item for item in QueryTab if item[1] == Model and item[-1]>=0 ]
if Arch is not None and Arch != TAB_ARCH_COMMON:
ArchList = set(['COMMON'])
ArchList.add(Arch)
result = [item for item in result if item[5] in ArchList]
if Platform is not None and Platform != TAB_COMMON:
Platformlist = set( ['COMMON','DEFAULT'])
Platformlist.add(Platform)
result = [item for item in result if item[6] in Platformlist]
if BelongsToItem is not None:
result = [item for item in result if item[7] == BelongsToItem]
result = [ [r[2],r[3],r[4],r[5],r[6],r[0],r[9]] for r in result ]
return result
## Python class representation of table storing package data
class PackageTable(MetaFileTable):
_COLUMN_ = '''
ID REAL PRIMARY KEY,
Model INTEGER NOT NULL,
Value1 TEXT NOT NULL,
Value2 TEXT,
Value3 TEXT,
Scope1 TEXT,
Scope2 TEXT,
BelongsToItem REAL NOT NULL,
StartLine INTEGER NOT NULL,
StartColumn INTEGER NOT NULL,
EndLine INTEGER NOT NULL,
EndColumn INTEGER NOT NULL,
Enabled INTEGER DEFAULT 0
'''
# used as table end flag, in case the changes to the database are not committed to the db file
_DUMMY_ = [-1, -1, '====', '====', '====', '====', '====', -1, -1, -1, -1, -1, -1]
## Constructor
def __init__(self, Cursor, MetaFile, Temporary):
MetaFileTable.__init__(self, Cursor, MetaFile, MODEL_FILE_DEC, Temporary)
## Insert table
#
# Insert a record into table Dec
#
# @param Model: Model of a Dec item
# @param Value1: Value1 of a Dec item
# @param Value2: Value2 of a Dec item
# @param Value3: Value3 of a Dec item
# @param Scope1: Arch of a Dec item
# @param Scope2: Module type of a Dec item
# @param BelongsToItem: The item belongs to which another item
# @param StartLine: StartLine of a Dec item
# @param StartColumn: StartColumn of a Dec item
# @param EndLine: EndLine of a Dec item
# @param EndColumn: EndColumn of a Dec item
# @param Enabled: If this item enabled
#
def Insert(self, Model, Value1, Value2, Value3, Scope1=TAB_ARCH_COMMON, Scope2=TAB_COMMON,
BelongsToItem=-1, StartLine=-1, StartColumn=-1, EndLine=-1, EndColumn=-1, Enabled=0):
(Value1, Value2, Value3, Scope1, Scope2) = (Value1.strip(), Value2.strip(), Value3.strip(), Scope1.strip(), Scope2.strip())
self.ID = self.ID + self._ID_STEP_
row = [ self.ID,
Model,
Value1,
Value2,
Value3,
Scope1,
Scope2,
BelongsToItem,
StartLine,
StartColumn,
EndLine,
EndColumn,
Enabled
]
self.CurrentContent.append(row)
return self.ID
## Query table
#
# @param Model: The Model of Record
# @param Arch: The Arch attribute of Record
#
# @retval: A recordSet of all found records
#
def Query(self, Model, Arch=None):
QueryTab = self.CurrentContent
result = [item for item in QueryTab if item[1] == Model and item[-1]>=0 ]
if Arch is not None and Arch != TAB_ARCH_COMMON:
ArchList = set(['COMMON'])
ArchList.add(Arch)
result = [item for item in result if item[5] in ArchList]
return [[r[2], r[3], r[4], r[5], r[6], r[0], r[8]] for r in result]
def GetValidExpression(self, TokenSpaceGuid, PcdCName):
QueryTab = self.CurrentContent
result = [[item[2], item[8]] for item in QueryTab if item[3] == TokenSpaceGuid and item[4] == PcdCName]
validateranges = []
validlists = []
expressions = []
try:
for row in result:
comment = row[0]
LineNum = row[1]
comment = comment.strip("#")
comment = comment.strip()
oricomment = comment
if comment.startswith("@ValidRange"):
comment = comment.replace("@ValidRange", "", 1)
validateranges.append(comment.split("|")[1].strip())
if comment.startswith("@ValidList"):
comment = comment.replace("@ValidList", "", 1)
validlists.append(comment.split("|")[1].strip())
if comment.startswith("@Expression"):
comment = comment.replace("@Expression", "", 1)
expressions.append(comment.split("|")[1].strip())
except Exception as Exc:
ValidType = ""
if oricomment.startswith("@ValidRange"):
ValidType = "@ValidRange"
if oricomment.startswith("@ValidList"):
ValidType = "@ValidList"
if oricomment.startswith("@Expression"):
ValidType = "@Expression"
EdkLogger.error('Parser', FORMAT_INVALID, "The syntax for %s of PCD %s.%s is incorrect" % (ValidType, TokenSpaceGuid, PcdCName),
ExtraData=oricomment, File=self.MetaFile, Line=LineNum)
return set(), set(), set()
return set(validateranges), set(validlists), set(expressions)
## Python class representation of table storing platform data
class PlatformTable(MetaFileTable):
_COLUMN_ = '''
ID REAL PRIMARY KEY,
Model INTEGER NOT NULL,
Value1 TEXT NOT NULL,
Value2 TEXT,
Value3 TEXT,
Scope1 TEXT,
Scope2 TEXT,
Scope3 TEXT,
BelongsToItem REAL NOT NULL,
FromItem REAL NOT NULL,
StartLine INTEGER NOT NULL,
StartColumn INTEGER NOT NULL,
EndLine INTEGER NOT NULL,
EndColumn INTEGER NOT NULL,
Enabled INTEGER DEFAULT 0
'''
# used as table end flag, in case the changes to the database are not committed to the db file
_DUMMY_ = [-1, -1, '====', '====', '====', '====', '====','====', -1, -1, -1, -1, -1, -1, -1]
## Constructor
def __init__(self, Cursor, MetaFile, Temporary, FromItem=0):
MetaFileTable.__init__(self, Cursor, MetaFile, MODEL_FILE_DSC, Temporary, FromItem)
## Insert table
#
# Insert a record into table Dsc
#
# @param Model: Model of a Dsc item
# @param Value1: Value1 of a Dsc item
# @param Value2: Value2 of a Dsc item
# @param Value3: Value3 of a Dsc item
# @param Scope1: Arch of a Dsc item
# @param Scope2: Module type of a Dsc item
# @param BelongsToItem: The item belongs to which another item
# @param FromItem: The item belongs to which dsc file
# @param StartLine: StartLine of a Dsc item
# @param StartColumn: StartColumn of a Dsc item
# @param EndLine: EndLine of a Dsc item
# @param EndColumn: EndColumn of a Dsc item
# @param Enabled: If this item enabled
#
def Insert(self, Model, Value1, Value2, Value3, Scope1=TAB_ARCH_COMMON, Scope2=TAB_COMMON, Scope3=TAB_DEFAULT_STORES_DEFAULT,BelongsToItem=-1,
FromItem=-1, StartLine=-1, StartColumn=-1, EndLine=-1, EndColumn=-1, Enabled=1):
(Value1, Value2, Value3, Scope1, Scope2, Scope3) = (Value1.strip(), Value2.strip(), Value3.strip(), Scope1.strip(), Scope2.strip(), Scope3.strip())
self.ID = self.ID + self._ID_STEP_
row = [ self.ID,
Model,
Value1,
Value2,
Value3,
Scope1,
Scope2,
Scope3,
BelongsToItem,
FromItem,
StartLine,
StartColumn,
EndLine,
EndColumn,
Enabled
]
self.CurrentContent.append(row)
return self.ID
## Query table
#
# @param Model: The Model of Record
# @param Scope1: Arch of a Dsc item
# @param Scope2: Module type of a Dsc item
# @param BelongsToItem: The item belongs to which another item
# @param FromItem: The item belongs to which dsc file
#
# @retval: A recordSet of all found records
#
def Query(self, Model, Scope1=None, Scope2=None, BelongsToItem=None, FromItem=None):
QueryTab = self.CurrentContent
result = [item for item in QueryTab if item[1] == Model and item[-1]>0 ]
if Scope1 is not None and Scope1 != TAB_ARCH_COMMON:
Sc1 = set(['COMMON'])
Sc1.add(Scope1)
result = [item for item in result if item[5] in Sc1]
Sc2 = set( ['COMMON','DEFAULT'])
if Scope2 and Scope2 != TAB_COMMON:
if '.' in Scope2:
Index = Scope2.index('.')
NewScope = TAB_COMMON + Scope2[Index:]
Sc2.add(NewScope)
Sc2.add(Scope2)
result = [item for item in result if item[6] in Sc2]
if BelongsToItem is not None:
result = [item for item in result if item[8] == BelongsToItem]
else:
result = [item for item in result if item[8] < 0]
if FromItem is not None:
result = [item for item in result if item[9] == FromItem]
result = [ [r[2],r[3],r[4],r[5],r[6],r[7],r[0],r[9]] for r in result ]
return result
## Factory class to produce different storage for different type of meta-file
class MetaFileStorage(object):
_FILE_TABLE_ = {
MODEL_FILE_INF : ModuleTable,
MODEL_FILE_DEC : PackageTable,
MODEL_FILE_DSC : PlatformTable,
MODEL_FILE_OTHERS : MetaFileTable,
}
_FILE_TYPE_ = {
".inf" : MODEL_FILE_INF,
".dec" : MODEL_FILE_DEC,
".dsc" : MODEL_FILE_DSC,
}
_ObjectCache = {}
## Constructor
def __new__(Class, Cursor, MetaFile, FileType=None, Temporary=False, FromItem=None):
# no type given, try to find one
key = (MetaFile.Path, FileType,Temporary,FromItem)
if key in Class._ObjectCache:
return Class._ObjectCache[key]
if not FileType:
if MetaFile.Type in self._FILE_TYPE_:
FileType = Class._FILE_TYPE_[MetaFile.Type]
else:
FileType = MODEL_FILE_OTHERS
# don't pass the type around if it's well known
if FileType == MODEL_FILE_OTHERS:
Args = (Cursor, MetaFile, FileType, Temporary)
else:
Args = (Cursor, MetaFile, Temporary)
if FromItem:
Args = Args + (FromItem,)
# create the storage object and return it to caller
reval = Class._FILE_TABLE_[FileType](*Args)
if not Temporary:
Class._ObjectCache[key] = reval
return reval
| bsd-2-clause |
SUSE/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/containerservice/v2017_01_31/models/container_service.py | 2 | 5339 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ContainerService(Resource):
"""Container service.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:ivar provisioning_state: the current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param orchestrator_profile: Properties of the orchestrator.
:type orchestrator_profile: :class:`ContainerServiceOrchestratorProfile
<azure.mgmt.compute.containerservice.v2017_01_31.models.ContainerServiceOrchestratorProfile>`
:param custom_profile: Properties for custom clusters.
:type custom_profile: :class:`ContainerServiceCustomProfile
<azure.mgmt.compute.containerservice.v2017_01_31.models.ContainerServiceCustomProfile>`
:param service_principal_profile: Properties for cluster service
principals.
:type service_principal_profile:
:class:`ContainerServiceServicePrincipalProfile
<azure.mgmt.compute.containerservice.v2017_01_31.models.ContainerServiceServicePrincipalProfile>`
:param master_profile: Properties of master agents.
:type master_profile: :class:`ContainerServiceMasterProfile
<azure.mgmt.compute.containerservice.v2017_01_31.models.ContainerServiceMasterProfile>`
:param agent_pool_profiles: Properties of the agent pool.
:type agent_pool_profiles: list of
:class:`ContainerServiceAgentPoolProfile
<azure.mgmt.compute.containerservice.v2017_01_31.models.ContainerServiceAgentPoolProfile>`
:param windows_profile: Properties of Windows VMs.
:type windows_profile: :class:`ContainerServiceWindowsProfile
<azure.mgmt.compute.containerservice.v2017_01_31.models.ContainerServiceWindowsProfile>`
:param linux_profile: Properties of Linux VMs.
:type linux_profile: :class:`ContainerServiceLinuxProfile
<azure.mgmt.compute.containerservice.v2017_01_31.models.ContainerServiceLinuxProfile>`
:param diagnostics_profile: Properties of the diagnostic agent.
:type diagnostics_profile: :class:`ContainerServiceDiagnosticsProfile
<azure.mgmt.compute.containerservice.v2017_01_31.models.ContainerServiceDiagnosticsProfile>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'master_profile': {'required': True},
'agent_pool_profiles': {'required': True},
'linux_profile': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'orchestrator_profile': {'key': 'properties.orchestratorProfile', 'type': 'ContainerServiceOrchestratorProfile'},
'custom_profile': {'key': 'properties.customProfile', 'type': 'ContainerServiceCustomProfile'},
'service_principal_profile': {'key': 'properties.servicePrincipalProfile', 'type': 'ContainerServiceServicePrincipalProfile'},
'master_profile': {'key': 'properties.masterProfile', 'type': 'ContainerServiceMasterProfile'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ContainerServiceAgentPoolProfile]'},
'windows_profile': {'key': 'properties.windowsProfile', 'type': 'ContainerServiceWindowsProfile'},
'linux_profile': {'key': 'properties.linuxProfile', 'type': 'ContainerServiceLinuxProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'ContainerServiceDiagnosticsProfile'},
}
def __init__(self, location, master_profile, agent_pool_profiles, linux_profile, tags=None, orchestrator_profile=None, custom_profile=None, service_principal_profile=None, windows_profile=None, diagnostics_profile=None):
super(ContainerService, self).__init__(location=location, tags=tags)
self.provisioning_state = None
self.orchestrator_profile = orchestrator_profile
self.custom_profile = custom_profile
self.service_principal_profile = service_principal_profile
self.master_profile = master_profile
self.agent_pool_profiles = agent_pool_profiles
self.windows_profile = windows_profile
self.linux_profile = linux_profile
self.diagnostics_profile = diagnostics_profile
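# Rough construction sketch (illustrative only; the profile objects below are
# hypothetical instances of the corresponding ContainerService*Profile models,
# not values defined in this file):
#   cs = ContainerService(
#       location='westus',
#       master_profile=master_profile,
#       agent_pool_profiles=[agent_pool_profile],
#       linux_profile=linux_profile,
#   )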
| mit |
ssharpjr/taskbuster-boilerplate | taskbuster/apps/taskmanager/models.py | 1 | 2262 | # -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.core.validators import RegexValidator
from . import managers
class Profile(models.Model):
# Relations
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
related_name="profile",
verbose_name=_("user")
)
# Attributes - Mandatory
interaction = models.PositiveIntegerField(
default=0,
verbose_name=_("interaction")
)
# Attributes - Optional
# Object Manager
objects = managers.ProfileManager()
# Custom Properties
@property
def username(self):
return self.user.username
# Methods
# Meta and String
class Meta:
verbose_name = _("Profile")
verbose_name_plural = _("Profiles")
ordering = ("user",)
def __str__(self):
return self.user.username
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_profile_for_new_user(sender, created, instance, **kwargs):
if created:
profile = Profile(user=instance)
profile.save()
class Project(models.Model):
# Relations
user = models.ForeignKey(
Profile,
related_name="projects",
verbose_name=_("user")
)
# Attributes - Mandatory
name = models.CharField(
max_length=100,
verbose_name=_("name"),
help_text=_("Enter the project name")
)
color = models.CharField(
max_length=7,
default="#fff",
validators=[RegexValidator(
"(^#[0-9a-fA-F]{3}$)|(^#[0-9a-fA-F]{6}$)")],
verbose_name=_("color"),
help_text=_("Enter the hex color code, like #ccc or #cccccc")
)
# Attributes - Optional
# Object Manager
objects = managers.ProjectManager()
# Custom Properties
# Methods
# Meta and String
class Meta:
verbose_name = _("Project")
verbose_name_plural = _("Projects")
ordering = ("user", "name")
unique_together = ("user", "name")
def __str__(self):
return "%s - %s" % (self.user, self.name)
| mit |
pothosware/gnuradio | gr-filter/python/filter/optfir.py | 45 | 12761 | #
# Copyright 2004,2005,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Routines for designing optimal FIR filters.
For a great intro to how all this stuff works, see section 6.6 of
"Digital Signal Processing: A Practical Approach", Emmanuael C. Ifeachor
and Barrie W. Jervis, Adison-Wesley, 1993. ISBN 0-201-54413-X.
'''
import math, cmath
import filter_swig as filter
# ----------------------------------------------------------------
def low_pass (gain, Fs, freq1, freq2, passband_ripple_db, stopband_atten_db,
nextra_taps=2):
"""
Builds a low pass filter.
Args:
gain: Filter gain in the passband (linear)
Fs: Sampling rate (sps)
freq1: End of pass band (in Hz)
freq2: Start of stop band (in Hz)
passband_ripple_db: Pass band ripple in dB (should be small, < 1)
stopband_atten_db: Stop band attenuation in dB (should be large, >= 60)
nextra_taps: Extra taps to use in the filter (default=2)
"""
passband_dev = passband_ripple_to_dev (passband_ripple_db)
stopband_dev = stopband_atten_to_dev (stopband_atten_db)
desired_ampls = (gain, 0)
(n, fo, ao, w) = remezord ([freq1, freq2], desired_ampls,
[passband_dev, stopband_dev], Fs)
# The remezord typically under-estimates the filter order, so add 2 taps by default
taps = filter.pm_remez (n + nextra_taps, fo, ao, w, "bandpass")
return taps
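# Rough usage sketch for the helper above (hypothetical numbers, shown only to
# illustrate the argument order; a real design should pick its own specs):
#   taps = low_pass(gain=1.0, Fs=48000.0, freq1=9600.0, freq2=12000.0,
#                   passband_ripple_db=0.1, stopband_atten_db=60.0)
#   # 'taps' can then feed a FIR block, e.g. filter.fir_filter_fff(1, taps)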
def band_pass (gain, Fs, freq_sb1, freq_pb1, freq_pb2, freq_sb2,
passband_ripple_db, stopband_atten_db,
nextra_taps=2):
"""
Builds a band pass filter.
Args:
gain: Filter gain in the passband (linear)
Fs: Sampling rate (sps)
freq_sb1: End of stop band (in Hz)
freq_pb1: Start of pass band (in Hz)
freq_pb2: End of pass band (in Hz)
freq_sb2: Start of stop band (in Hz)
passband_ripple_db: Pass band ripple in dB (should be small, < 1)
stopband_atten_db: Stop band attenuation in dB (should be large, >= 60)
nextra_taps: Extra taps to use in the filter (default=2)
"""
passband_dev = passband_ripple_to_dev (passband_ripple_db)
stopband_dev = stopband_atten_to_dev (stopband_atten_db)
desired_ampls = (0, gain, 0)
desired_freqs = [freq_sb1, freq_pb1, freq_pb2, freq_sb2]
desired_ripple = [stopband_dev, passband_dev, stopband_dev]
(n, fo, ao, w) = remezord (desired_freqs, desired_ampls,
desired_ripple, Fs)
# The remezord typically under-estimates the filter order, so add 2 taps by default
taps = filter.pm_remez (n + nextra_taps, fo, ao, w, "bandpass")
return taps
def complex_band_pass (gain, Fs, freq_sb1, freq_pb1, freq_pb2, freq_sb2,
passband_ripple_db, stopband_atten_db,
nextra_taps=2):
"""
Builds a band pass filter with complex taps by making an LPF and
spinning it up to the right center frequency
Args:
gain: Filter gain in the passband (linear)
Fs: Sampling rate (sps)
freq_sb1: End of stop band (in Hz)
freq_pb1: Start of pass band (in Hz)
freq_pb2: End of pass band (in Hz)
freq_sb2: Start of stop band (in Hz)
passband_ripple_db: Pass band ripple in dB (should be small, < 1)
stopband_atten_db: Stop band attenuation in dB (should be large, >= 60)
nextra_taps: Extra taps to use in the filter (default=2)
"""
center_freq = (freq_pb2 + freq_pb1) / 2.0
lp_pb = (freq_pb2 - center_freq)/1.0
lp_sb = freq_sb2 - center_freq
lptaps = low_pass(gain, Fs, lp_pb, lp_sb, passband_ripple_db,
stopband_atten_db, nextra_taps)
spinner = [cmath.exp(2j*cmath.pi*center_freq/Fs*i) for i in xrange(len(lptaps))]
taps = [s*t for s,t in zip(spinner, lptaps)]
return taps
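# Note on the implementation above: the low-pass prototype is real, and the
# "spinner" multiplies tap i by exp(j*2*pi*center_freq*i/Fs); that modulation
# shifts the low-pass response up to the band centered at center_freq, which is
# what makes the returned taps complex.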
def band_reject (gain, Fs, freq_pb1, freq_sb1, freq_sb2, freq_pb2,
passband_ripple_db, stopband_atten_db,
nextra_taps=2):
"""
Builds a band reject filter.
Args:
gain: Filter gain in the passband (linear)
Fs: Sampling rate (sps)
freq_pb1: End of pass band (in Hz)
freq_sb1: Start of stop band (in Hz)
freq_sb2: End of stop band (in Hz)
freq_pb2: Start of pass band (in Hz)
passband_ripple_db: Pass band ripple in dB (should be small, < 1)
stopband_atten_db: Stop band attenuation in dB (should be large, >= 60)
nextra_taps: Extra taps to use in the filter (default=2)
"""
passband_dev = passband_ripple_to_dev (passband_ripple_db)
stopband_dev = stopband_atten_to_dev (stopband_atten_db)
desired_ampls = (gain, 0, gain)
desired_freqs = [freq_pb1, freq_sb1, freq_sb2, freq_pb2]
desired_ripple = [passband_dev, stopband_dev, passband_dev]
(n, fo, ao, w) = remezord (desired_freqs, desired_ampls,
desired_ripple, Fs)
# Make sure we use an odd number of taps
if((n+nextra_taps)%2 == 1):
n += 1
# The remezord typically under-estimates the filter order, so add 2 taps by default
taps = filter.pm_remez (n + nextra_taps, fo, ao, w, "bandpass")
return taps
def high_pass (gain, Fs, freq1, freq2, passband_ripple_db, stopband_atten_db,
nextra_taps=2):
"""
Builds a high pass filter.
Args:
gain: Filter gain in the passband (linear)
Fs: Sampling rate (sps)
freq1: End of stop band (in Hz)
freq2: Start of pass band (in Hz)
passband_ripple_db: Pass band ripple in dB (should be small, < 1)
stopband_atten_db: Stop band attenuation in dB (should be large, >= 60)
nextra_taps: Extra taps to use in the filter (default=2)
"""
passband_dev = passband_ripple_to_dev (passband_ripple_db)
stopband_dev = stopband_atten_to_dev (stopband_atten_db)
desired_ampls = (0, 1)
(n, fo, ao, w) = remezord ([freq1, freq2], desired_ampls,
[stopband_dev, passband_dev], Fs)
# For a HPF, we need to use an odd number of taps
# In filter.remez, ntaps = n+1, so n must be even
if((n+nextra_taps)%2 == 1):
n += 1
# The remezord typically under-estimates the filter order, so add 2 taps by default
taps = filter.pm_remez (n + nextra_taps, fo, ao, w, "bandpass")
return taps
# ----------------------------------------------------------------
def stopband_atten_to_dev (atten_db):
"""Convert a stopband attenuation in dB to an absolute value"""
return 10**(-atten_db/20)
def passband_ripple_to_dev (ripple_db):
"""Convert passband ripple spec expressed in dB to an absolute value"""
return (10**(ripple_db/20)-1)/(10**(ripple_db/20)+1)
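# Worked example for the two conversions above (approximate values):
#   stopband_atten_to_dev(60.0) = 10**(-60/20.) = 0.001
#   passband_ripple_to_dev(1.0) = (10**(1/20.) - 1) / (10**(1/20.) + 1) ~= 0.0575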
# ----------------------------------------------------------------
def remezord (fcuts, mags, devs, fsamp = 2):
'''
FIR order estimator (lowpass, highpass, bandpass, multiband).
(n, fo, ao, w) = remezord (f, a, dev)
(n, fo, ao, w) = remezord (f, a, dev, fs)
(n, fo, ao, w) = remezord (f, a, dev) finds the approximate order,
normalized frequency band edges, frequency band amplitudes, and
weights that meet input specifications f, a, and dev, to use with
the remez command.
* f is a sequence of frequency band edges (between 0 and Fs/2, where
Fs is the sampling frequency), and a is a sequence specifying the
desired amplitude on the bands defined by f. The length of f is
twice the length of a, minus 2. The desired function is
piecewise constant.
* dev is a sequence the same size as a that specifies the maximum
allowable deviation or ripples between the frequency response
and the desired amplitude of the output filter, for each band.
Use remez with the resulting order n, frequency sequence fo,
amplitude response sequence ao, and weights w to design the filter b
which approximately meets the specifications given by remezord
input parameters f, a, and dev:
b = remez (n, fo, ao, w)
(n, fo, ao, w) = remezord (f, a, dev, Fs) specifies a sampling frequency Fs.
Fs defaults to 2 Hz, implying a Nyquist frequency of 1 Hz. You can
therefore specify band edges scaled to a particular application's
sampling frequency.
In some cases remezord underestimates the order n. If the filter
does not meet the specifications, try a higher order such as n+1
or n+2.
'''
# get local copies
fcuts = fcuts[:]
mags = mags[:]
devs = devs[:]
for i in range (len (fcuts)):
fcuts[i] = float (fcuts[i]) / fsamp
nf = len (fcuts)
nm = len (mags)
nd = len (devs)
nbands = nm
if nm != nd:
raise ValueError, "Length of mags and devs must be equal"
if nf != 2 * (nbands - 1):
raise ValueError, "Length of f must be 2 * len (mags) - 2"
for i in range (len (mags)):
if mags[i] != 0: # if not stopband, get relative deviation
devs[i] = devs[i] / mags[i]
# separate the passband and stopband edges
f1 = fcuts[0::2]
f2 = fcuts[1::2]
n = 0
min_delta = 2
for i in range (len (f1)):
if f2[i] - f1[i] < min_delta:
n = i
min_delta = f2[i] - f1[i]
if nbands == 2:
# lowpass or highpass case (use formula)
l = lporder (f1[n], f2[n], devs[0], devs[1])
else:
# bandpass or multipass case
# try different lowpasses and take the worst one that
# goes through the BP specs
l = 0
for i in range (1, nbands-1):
l1 = lporder (f1[i-1], f2[i-1], devs[i], devs[i-1])
l2 = lporder (f1[i], f2[i], devs[i], devs[i+1])
l = max (l, l1, l2)
n = int (math.ceil (l)) - 1 # need order, not length for remez
# cook up remez compatible result
ff = [0] + fcuts + [1]
for i in range (1, len (ff) - 1):
ff[i] *= 2
aa = []
for a in mags:
aa = aa + [a, a]
max_dev = max (devs)
wts = [1] * len(devs)
for i in range (len (wts)):
wts[i] = max_dev / devs[i]
return (n, ff, aa, wts)
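# Rough usage sketch for remezord (illustrative spec only): a unity-gain
# lowpass with band edges at 0.2*Fs and 0.25*Fs and 1% deviation per band,
# fed into the same Parks-McClellan call used elsewhere in this file:
#   (n, fo, ao, w) = remezord([0.2, 0.25], [1, 0], [0.01, 0.01], fsamp=1.0)
#   taps = filter.pm_remez(n + 2, fo, ao, w, "bandpass")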
# ----------------------------------------------------------------
def lporder (freq1, freq2, delta_p, delta_s):
'''
FIR lowpass filter length estimator. freq1 and freq2 are
normalized to the sampling frequency. delta_p is the passband
deviation (ripple), delta_s is the stopband deviation (ripple).
Note, this works for high pass filters too (freq1 > freq2), but
doesn't work well if the transition is near f == 0 or f == fs/2
From Herrmann et al (1973), Practical design rules for optimum
finite impulse response filters. Bell System Technical J., 52, 769-99
'''
df = abs (freq2 - freq1)
ddp = math.log10 (delta_p)
dds = math.log10 (delta_s)
a1 = 5.309e-3
a2 = 7.114e-2
a3 = -4.761e-1
a4 = -2.66e-3
a5 = -5.941e-1
a6 = -4.278e-1
b1 = 11.01217
b2 = 0.5124401
t1 = a1 * ddp * ddp
t2 = a2 * ddp
t3 = a4 * ddp * ddp
t4 = a5 * ddp
dinf=((t1 + t2 + a3) * dds) + (t3 + t4 + a6)
ff = b1 + b2 * (ddp - dds)
n = dinf / df - ff * df + 1
return n
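# Rough usage sketch for lporder (illustrative numbers): estimate the length of
# a lowpass whose transition band runs from 0.20 to 0.25 of the sample rate,
# with 1% passband and 0.1% stopband deviation:
#   l = lporder(0.20, 0.25, 0.01, 0.001)
#   ntaps = int(math.ceil(l))  # remezord subtracts 1 from this to get the order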
def bporder (freq1, freq2, delta_p, delta_s):
'''
FIR bandpass filter length estimator. freq1 and freq2 are
normalized to the sampling frequency. delta_p is the passband
deviation (ripple), delta_s is the stopband deviation (ripple).
From Mintzer and Liu (1979)
'''
df = abs (freq2 - freq1)
ddp = math.log10 (delta_p)
dds = math.log10 (delta_s)
a1 = 0.01201
a2 = 0.09664
a3 = -0.51325
a4 = 0.00203
a5 = -0.57054
a6 = -0.44314
t1 = a1 * ddp * ddp
t2 = a2 * ddp
t3 = a4 * ddp * ddp
t4 = a5 * ddp
cinf = dds * (t1 + t2 + a3) + t3 + t4 + a6
ginf = -14.6 * math.log10 (delta_p / delta_s) - 16.9
n = cinf / df + ginf * df + 1
return n
| gpl-3.0 |
eerorika/ansible | lib/ansible/utils/module_docs_fragments/nxos.py | 6 | 3368 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device. This value applies to either I(cli) or I(nxapi). The port
value will default to the appropriate transport common port if
none is provided in the task. (cli=22, http=80, https=443).
required: false
default: 0 (use common port)
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. The value of I(username) is used to authenticate
either the CLI login or the nxapi authentication depending on which
transport is used. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
required: false
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This is a common argument used for either I(cli)
or I(nxapi) transports. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
required: false
default: null
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This argument is only used for the I(cli)
transport. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
required: false
transport:
description:
- Configures the transport connection to use when connecting to the
remote device. The transport argument supports connectivity to the
device over cli (ssh) or nxapi.
required: true
default: cli
use_ssl:
description:
- Configures the I(transport) to use SSL if set to true only when the
C(transport=nxapi), otherwise this value is ignored.
required: false
default: no
choices: ['yes', 'no']
provider:
description:
- Convenience method that allows all I(nxos) arguments to be passed as
a dict object. All constraints (required, choices, etc) must be
met either by individual arguments or values in this dict.
required: false
default: null
"""
| gpl-3.0 |
unindented/streamcode | client/static/jsrepl/extern/python/reloop-closured/lib/python2.7/email/mime/application.py | 414 | 1256 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Keith Dart
# Contact: email-sig@python.org
"""Class representing application/* type MIME documents."""
__all__ = ["MIMEApplication"]
from email import encoders
from email.mime.nonmultipart import MIMENonMultipart
class MIMEApplication(MIMENonMultipart):
"""Class for generating application/* MIME documents."""
def __init__(self, _data, _subtype='octet-stream',
_encoder=encoders.encode_base64, **_params):
"""Create an application/* type MIME document.
_data is a string containing the raw application data.
_subtype is the MIME content type subtype, defaulting to
'octet-stream'.
_encoder is a function which will perform the actual encoding for
transport of the application data, defaulting to base64 encoding.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type
header.
"""
if _subtype is None:
raise TypeError('Invalid application MIME subtype')
MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
self.set_payload(_data)
_encoder(self)
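# Rough usage sketch (hypothetical file name and payload, for illustration):
#   with open('report.pdf', 'rb') as fp:
#       part = MIMEApplication(fp.read(), _subtype='pdf', Name='report.pdf')
#   part['Content-Disposition'] = 'attachment; filename="report.pdf"'
#   # 'Name' becomes a parameter on the Content-Type header, per the docstring
#   # above; the payload is base64-encoded by the default encoder.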
| mit |
zhanghenry/stocks | tests/introspection/models.py | 103 | 1079 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
facebook_user_id = models.BigIntegerField(null=True)
raw_data = models.BinaryField(null=True)
small_int = models.SmallIntegerField()
class Meta:
unique_together = ('first_name', 'last_name')
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
body = models.TextField(default='')
reporter = models.ForeignKey(Reporter)
response_to = models.ForeignKey('self', null=True)
def __str__(self):
return self.headline
class Meta:
ordering = ('headline',)
index_together = [
["headline", "pub_date"],
]
| bsd-3-clause |
darionyaphet/flink | flink-python/pyflink/table/sources.py | 17 | 4602 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.table.types import _to_java_type
__all__ = ['TableSource', 'CsvTableSource']
class TableSource(object):
"""
Defines a table from an external system or location.
"""
def __init__(self, j_table_source):
self._j_table_source = j_table_source
class CsvTableSource(TableSource):
"""
A :class:`TableSource` for simple CSV files with a
(logically) unlimited number of fields.
Example:
::
>>> CsvTableSource("/csv/file/path", ["a", "b"], [DataTypes.INT(), DataTypes.STRING()])
:param source_path: The path to the CSV file.
:type source_path: str
:param field_names: The names of the table fields.
:type field_names: collections.Iterable[str]
:param field_types: The types of the table fields.
:type field_types: collections.Iterable[str]
:param field_delim: The field delimiter, "," by default.
:type field_delim: str, optional
:param line_delim: The row delimiter, "\\n" by default.
:type line_delim: str, optional
:param quote_character: An optional quote character for String values, null by default.
:type quote_character: str, optional
:param ignore_first_line: Flag to ignore the first line, false by default.
:type ignore_first_line: bool, optional
:param ignore_comments: An optional prefix to indicate comments, null by default.
:type ignore_comments: str, optional
:param lenient: Flag to skip records with parse errors instead of failing, false by default.
:type lenient: bool, optional
:param empty_column_as_null: Treat empty column as null, false by default.
:type empty_column_as_null: bool, optional
"""
def __init__(
self,
source_path,
field_names,
field_types,
field_delim=None,
line_delim=None,
quote_character=None,
ignore_first_line=None,
ignore_comments=None,
lenient=None,
empty_column_as_null=None,
):
gateway = get_gateway()
builder = gateway.jvm.CsvTableSource.builder()
builder.path(source_path)
for (field_name, field_type) in zip(field_names, field_types):
builder.field(field_name, _to_java_type(field_type))
if field_delim is not None:
builder.fieldDelimiter(field_delim)
if line_delim is not None:
builder.lineDelimiter(line_delim)
if quote_character is not None:
# Java API has a Character type for this field. At time of writing,
# Py4J will convert the Python str to Java Character by taking only
# the first character. This results in either:
# - Silently truncating a Python str with more than one character
# with no further type error from either Py4J or Java
# CsvTableSource
# - java.lang.StringIndexOutOfBoundsException from Py4J for an
# empty Python str. That error can be made more friendly here.
if len(quote_character) != 1:
raise ValueError(
"Expected a single CSV quote character but got '{}'".format(quote_character)
)
builder.quoteCharacter(quote_character)
if ignore_first_line:
builder.ignoreFirstLine()
if ignore_comments is not None:
builder.commentPrefix(ignore_comments)
if lenient:
builder.ignoreParseErrors()
if empty_column_as_null:
builder.emptyColumnAsNull()
super(CsvTableSource, self).__init__(builder.build())
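# A slightly fuller construction sketch than the docstring example (illustrative
# values; the path and field names are hypothetical):
#   source = CsvTableSource(
#       "/tmp/orders.csv",
#       ["id", "amount"],
#       [DataTypes.INT(), DataTypes.DOUBLE()],
#       field_delim=";",
#       ignore_first_line=True,
#       empty_column_as_null=True,
#   )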
| apache-2.0 |
jeffstaley/cyflash | cyflash/cyacd_test.py | 1 | 1628 | from cStringIO import StringIO
import unittest
import cyacd
class BootloaderRowTest(unittest.TestCase):
def testParseRow(self):
rowdata = ":000018008000100020110C0000E92D0000E92D000008B5024B83F3088802F0E8F800100020F8B572B6002406236343704D0134EE187279707831793778B3781202F67800020A4338431904084337063843002103F09FF8032CE7D1291C12316548802203F08EF80023191C634AFF25141C143418593C32061CAE434F00C4B2351CD219002CB8"
blrow = cyacd.BootloaderRow.read(rowdata)
self.assertEquals(blrow.array_id, 0)
self.assertEquals(blrow.row_number, 0x18)
self.assertEquals(len(blrow.data), 0x80)
self.assertEquals(blrow.data.encode('hex').upper(), rowdata[11:-2])
def testParseFile(self):
filedata = """04A611931101
:000018008000100020110C0000E92D0000E92D000008B5024B83F3088802F0E8F800100020F8B572B6002406236343704D0134EE187279707831793778B3781202F67800020A4338431904084337063843002103F09FF8032CE7D1291C12316548802203F08EF80023191C634AFF25141C143418593C32061CAE434F00C4B2351CD219002CB8
:000019008007D0167857787619013C3770E4B20232F5E7C0B204330918282BE4D1564A574B574C584D584F59491A6099262C604F20574A584B584D3E600F240860574F58491A6003262C608720564B574C3E603C220860564DD82756491A6038012260554A2E60554B0860554C554D56481660C0270E2655491E6002222C6093260760534BB3"""
bldata = cyacd.BootloaderData.read(StringIO(filedata))
self.assertEquals(bldata.silicon_id, 0x04A61193)
self.assertEquals(bldata.silicon_rev, 0x11)
self.assertEquals(bldata.checksum_type, 0x01)
self.assertEquals(len(bldata.rows), 2)
self.assertTrue(all(isinstance(row, cyacd.BootloaderRow) for row in bldata.rows))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
ArcherCraftStore/ArcherVMPeridot | Python/Lib/encodings/shift_jis_2004.py | 816 | 1059 | #
# shift_jis_2004.py: Python Unicode Codec for SHIFT_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jis_2004')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='shift_jis_2004',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| apache-2.0 |
ashmastaflash/gwdetect | dependencies/netaddr-0.7.10/build/lib/netaddr/tests/__init__.py | 9 | 2319 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2012, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""Runs all netaddr unit tests."""
from os.path import abspath, basename, dirname, join as pathjoin
import sys
import glob
import doctest
import unittest
sys.path.insert(0, abspath(pathjoin(dirname(__file__), '..', '..')))
#-----------------------------------------------------------------------------
def test_suite_all():
test_dirs = [
'ip',
'eui',
'strategy',
'core'
]
base_path = abspath(pathjoin(dirname(__file__), '..'))
# Select tests based on the version of the Python interpreter.
py_ver_dir = '2.x'
if sys.version_info[0] == 3:
py_ver_dir = '3.x'
# Gather list of files containing tests.
test_files = []
for entry in test_dirs:
test_path = pathjoin(base_path, "tests", py_ver_dir, entry, "*.txt")
files = glob.glob(test_path)
test_files.extend(files)
sys.stdout.write('testdir: %s\n' % '\n'.join(test_files))
# Add anything to the skiplist that we want to leave out.
skiplist = []
# Drop platform specific tests for other platforms.
platform_tests = ['platform_darwin.txt', 'platform_linux2.txt', 'platform_win32.txt']
for platform_test in platform_tests:
if not sys.platform in platform_test:
skiplist.append(platform_test)
# Exclude any entries from the skip list.
test_files = [t for t in test_files if basename(t) not in skiplist]
# Build and return a complete unittest test suite.
suite = unittest.TestSuite()
for test_file in test_files:
doctest_suite = doctest.DocFileSuite(test_file,
optionflags=doctest.ELLIPSIS, module_relative=False)
suite.addTest(doctest_suite)
return suite
#-----------------------------------------------------------------------------
def run():
runner = unittest.TextTestRunner()
runner.run(test_suite_all())
#-----------------------------------------------------------------------------
if __name__ == "__main__":
run()
| mit |
chouseknecht/openshift-restclient-python | openshift/test/test_v1_service_account_restriction.py | 1 | 4339 | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'metav1.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_service_account_restriction import V1ServiceAccountRestriction
class TestV1ServiceAccountRestriction(unittest.TestCase):
""" V1ServiceAccountRestriction unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ServiceAccountRestriction(self):
"""
Test V1ServiceAccountRestriction
"""
# FIXME: construct object with mandatory attributes with example values
#model = openshift.client.models.v1_service_account_restriction.V1ServiceAccountRestriction()
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
mjfarmer/scada_py | env/lib/python2.7/site-packages/pip/__init__.py | 19 | 12070 | #!/usr/bin/env python
import os
import optparse
import sys
import re
import errno
# Upstream pip vendorizes a bunch of its dependencies. Debian de-vendorizes
# (unbundles) these dependencies to be compliant with Debian policy. Instead,
# these dependencies are packaged as wheel (.whl) files in a known location.
# When pip itself executes, we have to arrange for these wheels to show up
# earlier on sys.path than any other version of these packages, otherwise
# things can break. See for example Bug #744145.
#
# The location of the wheels differs depending on whether we're inside or
# outside a virtual environment, regardless of whether that venv was created
# with virtualenv or pyvenv. The first thing we have to do is figure out if
# we're inside or outside a venv, then search the appropriate wheel directory
# and add all the .whls found there to the front of sys.path. As per Debian
# Python Policy, only the wheels needed to support this de-vendorization will
# be present, so it's safe to add them all.
#
# venv determination is a bit of a black art, but this algorithm should work
# in both Python 2 (virtualenv-only) and Python 3 (pyvenv and virtualenv). -
# updated by barry@debian.org 2015-02-25
base_prefix = getattr(sys, 'base_prefix', None)
real_prefix = getattr(sys, 'real_prefix', None)
if base_prefix is None:
# Python 2 has no base_prefix at all. It also has no pyvenv. Fall back
# to checking real_prefix.
if real_prefix is None:
# We are not in a venv.
in_venv = False
else:
# We're in a Python 2 virtualenv created venv, but real_prefix should
# never be the same as sys.prefix.
assert sys.prefix != real_prefix
in_venv = True
elif sys.prefix != base_prefix:
# We're in a Python 3, pyvenv created venv.
in_venv = True
elif real_prefix is None:
# We're in Python 3, outside a venv, but base better equal prefix.
assert sys.prefix == base_prefix
in_venv = False
else:
# We're in a Python 3, virtualenv created venv.
assert real_prefix != sys.prefix
in_venv = True
if in_venv:
wheel_dir = os.path.join(sys.prefix, 'lib', 'python-wheels')
else:
wheel_dir = '/usr/share/python-wheels'
# We'll add all the wheels we find to the front of sys.path so that they're
# found first, even if the same dependencies are available in site-packages.
try:
for filename in os.listdir(wheel_dir):
if os.path.splitext(filename)[1] == '.whl':
sys.path.insert(0, os.path.join(wheel_dir, filename))
# FileNotFoundError doesn't exist in Python 2, but ignore it anyway.
except OSError as error:
if error.errno != errno.ENOENT:
raise
from pip.exceptions import InstallationError, CommandError, PipError
from pip.log import logger
from pip.util import get_installed_distributions, get_prog
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import commands, get_summaries, get_similar_commands
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "1.5.6"
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash or zsh).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for uninstall command
if subcommand_name == 'uninstall' and not current.startswith('-'):
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands[subcommand_name]()
options += [(opt.get_opt_string(), opt.nargs)
for opt in subcommand.parser.option_list_all
if opt.help != optparse.SUPPRESS_HELP]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
subcommands += [i.get_opt_string() for i in opts
if i.help != optparse.SUPPRESS_HELP]
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
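# Completion is driven entirely by environment variables: the shell helper
# referenced in the docstring exports PIP_AUTO_COMPLETE plus COMP_WORDS and
# COMP_CWORD before re-invoking pip, and the function above prints candidate
# words for the shell to consume.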
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3])
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this call
# is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
    # general_options: ['--timeout=5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0].lower()
    # all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(args_else[0].lower())
if cmd_name not in commands:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
return cmd_name, cmd_args
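# Entry point: run shell completion if requested, then parse the arguments and
# dispatch to the selected subcommand.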
def main(initial_args=None):
if initial_args is None:
initial_args = sys.argv[1:]
autocomplete()
try:
cmd_name, cmd_args = parseopts(initial_args)
except PipError:
e = sys.exc_info()[1]
sys.stderr.write("ERROR: %s" % e)
sys.stderr.write(os.linesep)
sys.exit(1)
command = commands[cmd_name]()
return command.main(cmd_args)
def bootstrap():
"""
Bootstrapping function to be called from install-pip.py script.
"""
pkgs = ['pip']
try:
import setuptools
except ImportError:
pkgs.append('setuptools')
return main(['install', '--upgrade'] + pkgs + sys.argv[1:])
############################################################
## Writing freeze files
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
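    # Build a FrozenRequirement from an installed distribution, emitting an
    # editable VCS requirement when the install location is under version
    # control and falling back to a pinned '==' requirement otherwise.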
@classmethod
def from_dist(cls, dist, dependency_links, find_tags=False):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip.vcs import vcs, get_src_requirement
if vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location, find_tags)
except InstallationError:
ex = sys.exc_info()[1]
logger.warn("Error when trying to get requirement for VCS system %s, falling back to uneditable format" % ex)
req = None
if req is None:
logger.warn('Could not determine repository location of %s' % location)
comments.append('## !! Could not determine repository location')
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] == '=='
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend(
).get_location(dist, dependency_links)
if not svn_location:
logger.warn(
'Warning: cannot find svn location for %s' % req)
comments.append('## FIXME: could not find svn URL in dependency_links for this package:')
else:
comments.append('# Installing as editable to satisfy requirement %s:' % req)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (svn_location, rev, cls.egg_name(dist))
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
exit = main()
if exit:
sys.exit(exit)
| gpl-3.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Tools/pybench/With.py | 43 | 4137 | from __future__ import with_statement
from pybench import Test
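# Each test body performs 'operations' statements per round; calibrate() runs
# the same empty loop so pybench can subtract the loop overhead.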
class WithFinally(Test):
version = 2.0
operations = 20
rounds = 80000
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
pass
def test(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
def calibrate(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
pass
class TryFinally(Test):
version = 2.0
operations = 20
rounds = 80000
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self):
# "Context manager" objects used just for their cleanup
# actions in finally blocks usually don't have parameters.
pass
def test(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
def calibrate(self):
cm = self.ContextManager()
for i in xrange(self.rounds):
pass
class WithRaiseExcept(Test):
version = 2.0
operations = 2 + 3 + 3
rounds = 100000
class BlockExceptions(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
return True
def test(self):
error = ValueError
be = self.BlockExceptions()
for i in xrange(self.rounds):
with be: raise error
with be: raise error
with be: raise error,"something"
with be: raise error,"something"
with be: raise error,"something"
with be: raise error("something")
with be: raise error("something")
with be: raise error("something")
def calibrate(self):
error = ValueError
be = self.BlockExceptions()
for i in xrange(self.rounds):
pass
| mit |
CitoEngine/cito_engine | app/cito_engine/actions/json_formatter.py | 1 | 1266 | """Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import simplejson
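# Replace the __EVENTID__, __INCIDENTID__, __ELEMENT__ and __MESSAGE__
# placeholders in the stored plugin parameters with JSON-encoded values and
# wrap the result in the plugin call payload.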
def create_json_parameters(event_action, incident, message=None):
plugin_parameters = event_action.pluginParameters
plugin_parameters = re.sub('"__EVENTID__"', simplejson.dumps(unicode(incident.event.id)), plugin_parameters)
plugin_parameters = re.sub('"__INCIDENTID__"', simplejson.dumps(unicode(incident.id)), plugin_parameters)
plugin_parameters = re.sub('"__ELEMENT__"', simplejson.dumps(unicode(incident.element)), plugin_parameters)
plugin_parameters = re.sub('"__MESSAGE__"', simplejson.dumps(unicode(message)), plugin_parameters)
return '{"plugin": %s, "parameters": %s}' % (simplejson.dumps(unicode(event_action.plugin.name)), plugin_parameters) | apache-2.0 |
ymcagodme/Norwalk-Judo | django/contrib/gis/gdal/feature.py | 321 | 3998 | # The GDAL C library, OGR exception, and the Field object
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.srs import SpatialReference
# ctypes function prototypes
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"A class that wraps an OGR Feature, needs to be instantiated from a Layer object."
#### Python 'magic' routines ####
def __init__(self, feat, fdefn):
"Initializes on the pointers for the feature and the layer definition."
if not feat or not fdefn:
raise OGRException('Cannot create OGR Feature, invalid pointer given.')
self.ptr = feat
self._fdefn = fdefn
def __del__(self):
"Releases a reference to this object."
if self._ptr: capi.destroy_feature(self._ptr)
def __getitem__(self, index):
"""
Gets the Field object at the specified index, which may be either
an integer or the Field's string label. Note that the Field object
is not the field's _value_ -- use the `get` method instead to
retrieve the value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, basestring):
i = self.index(index)
else:
if index < 0 or index > self.num_fields:
raise OGRIndexError('index out of range')
i = index
return Field(self.ptr, i)
def __iter__(self):
"Iterates over each field in the Feature."
for i in xrange(self.num_fields):
yield self[i]
def __len__(self):
"Returns the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
def __eq__(self, other):
"Does equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
#### Feature Properties ####
@property
def fid(self):
"Returns the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Returns the name of the layer for the feature."
return capi.get_feat_name(self._fdefn)
@property
def num_fields(self):
"Returns the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Returns a list of fields in the Feature."
return [capi.get_field_name(capi.get_field_defn(self._fdefn, i))
for i in xrange(self.num_fields)]
@property
def geom(self):
"Returns the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
def geom_type(self):
"Returns the OGR Geometry Type for this Feture."
return OGRGeomType(capi.get_fd_geom_type(self._fdefn))
#### Feature Methods ####
def get(self, field):
"""
Returns the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, 'name', field)
return self[field_name].value
def index(self, field_name):
"Returns the index of the given field name."
i = capi.get_field_index(self.ptr, field_name)
if i < 0: raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
return i
| bsd-3-clause |
mdibaiee/servo | tests/wpt/css-tests/tools/html5lib/html5lib/filters/lint.py | 979 | 4306 | from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
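# Sanity-checks the token stream: tag and attribute types, void-element
# handling, start/end tag balance and content model flag transitions.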
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
if type == "StartTag":
open_elements.append(name)
for name, value in token["data"]:
if not isinstance(name, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name})
if not name:
raise LintError(_("Empty attribute name"))
if not isinstance(value, str):
raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value})
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data})
if not data:
raise LintError(_("%(type)s token with empty data") % {"type": type})
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %(type)s") % {"type": type})
yield token
| mpl-2.0 |
quantumlib/Cirq | dev_tools/profiling/benchmark_serializers.py | 1 | 4296 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for benchmarking serialization of large circuits.
This tool was originally introduced to enable comparison of the two JSON
serialization protocols (gzip and non-gzip):
https://github.com/quantumlib/Cirq/pull/3662
This is part of the "efficient serialization" effort:
https://github.com/quantumlib/Cirq/issues/3438
Run this benchmark with the following command (make sure to install cirq-dev):
python3 dev_tools/profiling/benchmark_serializers.py \
--num_gates=<int> --nesting_depth=<int> --num_repetitions=<int>
WARNING: runtime increases exponentially with nesting_depth. Values much
higher than nesting_depth=10 are not recommended.
"""
import argparse
import sys
import timeit
import numpy as np
import cirq
_JSON_GZIP = 'json_gzip'
_JSON = 'json'
NUM_QUBITS = 8
SUFFIXES = ['B', 'kB', 'MB', 'GB', 'TB']
def serialize(serializer: str, num_gates: int, nesting_depth: int) -> int:
""""Runs a round-trip of the serializer."""
circuit = cirq.Circuit()
for _ in range(num_gates):
which = np.random.choice(['expz', 'expw', 'exp11'])
if which == 'expw':
q1 = cirq.GridQubit(0, np.random.randint(NUM_QUBITS))
circuit.append(
cirq.PhasedXPowGate(
phase_exponent=np.random.random(), exponent=np.random.random()
).on(q1)
)
elif which == 'expz':
q1 = cirq.GridQubit(0, np.random.randint(NUM_QUBITS))
circuit.append(cirq.Z(q1) ** np.random.random())
elif which == 'exp11':
q1 = cirq.GridQubit(0, np.random.randint(NUM_QUBITS - 1))
q2 = cirq.GridQubit(0, q1.col + 1)
circuit.append(cirq.CZ(q1, q2) ** np.random.random())
cs = [circuit]
for _ in range(1, nesting_depth):
fc = cs[-1].freeze()
cs.append(cirq.Circuit(fc.to_op(), fc.to_op()))
test_circuit = cs[-1]
if serializer == _JSON:
json_data = cirq.to_json(test_circuit)
assert json_data is not None
data_size = len(json_data)
cirq.read_json(json_text=json_data)
elif serializer == _JSON_GZIP:
gzip_data = cirq.to_json_gzip(test_circuit)
assert gzip_data is not None
data_size = len(gzip_data)
cirq.read_json_gzip(gzip_raw=gzip_data)
return data_size
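# Time a full serialize/deserialize round trip for each serializer and report
# the average runtime together with the serialized payload size.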
def main(
num_gates: int,
nesting_depth: int,
num_repetitions: int,
setup: str = 'from __main__ import serialize',
):
for serializer in [_JSON_GZIP, _JSON]:
print()
print(f'Using serializer "{serializer}":')
command = f'serialize(\'{serializer}\', {num_gates}, {nesting_depth})'
time = timeit.timeit(command, setup, number=num_repetitions)
print(f'Round-trip serializer time: {time / num_repetitions}s')
data_size = float(serialize(serializer, num_gates, nesting_depth))
suffix_idx = 0
while data_size > 1000:
data_size /= 1024
suffix_idx += 1
print(f'Serialized data size: {data_size} {SUFFIXES[suffix_idx]}.')
def parse_arguments(args):
parser = argparse.ArgumentParser('Benchmark a serializer.')
parser.add_argument(
'--num_gates', default=100, type=int, help='Number of gates at the bottom nesting layer.'
)
parser.add_argument(
'--nesting_depth',
default=1,
type=int,
help='Depth of nested subcircuits. Total gate count will be 2^nesting_depth * num_gates.',
)
parser.add_argument(
'--num_repetitions', default=10, type=int, help='Number of times to repeat serialization.'
)
return vars(parser.parse_args(args))
if __name__ == '__main__':
main(**parse_arguments(sys.argv[1:]))
| apache-2.0 |
onitake/ansible | test/units/modules/network/netscaler/test_netscaler_gslb_site.py | 68 | 24193 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from units.compat.mock import patch, Mock, MagicMock, call
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
import sys
if sys.version_info[:2] != (2, 6):
import requests
class TestNetscalerGSLBSiteModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
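        # Stub out the Citrix nitro SDK packages so the Ansible module can be
        # imported without the real SDK being installed.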
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite.gslbsite': m,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def setUp(self):
super(TestNetscalerGSLBSiteModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
super(TestNetscalerGSLBSiteModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_gslb_site
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_gslb_site.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_gslb_site.nitro_exception', MockException):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_ensure_feature_is_enabled_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
gslb_site_proxy_mock = Mock()
ensure_feature_is_enabled_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=Mock(return_value=client_mock),
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=ensure_feature_is_enabled_mock,
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
ensure_feature_is_enabled_mock.assert_called_with(client_mock, 'GSLB')
def test_save_config_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[True, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[True, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_new_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_has_calls([call.add()])
def test_modified_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(),
nitro_exception=self.MockException,
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_has_calls([call.update()])
def test_absent_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, False]),
gslb_site_identical=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_has_calls([call.delete()])
def test_present_gslb_site_identical_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_not_called()
def test_absent_gslb_site_noop_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[False, False]),
gslb_site_identical=Mock(side_effect=[False, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_not_called()
def test_present_gslb_site_failed_update(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'GSLB site differs from configured')
self.assertTrue(result['failed'])
def test_present_gslb_site_failed_create(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[False, False]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'GSLB site does not exist')
self.assertTrue(result['failed'])
def test_present_gslb_site_update_immutable_attribute(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=['domain']),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'Cannot update immutable attributes [\'domain\']')
self.assertTrue(result['failed'])
def test_absent_gslb_site_failed_delete(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'GSLB site still exists')
self.assertTrue(result['failed'])
def test_graceful_nitro_exception_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
gslb_site_exists=m,
ensure_feature_is_enabled=Mock(),
nitro_exception=MockException
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
def test_graceful_nitro_exception_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
gslb_site_exists=m,
ensure_feature_is_enabled=Mock(),
nitro_exception=MockException
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
| gpl-3.0 |
LarsMq73/Greenfield | src/client/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/generator/eclipse.py | 437 | 11894 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dir's specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
include_dir = ''
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir and not include_dir in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
if config.has_key('include_dirs'):
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
if not include_dir in gyp_includes_set:
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
if not compiler_include in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
def GetCompilerPath(target_list, target_dicts, data):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return value
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params):
"""Calculate the defines for a project.
Returns:
A dict that includes explict defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
cc_target = GetCompilerPath(target_list, target_dicts, data)
if cc_target:
command = shlex.split(cc_target)
command.extend(['-E', '-dM', '-'])
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for key in sorted(defines.iterkeys()):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
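  """Write the eclipse-cdt-settings.xml file for a single configuration."""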
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
out_name = os.path.join(toplevel_build, 'eclipse-cdt-settings.xml')
gyp.common.EnsureDirExists(out_name)
out = open(out_name, 'w')
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name,
params)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name, params)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
out.close()
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
raise NotImplementedError, "--generator_output not implemented for eclipse"
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| mit |
tungvx/deploy | .google_appengine/lib/django_0_96/django/db/backends/mysql_old/base.py | 32 | 7830 | """
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
from django.db.backends import util
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured, "Error loading MySQLdb module: %s" % e
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE
import types
import re
DatabaseError = Database.DatabaseError
django_conversions = conversions.copy()
django_conversions.update({
types.BooleanType: util.rev_typecast_boolean,
FIELD_TYPE.DATETIME: util.typecast_timestamp,
FIELD_TYPE.DATE: util.typecast_date,
FIELD_TYPE.TIME: util.typecast_time,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# This is an extra debug layer over MySQL queries, to display warnings.
# It's only used when DEBUG=True.
class MysqlDebugWrapper:
def __init__(self, cursor):
self.cursor = cursor
def execute(self, sql, params=()):
try:
return self.cursor.execute(sql, params)
except Database.Warning, w:
self.cursor.execute("SHOW WARNINGS")
raise Database.Warning, "%s: %s" % (w, self.cursor.fetchall())
def executemany(self, sql, param_list):
try:
return self.cursor.executemany(sql, param_list)
except Database.Warning, w:
self.cursor.execute("SHOW WARNINGS")
raise Database.Warning, "%s: %s" % (w, self.cursor.fetchall())
def __getattr__(self, attr):
if self.__dict__.has_key(attr):
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# Import copy of _thread_local.py from Python 2.4
from django.utils._threading_local import local
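# DatabaseWrapper subclasses threading.local so each thread lazily opens and
# reuses its own MySQLdb connection.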
class DatabaseWrapper(local):
def __init__(self, **kwargs):
self.connection = None
self.queries = []
self.server_version = None
self.options = kwargs
def _valid_connection(self):
if self.connection is not None:
try:
self.connection.ping()
return True
except DatabaseError:
self.connection.close()
self.connection = None
return False
def cursor(self):
from django.conf import settings
if not self._valid_connection():
kwargs = {
'user': settings.DATABASE_USER,
'db': settings.DATABASE_NAME,
'passwd': settings.DATABASE_PASSWORD,
'conv': django_conversions,
}
if settings.DATABASE_HOST.startswith('/'):
kwargs['unix_socket'] = settings.DATABASE_HOST
else:
kwargs['host'] = settings.DATABASE_HOST
if settings.DATABASE_PORT:
kwargs['port'] = int(settings.DATABASE_PORT)
kwargs.update(self.options)
self.connection = Database.connect(**kwargs)
cursor = self.connection.cursor()
if self.connection.get_server_info() >= '4.1':
cursor.execute("SET NAMES 'utf8'")
else:
cursor = self.connection.cursor()
if settings.DEBUG:
return util.CursorDebugWrapper(MysqlDebugWrapper(cursor), self)
return cursor
def _commit(self):
if self.connection is not None:
self.connection.commit()
def _rollback(self):
if self.connection is not None:
try:
self.connection.rollback()
except Database.NotSupportedError:
pass
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def get_server_version(self):
if not self.server_version:
if not self._valid_connection():
self.cursor()
m = server_version_re.match(self.connection.get_server_info())
if not m:
raise Exception('Unable to determine MySQL version from version string %r' % self.connection.get_server_info())
self.server_version = tuple([int(x) for x in m.groups()])
return self.server_version
supports_constraints = True
def quote_name(name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
dictfetchone = util.dictfetchone
dictfetchmany = util.dictfetchmany
dictfetchall = util.dictfetchall
def get_last_insert_id(cursor, table_name, pk_name):
return cursor.lastrowid
def get_date_extract_sql(lookup_type, table_name):
# lookup_type is 'year', 'month', 'day'
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), table_name)
def get_date_trunc_sql(lookup_type, field_name):
# lookup_type is 'year', 'month', 'day'
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
def get_limit_offset_sql(limit, offset=None):
sql = "LIMIT "
if offset and offset != 0:
sql += "%s," % offset
return sql + str(limit)
def get_random_function_sql():
return "RAND()"
def get_deferrable_sql():
return ""
def get_fulltext_search_sql(field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def get_drop_foreignkey_sql():
return "DROP FOREIGN KEY"
def get_pk_default_value():
return "DEFAULT"
def get_sql_flush(style, tables, sequences):
"""Return a list of SQL statements required to remove all data from
all tables in the database (without actually removing the tables
themselves) and put the database in an empty 'initial' state
"""
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;'] + \
['%s %s;' % \
(style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(quote_name(table))
) for table in tables] + \
['SET FOREIGN_KEY_CHECKS = 1;']
# 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
# to reset sequence indices
sql.extend(["%s %s %s %s %s;" % \
(style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_TABLE(quote_name(sequence['table'])),
style.SQL_KEYWORD('AUTO_INCREMENT'),
style.SQL_FIELD('= 1'),
) for sequence in sequences])
return sql
else:
return []
OPERATOR_MAPPING = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
| apache-2.0 |
Chetox/RCode | Cannon_Avanzado/client.py | 1 | 2002 | #!/usr/bin/python
# -*- coding:utf-8; tab-width:4; mode:python -*-
import sys
import Ice
Ice.loadSlice('-I {} cannon.ice'.format(Ice.getSliceDir()))
import Cannon
import time
from matrix_utils import matrix_multiply
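# Read a whitespace-separated square matrix from 'filename' and return it as a
# Cannon.Matrix (order plus flat row-major data).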
def load_matrix_from_file(filename):
with file(filename) as f:
rows = f.readlines()
order = len(rows[0].split())
retval = Cannon.Matrix(order, [])
for row in rows:
rowdata = row.split()
assert len(rowdata) == order
for n in rowdata:
retval.data.append(float(n))
assert len(retval.data) == order ** 2
return retval
class Client(Ice.Application):
def run(self, argv):
        t_dist = 0
        t_secu = 0
loader = self.string_to_proxy(argv[1], Cannon.OperationsPrx)
example = argv[2]
A = load_matrix_from_file('m/{}A'.format(example))
B = load_matrix_from_file('m/{}B'.format(example))
t_dist = time.time()
C = loader.matrixMultiply(A, B)
t_dist = time.time() - t_dist
t_secu = time.time()
c = matrix_multiply(A,B)
t_secu = time.time() - t_secu
expected = load_matrix_from_file('m/{}C'.format(example))
retval = (C == expected)
print("OK" if retval else "FAIL")
print("El tiempo que ha tardado en distribuido ha sido {}".format(t_dist))
print("El tiempo que ha tardado en secuencial ha sido {}".format(t_secu))
if(C == None): print("Timeout expired")
return not retval
def string_to_proxy(self, str_proxy, iface):
proxy = self.communicator().stringToProxy(str_proxy)
retval = iface.checkedCast(proxy)
if not retval:
raise RuntimeError('Invalid proxy %s' % str_proxy)
return retval
def print_matrix(self, M):
ncols = M.ncols
nrows = len(M.data) / ncols
for r in range(nrows):
print M.data[r * ncols:(r + 1) * ncols]
if __name__ == '__main__':
sys.exit(Client().main(sys.argv))
| apache-2.0 |
Dhivyap/ansible | test/units/modules/network/fortios/test_fortios_wireless_controller_wtp_profile.py | 20 | 17499 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_wireless_controller_wtp_profile
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_wireless_controller_wtp_profile.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
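# Note for readers (comment added for clarity, not part of the original tests):
# each test below feeds snake_case playbook keys (e.g. 'ap_country') and asserts
# that the module converts them to the hyphenated FortiOS API names
# (e.g. 'ap-country') before calling FortiOSHandler.set/delete.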
def test_wireless_controller_wtp_profile_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'allowaccess': 'telnet',
'ap-country': 'NA',
'ble-profile': 'test_value_5',
'comment': 'Comment.',
'control-message-offload': 'ebp-frame',
'dtls-in-kernel': 'enable',
'dtls-policy': 'clear-text',
'energy-efficient-ethernet': 'enable',
'ext-info-enable': 'enable',
'handoff-roaming': 'enable',
'handoff-rssi': '13',
'handoff-sta-thresh': '14',
'ip-fragment-preventing': 'tcp-mss-adjust',
'led-state': 'enable',
'lldp': 'enable',
'login-passwd': 'test_value_18',
'login-passwd-change': 'yes',
'max-clients': '20',
'name': 'default_name_21',
'poe-mode': 'auto',
'split-tunneling-acl-local-ap-subnet': 'enable',
'split-tunneling-acl-path': 'tunnel',
'tun-mtu-downlink': '25',
'tun-mtu-uplink': '26',
'wan-port-mode': 'wan-lan'
}
set_method_mock.assert_called_with('wireless-controller', 'wtp-profile', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_wireless_controller_wtp_profile_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'allowaccess': 'telnet',
'ap-country': 'NA',
'ble-profile': 'test_value_5',
'comment': 'Comment.',
'control-message-offload': 'ebp-frame',
'dtls-in-kernel': 'enable',
'dtls-policy': 'clear-text',
'energy-efficient-ethernet': 'enable',
'ext-info-enable': 'enable',
'handoff-roaming': 'enable',
'handoff-rssi': '13',
'handoff-sta-thresh': '14',
'ip-fragment-preventing': 'tcp-mss-adjust',
'led-state': 'enable',
'lldp': 'enable',
'login-passwd': 'test_value_18',
'login-passwd-change': 'yes',
'max-clients': '20',
'name': 'default_name_21',
'poe-mode': 'auto',
'split-tunneling-acl-local-ap-subnet': 'enable',
'split-tunneling-acl-path': 'tunnel',
'tun-mtu-downlink': '25',
'tun-mtu-uplink': '26',
'wan-port-mode': 'wan-lan'
}
set_method_mock.assert_called_with('wireless-controller', 'wtp-profile', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_wireless_controller_wtp_profile_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('wireless-controller', 'wtp-profile', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_wireless_controller_wtp_profile_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('wireless-controller', 'wtp-profile', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_wireless_controller_wtp_profile_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_wtp_profile': {
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'allowaccess': 'telnet',
'ap-country': 'NA',
'ble-profile': 'test_value_5',
'comment': 'Comment.',
'control-message-offload': 'ebp-frame',
'dtls-in-kernel': 'enable',
'dtls-policy': 'clear-text',
'energy-efficient-ethernet': 'enable',
'ext-info-enable': 'enable',
'handoff-roaming': 'enable',
'handoff-rssi': '13',
'handoff-sta-thresh': '14',
'ip-fragment-preventing': 'tcp-mss-adjust',
'led-state': 'enable',
'lldp': 'enable',
'login-passwd': 'test_value_18',
'login-passwd-change': 'yes',
'max-clients': '20',
'name': 'default_name_21',
'poe-mode': 'auto',
'split-tunneling-acl-local-ap-subnet': 'enable',
'split-tunneling-acl-path': 'tunnel',
'tun-mtu-downlink': '25',
'tun-mtu-uplink': '26',
'wan-port-mode': 'wan-lan'
}
set_method_mock.assert_called_with('wireless-controller', 'wtp-profile', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_wireless_controller_wtp_profile_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'wireless_controller_wtp_profile': {
'random_attribute_not_valid': 'tag',
'allowaccess': 'telnet',
'ap_country': 'NA',
'ble_profile': 'test_value_5',
'comment': 'Comment.',
'control_message_offload': 'ebp-frame',
'dtls_in_kernel': 'enable',
'dtls_policy': 'clear-text',
'energy_efficient_ethernet': 'enable',
'ext_info_enable': 'enable',
'handoff_roaming': 'enable',
'handoff_rssi': '13',
'handoff_sta_thresh': '14',
'ip_fragment_preventing': 'tcp-mss-adjust',
'led_state': 'enable',
'lldp': 'enable',
'login_passwd': 'test_value_18',
'login_passwd_change': 'yes',
'max_clients': '20',
'name': 'default_name_21',
'poe_mode': 'auto',
'split_tunneling_acl_local_ap_subnet': 'enable',
'split_tunneling_acl_path': 'tunnel',
'tun_mtu_downlink': '25',
'tun_mtu_uplink': '26',
'wan_port_mode': 'wan-lan'
},
'vdom': 'root'}
is_error, changed, response = fortios_wireless_controller_wtp_profile.fortios_wireless_controller(input_data, fos_instance)
expected_data = {
'allowaccess': 'telnet',
'ap-country': 'NA',
'ble-profile': 'test_value_5',
'comment': 'Comment.',
'control-message-offload': 'ebp-frame',
'dtls-in-kernel': 'enable',
'dtls-policy': 'clear-text',
'energy-efficient-ethernet': 'enable',
'ext-info-enable': 'enable',
'handoff-roaming': 'enable',
'handoff-rssi': '13',
'handoff-sta-thresh': '14',
'ip-fragment-preventing': 'tcp-mss-adjust',
'led-state': 'enable',
'lldp': 'enable',
'login-passwd': 'test_value_18',
'login-passwd-change': 'yes',
'max-clients': '20',
'name': 'default_name_21',
'poe-mode': 'auto',
'split-tunneling-acl-local-ap-subnet': 'enable',
'split-tunneling-acl-path': 'tunnel',
'tun-mtu-downlink': '25',
'tun-mtu-uplink': '26',
'wan-port-mode': 'wan-lan'
}
set_method_mock.assert_called_with('wireless-controller', 'wtp-profile', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
aselle/tensorflow | tensorflow/contrib/kinesis/__init__.py | 31 | 1069 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Kinesis Dataset.
@@KinesisDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.kinesis.python.ops.kinesis_dataset_ops import KinesisDataset
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"KinesisDataset",
]
remove_undocumented(__name__)
| apache-2.0 |
doronkatz/firefox-ios | scripts/clean-xliff.py | 41 | 2284 | #! /usr/bin/env python
#
# clean-xliff.py <l10n_folder>
#
# Remove targets from a locale, remove target-language attribute
#
from glob import glob
from lxml import etree
import argparse
import os
NS = {'x':'urn:oasis:names:tc:xliff:document:1.2'}
def indent(elem, level=0):
# Prettify XML output
# http://effbot.org/zone/element-lib.htm#prettyprint
i = '\n' + level*' '
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + ' '
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def main():
xliff_filename = 'firefox-ios.xliff'
parser = argparse.ArgumentParser()
parser.add_argument('l10n_folder', help='Path to locale folder to clean up')
args = parser.parse_args()
file_path = os.path.join(
os.path.realpath(args.l10n_folder),
xliff_filename
)
print 'Updating %s' % file_path
# Read localized file XML
locale_tree = etree.parse(file_path)
locale_root = locale_tree.getroot()
# Remove existing localizations and target-language
for trans_node in locale_root.xpath('//x:trans-unit', namespaces=NS):
for child in trans_node.xpath('./x:target', namespaces=NS):
child.getparent().remove(child)
# Remove target-language where defined
for file_node in locale_root.xpath('//x:file', namespaces=NS):
if file_node.get('target-language'):
file_node.attrib.pop('target-language')
# Replace the existing locale file with the new XML content
with open(file_path, 'w') as fp:
# Fix indentations
indent(locale_root)
xliff_content = etree.tostring(
locale_tree,
encoding='UTF-8',
xml_declaration=True,
pretty_print=True
)
fp.write(xliff_content)
if __name__ == '__main__':
main()
| mpl-2.0 |
crazy-canux/django | django/contrib/messages/storage/cookie.py | 471 | 6545 | import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage, Message
from django.http import SimpleCookie
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.safestring import SafeData, mark_safe
class MessageEncoder(json.JSONEncoder):
"""
Compactly serializes instances of the ``Message`` class as JSON.
"""
message_key = '__json_message'
def default(self, obj):
if isinstance(obj, Message):
# Using 0/1 here instead of False/True to produce more compact json
is_safedata = 1 if isinstance(obj.message, SafeData) else 0
message = [self.message_key, is_safedata, obj.level, obj.message]
if obj.extra_tags:
message.append(obj.extra_tags)
return message
return super(MessageEncoder, self).default(obj)
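# Illustrative example (values assumed, not from the original module): a
# non-SafeData Message with level 20 and text 'Done' encodes to the compact list
# ['__json_message', 0, 20, 'Done'], with extra_tags appended as a fifth element
# when present.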
class MessageDecoder(json.JSONDecoder):
"""
Decodes JSON that includes serialized ``Message`` instances.
"""
def process_messages(self, obj):
if isinstance(obj, list) and obj:
if obj[0] == MessageEncoder.message_key:
if len(obj) == 3:
# Compatibility with previously-encoded messages
return Message(*obj[1:])
if obj[1]:
obj[3] = mark_safe(obj[3])
return Message(*obj[2:])
return [self.process_messages(item) for item in obj]
if isinstance(obj, dict):
return {key: self.process_messages(value)
for key, value in six.iteritems(obj)}
return obj
def decode(self, s, **kwargs):
decoded = super(MessageDecoder, self).decode(s, **kwargs)
return self.process_messages(decoded)
class CookieStorage(BaseStorage):
"""
Stores messages in a cookie.
"""
cookie_name = 'messages'
# uwsgi's default configuration enforces a maximum size of 4kb for all the
# HTTP headers. In order to leave some room for other cookies and headers,
# restrict the session cookie to 1/2 of 4kb. See #18781.
max_cookie_size = 2048
not_finished = '__messagesnotfinished__'
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the messages cookie. If the
not_finished sentinel value is found at the end of the message list,
remove it and return a result indicating that not all messages were
retrieved by this storage.
"""
data = self.request.COOKIES.get(self.cookie_name)
messages = self._decode(data)
all_retrieved = not (messages and messages[-1] == self.not_finished)
if messages and not all_retrieved:
# remove the sentinel value
messages.pop()
return messages, all_retrieved
def _update_cookie(self, encoded_data, response):
"""
Either sets the cookie with the encoded data if there is any data to
store, or deletes the cookie.
"""
if encoded_data:
response.set_cookie(self.cookie_name, encoded_data,
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
else:
response.delete_cookie(self.cookie_name,
domain=settings.SESSION_COOKIE_DOMAIN)
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
"""
Stores the messages to a cookie, returning a list of any messages which
could not be stored.
If the encoded data is larger than ``max_cookie_size``, removes
messages until the data fits (these are the messages which are
returned), and adds the not_finished sentinel value to indicate as much.
"""
unstored_messages = []
encoded_data = self._encode(messages)
if self.max_cookie_size:
# data is going to be stored eventually by SimpleCookie, which
# adds its own overhead, which we must account for.
cookie = SimpleCookie() # create outside the loop
def stored_length(val):
return len(cookie.value_encode(val)[1])
while encoded_data and stored_length(encoded_data) > self.max_cookie_size:
if remove_oldest:
unstored_messages.append(messages.pop(0))
else:
unstored_messages.insert(0, messages.pop())
encoded_data = self._encode(messages + [self.not_finished],
encode_empty=unstored_messages)
self._update_cookie(encoded_data, response)
return unstored_messages
def _hash(self, value):
"""
Creates an HMAC/SHA1 hash based on the value and the project setting's
SECRET_KEY, modified to make it unique for the present purpose.
"""
key_salt = 'django.contrib.messages'
return salted_hmac(key_salt, value).hexdigest()
def _encode(self, messages, encode_empty=False):
"""
Returns an encoded version of the messages list which can be stored as
plain text.
Since the data will be retrieved from the client-side, the encoded data
also contains a hash to ensure that the data was not tampered with.
"""
if messages or encode_empty:
encoder = MessageEncoder(separators=(',', ':'))
value = encoder.encode(messages)
return '%s$%s' % (self._hash(value), value)
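# Illustrative note (example value assumed): the encoded cookie therefore looks
# like '<hmac-hex>$[["__json_message",0,20,"Done"]]', i.e. the hash and the JSON
# payload joined by '$'; _decode() below splits on the first '$' and verifies the
# hash with constant_time_compare before parsing the JSON.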
def _decode(self, data):
"""
Safely decodes an encoded text stream back into a list of messages.
If the encoded text stream contained an invalid hash or was in an
invalid format, ``None`` is returned.
"""
if not data:
return None
bits = data.split('$', 1)
if len(bits) == 2:
hash, value = bits
if constant_time_compare(hash, self._hash(value)):
try:
# If we get here (and the JSON decode works), everything is
# good. In any other case, drop back and return None.
return json.loads(value, cls=MessageDecoder)
except ValueError:
pass
# Mark the data as used (so it gets removed) since something was wrong
# with the data.
self.used = True
return None
| bsd-3-clause |
yongshengwang/hue | build/env/lib/python2.7/site-packages/logilab_common-1.0.2-py2.7.egg/logilab/common/tasksqueue.py | 93 | 2987 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Prioritized tasks queue"""
__docformat__ = "restructuredtext en"
from bisect import insort_left
from six.moves import queue
LOW = 0
MEDIUM = 10
HIGH = 100
PRIORITY = {
'LOW': LOW,
'MEDIUM': MEDIUM,
'HIGH': HIGH,
}
REVERSE_PRIORITY = dict((values, key) for key, values in PRIORITY.items())
class PrioritizedTasksQueue(queue.Queue):
def _init(self, maxsize):
"""Initialize the queue representation"""
self.maxsize = maxsize
# ordered list of task, from the lowest to the highest priority
self.queue = []
def _put(self, item):
"""Put a new item in the queue"""
for i, task in enumerate(self.queue):
# equivalent task
if task == item:
# if new task has a higher priority, remove the one already
# queued so the new priority will be considered
if task < item:
item.merge(task)
del self.queue[i]
break
# else keep it so current order is kept
task.merge(item)
return
insort_left(self.queue, item)
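# Illustrative behaviour (comment added, not in the original): if Task('t1', LOW)
# is already queued and Task('t1', HIGH) is put, the low-priority entry is merged
# into the new one and re-inserted at its higher priority; _get() then pops it
# first, since the queue is kept sorted ascending and popped from the end.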
def _get(self):
"""Get an item from the queue"""
return self.queue.pop()
def __iter__(self):
return iter(self.queue)
def remove(self, tid):
"""remove a specific task from the queue"""
# XXX acquire lock
for i, task in enumerate(self):
if task.id == tid:
self.queue.pop(i)
return
raise ValueError('no task with id %s in queue' % tid)
class Task(object):
def __init__(self, tid, priority=LOW):
# task id
self.id = tid
# task priority
self.priority = priority
def __repr__(self):
return '<Task %s @%#x>' % (self.id, id(self))
def __cmp__(self, other):
return cmp(self.priority, other.priority)
def __lt__(self, other):
return self.priority < other.priority
def __eq__(self, other):
return self.id == other.id
__hash__ = object.__hash__
def merge(self, other):
pass
| apache-2.0 |
bitcraze/crazyflie-lib-python | test/crtp/test_crtpstack.py | 1 | 2875 | # -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import unittest
from cflib.crtp.crtpstack import CRTPPacket
class CRTPPacketTest(unittest.TestCase):
def setUp(self):
self.callback_count = 0
self.sut = CRTPPacket()
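# The expected header values below follow the CRTP layout implied by these tests
# (an inference, not taken from crtpstack.py itself): the port occupies the upper
# four bits, the channel the lower two bits, and bits 2-3 default to 1, so
# port=2/channel=1 encodes to (2 << 4) | 0x0c | 1 == 0x2d.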
def test_that_port_and_channel_is_encoded_in_header(self):
# Fixture
self.sut.set_header(2, 1)
# Test
actual = self.sut.get_header()
# Assert
expected = 0x2d
self.assertEqual(expected, actual)
def test_that_port_is_truncated_in_header(self):
# Fixture
port = 0xff
self.sut.set_header(port, 0)
# Test
actual = self.sut.get_header()
# Assert
expected = 0xfc
self.assertEqual(expected, actual)
def test_that_channel_is_truncated_in_header(self):
# Fixture
channel = 0xff
self.sut.set_header(0, channel)
# Test
actual = self.sut.get_header()
# Assert
expected = 0x0f
self.assertEqual(expected, actual)
def test_that_port_and_channel_is_encoded_in_header_when_set_separately(self):
# Fixture
self.sut.port = 2
self.sut.channel = 1
# Test
actual = self.sut.get_header()
# Assert
expected = 0x2d
self.assertEqual(expected, actual)
def test_that_default_header_is_set_when_constructed(self):
# Fixture
# Test
actual = self.sut.get_header()
# Assert
expected = 0x0c
self.assertEqual(expected, actual)
def test_that_header_is_set_when_constructed(self):
# Fixture
sut = CRTPPacket(header=0x21)
# Test
actual = sut.get_header()
# Assert
self.assertEqual(0x2d, actual)
self.assertEqual(2, sut.port)
self.assertEqual(1, sut.channel)
| gpl-2.0 |
dednal/chromium.src | tools/telemetry/telemetry/value/__init__.py | 35 | 11178 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The Value hierarchy provides a way of representing the values measurements
produce such that they can be merged across runs, grouped by page, and output
to different targets.
The core Value concept provides the basic functionality:
- association with a page, may be none
- naming and units
- importance tracking [whether a value will show up on a waterfall or output
file by default]
- other metadata, such as a description of what was measured
- default conversion to scalar and string
- merging properties
A page may actually run a few times during a single telemetry session.
Downstream consumers of test results typically want to group these runs
together, then compute summary statistics across runs. Value provides the
Merge* family of methods for this kind of aggregation.
"""
import os
from telemetry.core import discover
from telemetry.core import util
# When combining a pair of Values together, it is sometimes ambiguous whether
# the values should be concatenated, or one should be picked as representative.
# The possible merging policies are listed here.
CONCATENATE = 'concatenate'
PICK_FIRST = 'pick-first'
# When converting a Value to its buildbot equivalent, the context in which the
# value is being interpreted actually affects the conversion. This is insane,
# but there you have it. There are three contexts in which Values are converted
# for use by buildbot, represented by these output-intent values.
PER_PAGE_RESULT_OUTPUT_CONTEXT = 'per-page-result-output-context'
COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT = 'merged-pages-result-output-context'
SUMMARY_RESULT_OUTPUT_CONTEXT = 'summary-result-output-context'
class Value(object):
"""An abstract value produced by a telemetry page test.
"""
def __init__(self, page, name, units, important, description):
"""A generic Value object.
Args:
page: A Page object, may be given as None to indicate that the value
represents results for multiple pages.
name: A value name string, may contain a dot. Values from the same test
with the same prefix before the dot may be considered to belong to
the same chart.
units: A units string.
important: Whether the value is "important". Causes the value to appear
by default in downstream UIs.
description: A string explaining in human-understandable terms what this
value represents.
"""
self.page = page
self.name = name
self.units = units
self.important = important
self.description = description
def IsMergableWith(self, that):
return (self.units == that.units and
type(self) == type(that) and
self.important == that.important)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
"""Combines the provided list of values into a single compound value.
When a page runs multiple times, it may produce multiple values. This
function is given the same-named values across the multiple runs, and has
the responsibility of producing a single result.
It must return a single Value. If merging does not make sense, the
implementation must pick a representative value from one of the runs.
For instance, it may be given
[ScalarValue(page, 'a', 1), ScalarValue(page, 'a', 2)]
and it might produce
ListOfScalarValues(page, 'a', [1, 2])
"""
raise NotImplementedError()
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values,
group_by_name_suffix=False):
"""Combines the provided values into a single compound value.
When a full pageset runs, a single value_name will usually end up getting
collected for multiple pages. For instance, we may end up with
[ScalarValue(page1, 'a', 1),
ScalarValue(page2, 'a', 2)]
This function takes in the values of the same name, but across multiple
pages, and produces a single summary result value. In this instance, it
could produce a ScalarValue(None, 'a', 1.5) to indicate averaging, or even
ListOfScalarValues(None, 'a', [1, 2]) if concatenated output was desired.
Some results are so specific to a page that they make no sense when
aggregated across pages. If merging values of this type across pages is
non-sensical, this method may return None.
If group_by_name_suffix is True, then x.z and y.z are considered to be the
same value and are grouped together. If false, then x.z and y.z are
considered different.
"""
raise NotImplementedError()
def _IsImportantGivenOutputIntent(self, output_context):
if output_context == PER_PAGE_RESULT_OUTPUT_CONTEXT:
return False
elif output_context == COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT:
return self.important
elif output_context == SUMMARY_RESULT_OUTPUT_CONTEXT:
return self.important
def GetBuildbotDataType(self, output_context):
"""Returns the buildbot's equivalent data_type.
This should be one of the values accepted by perf_tests_results_helper.py.
"""
raise NotImplementedError()
def GetBuildbotValue(self):
"""Returns the buildbot's equivalent value."""
raise NotImplementedError()
def GetChartAndTraceNameForPerPageResult(self):
chart_name, _ = _ConvertValueNameToChartAndTraceName(self.name)
trace_name = self.page.display_name
return chart_name, trace_name
@property
def name_suffix(self):
"""Returns the string after a . in the name, or the full name otherwise."""
if '.' in self.name:
return self.name.split('.', 1)[1]
else:
return self.name
def GetChartAndTraceNameForComputedSummaryResult(
self, trace_tag):
chart_name, trace_name = (
_ConvertValueNameToChartAndTraceName(self.name))
if trace_tag:
return chart_name, trace_name + trace_tag
else:
return chart_name, trace_name
def GetRepresentativeNumber(self):
"""Gets a single scalar value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
def GetRepresentativeString(self):
"""Gets a string value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
@staticmethod
def GetJSONTypeName():
"""Gets the typename for serialization to JSON using AsDict."""
raise NotImplementedError()
def AsDict(self):
"""Pre-serializes a value to a dict for output as JSON."""
return self._AsDictImpl()
def _AsDictImpl(self):
d = {
'name': self.name,
'type': self.GetJSONTypeName(),
'units': self.units,
'important': self.important
}
if self.description:
d['description'] = self.description
if self.page:
d['page_id'] = self.page.id
return d
def AsDictWithoutBaseClassEntries(self):
full_dict = self.AsDict()
base_dict_keys = set(self._AsDictImpl().keys())
# Extracts only entries added by the subclass.
return dict([(k, v) for (k, v) in full_dict.iteritems()
if k not in base_dict_keys])
@staticmethod
def FromDict(value_dict, page_dict):
"""Produces a value from a value dict and a page dict.
Value dicts are produced by serialization to JSON, and must be accompanied
by a dict mapping page IDs to pages, also produced by serialization, in
order to be completely deserialized. If deserializing multiple values, use
ListOfValuesFromListOfDicts instead.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
return Value.ListOfValuesFromListOfDicts([value_dict], page_dict)[0]
@staticmethod
def ListOfValuesFromListOfDicts(value_dicts, page_dict):
"""Takes a list of value dicts to values.
Given a list of value dicts produced by AsDict, this method
deserializes the dicts given a dict mapping page IDs to pages.
This method performs memoization for deserializing a list of values
efficiently, where FromDict is meant to handle one-offs.
values: a list of value dicts produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
value_dir = os.path.dirname(__file__)
value_classes = discover.DiscoverClasses(
value_dir, util.GetTelemetryDir(),
Value, index_by_class_name=True)
value_json_types = dict((value_classes[x].GetJSONTypeName(), x) for x in
value_classes)
values = []
for value_dict in value_dicts:
value_class = value_classes[value_json_types[value_dict['type']]]
assert 'FromDict' in value_class.__dict__, \
'Subclass doesn\'t override FromDict'
values.append(value_class.FromDict(value_dict, page_dict))
return values
@staticmethod
def GetConstructorKwArgs(value_dict, page_dict):
"""Produces constructor arguments from a value dict and a page dict.
Takes a dict parsed from JSON and an index of pages and recovers the
keyword arguments to be passed to the constructor for deserializing the
dict.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
d = {
'name': value_dict['name'],
'units': value_dict['units']
}
description = value_dict.get('description', None)
if description:
d['description'] = description
else:
d['description'] = None
page_id = value_dict.get('page_id', None)
if page_id:
d['page'] = page_dict[int(page_id)]
else:
d['page'] = None
d['important'] = False
return d
def ValueNameFromTraceAndChartName(trace_name, chart_name=None):
"""Mangles a trace name plus optional chart name into a standard string.
A value might just be a bareword name, e.g. numPixels. In that case, its
chart may be None.
But, a value might also be intended for display with other values, in which
case the chart name indicates that grouping. So, you might have
screen.numPixels, screen.resolution, where chartName='screen'.
"""
assert trace_name != 'url', 'The name url cannot be used'
if chart_name:
return '%s.%s' % (chart_name, trace_name)
else:
assert '.' not in trace_name, ('Trace names cannot contain "." with an '
'empty chart_name since this is used to delimit chart_name.trace_name.')
return trace_name
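# Illustrative usage (comment added, not from the original module): with these
# rules, ValueNameFromTraceAndChartName('numPixels', 'screen') returns
# 'screen.numPixels', while ValueNameFromTraceAndChartName('numPixels') returns
# just 'numPixels'.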
def _ConvertValueNameToChartAndTraceName(value_name):
"""Converts a value_name into the equivalent chart-trace name pair.
Buildbot represents values by the measurement name and an optional trace name,
whereas telemetry represents values with a chart_name.trace_name convention,
where chart_name is optional. This convention is also used by chart_json.
This converts from the telemetry convention to the buildbot convention,
returning a 2-tuple (measurement_name, trace_name).
"""
if '.' in value_name:
return value_name.split('.', 1)
else:
return value_name, value_name
| bsd-3-clause |
csixteen/HackerRank_Python | Algorithms/magic_square.py | 1 | 1071 | class Solution(object):
MAGIC_SQUARES = [
[4, 9, 2, 3, 5, 7, 8, 1, 6],
[2, 9, 4, 7, 5, 3, 6, 1, 8],
[8, 3, 4, 1, 5, 9, 6, 7, 2],
[4, 3, 8, 9, 5, 1, 2, 7, 6],
[6, 1, 8, 7, 5, 3, 2, 9, 4],
[8, 1, 6, 3, 5, 7, 4, 9, 2],
[6, 7, 2, 1, 5, 9, 8, 3, 4],
[2, 7, 6, 9, 5, 1, 4, 3, 8]
]
def magic_square(self, s):
totals = []
for ms in self.MAGIC_SQUARES:
totals.append(sum([abs(ms_e - s_e) for ms_e, s_e in zip(ms, s)]))
return min(totals)
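# Illustrative note (comment added, not in the original): MAGIC_SQUARES holds all
# eight rotations/reflections of the 3x3 magic square, and magic_square() returns
# the cheapest conversion cost; e.g. [4, 9, 2, 3, 5, 7, 8, 1, 5] needs a single
# +1 change to match the first square, so the result is 1.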
import unittest
class SolutionTest(unittest.TestCase):
def test_magic_square(self):
s = Solution()
self.assertEqual(0, s.magic_square([6, 1, 8, 7, 5, 3, 2, 9, 4]))
self.assertEqual(1, s.magic_square([4, 9, 2, 3, 5, 7, 8, 1, 5]))
self.assertEqual(4, s.magic_square([4, 8, 2, 4, 5, 7, 6, 1, 6]))
self.assertEqual(45, s.magic_square([0, 0, 0, 0, 0, 0, 0, 0, 0]))
self.assertEqual(36, s.magic_square([9, 9, 9, 9, 9, 9, 9, 9, 9]))
if __name__ == "__main__":
unittest.main()
| mit |
Nu3001/external_chromium_org | tools/telemetry/telemetry/page/actions/seek.py | 23 | 2205 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A Telemetry page_action that performs the "seek" action on media elements.
Action attributes are:
- seek_time: The media time to seek to. Test fails if not provided.
- selector: If no selector is defined then the action attempts to seek the first
media element on the page. If 'all' then seek all media elements.
- log_seek_time: If true the seek time is recorded, otherwise media measurement
will not be aware of the seek action. Used to perform multiple
seeks. Default true.
- wait_for_seeked: If true forces the action to wait for seeked event to fire.
Default false.
- wait_timeout: Timeout to wait for seeked event. Only valid with
wait_for_seeked=true
"""
from telemetry.page.actions.media_action import MediaAction
from telemetry.core import exceptions
from telemetry.page.actions import page_action
class SeekAction(MediaAction):
def __init__(self, attributes=None):
super(SeekAction, self).__init__(attributes)
def WillRunAction(self, page, tab):
"""Load the media metrics JS code prior to running the action."""
super(SeekAction, self).WillRunAction(page, tab)
self.LoadJS(tab, 'seek.js')
def RunAction(self, page, tab, previous_action):
try:
assert hasattr(self, 'seek_time')
selector = self.selector if hasattr(self, 'selector') else ''
log_seek = self.log_seek == True if hasattr(self, 'log_seek') else True
tab.ExecuteJavaScript('window.__seekMedia("%s", "%s", %i);' %
(selector, self.seek_time, log_seek))
timeout = self.wait_timeout if hasattr(self, 'wait_timeout') else 60
# Check if we need to wait for 'seeked' event to fire.
if hasattr(self, 'wait_for_seeked') and self.wait_for_seeked:
self.WaitForEvent(tab, selector, 'seeked', timeout)
except exceptions.EvaluateException:
raise page_action.PageActionFailed('Cannot seek media element(s) with '
'selector = %s.' % selector)
| bsd-3-clause |
alexpap/exareme | exareme-tools/madis/src/functionslocal/aggregate/approximatedmedian.py | 1 | 2110 | import inspect
import math
import random
import numpy
from fractions import Fraction
import sys
import json
from array import *
class approximatedmedian:
registered = True #Value to define db operator
def __init__(self):
self.n = 0
self.totalnums = 0
self.numberofcolumns = 5
self.colname = []
self.buckets = []
self.minvalues = []
self.maxvalues = []
self.nums = []
def step(self, *args):
try:
self.colname.append(args[0])
self.buckets.append(int(args[1]))
self.minvalues.append(float(args[2]))
self.maxvalues.append(float(args[3]))
self.nums.append(int(args[4]))
self.totalnums += int(args[4])
self.n += 1
except (ValueError, TypeError):
raise
def final(self):
# print self.nums
# print self.totalnums / 2.0
yield ('colname0', 'val', 'bucket', 'numsBeforeMedian', 'numsAfterMedian')
# yield ('attr1', 'attr2', 'val', 'reccount')
currentsum = 0
for i in xrange(0,self.n):
# print i,self.totalnums / 2.0,self.nums[i],currentsum
currentsum += self.nums[i]
if currentsum >= (self.totalnums / 2.0):
break
median = self.minvalues[i]+(currentsum-self.totalnums / 2.0) * (self.maxvalues[i]-self.minvalues[i]) / self.nums[i]
# print (self.totalnums / 2.0), currentsum, currentsum -self.nums[i]
numsBeforeMedian = (self.totalnums / 2.0) - (currentsum - self.nums[i])
numsAfterMedian = currentsum - (self.totalnums / 2.0)
yield self.colname[0], median, i, numsBeforeMedian,numsAfterMedian
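# Worked example of the interpolation above (data assumed): if the bucket holding
# the median spans [10, 20) with nums=6 and currentsum=10 while totalnums/2.0 is
# 5, the code yields median = 10 + (10 - 5) * (20 - 10) / 6, roughly 18.33.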
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
#from functions import *
#testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| mit |
kunaltyagi/nsiqcppstyle | rules/RULE_4_1_B_locate_each_enum_item_in_seperate_line.py | 1 | 3034 | """
Locate each enum item on a separate line.
== Violation ==
enum A {
A_A, A_B <== Violation
}
== Good ==
enum A {
A_A, <== Good
A_B
}
"""
from nsiqunittest.nsiqcppstyle_unittestbase import *
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, typeName, typeFullName, decl, contextStack, typeContext):
if not decl and typeContext is not None:
# column = GetRealColumn(lexer.GetCurToken())
if typeName == "ENUM":
lexer._MoveToToken(typeContext.startToken)
while(True):
nt = lexer.GetNextTokenInTypeList(
["COMMA", "RBRACE"], False, True)
if nt is None or nt == typeContext.endToken:
break
if typeContext != nt.contextStack.Peek():
continue
nt2 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
nt3 = lexer.PeekPrevTokenSkipWhiteSpaceAndCommentAndPreprocess()
# print nt, nt2,nt3
if nt.lineno == nt2.lineno and nt3.lineno == nt.lineno:
nsiqcppstyle_reporter.Error(
nt2, __name__, "Each enum item (%s) should be located on a different line" % nt2.value)
ruleManager.AddTypeNameRule(RunRule)
##########################################################################
# Unit Test
##########################################################################
class testRule(nct):
def setUpRule(self):
ruleManager.AddTypeNameRule(RunRule)
def test1(self):
self.Analyze("test/thisFile.c",
"""
enum A {
}
""")
self.ExpectSuccess(__name__)
def test2(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA, BB
}
""")
self.ExpectError(__name__)
def test3(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4,
BB
}
""")
self.ExpectSuccess(__name__)
def test4(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4
,BB
}
""")
self.ExpectSuccess(__name__)
def test5(self):
self.Analyze("test/thisFile.c",
"""
enum C
{
AA = 4
,BB
} TT;
""")
self.ExpectSuccess(__name__)
def test6(self):
self.Analyze("test/thisFile.c",
"""
enum COLOR
{
COLOR_TRANSPARENT = RGB(0, 0, 255),
COLOR_ROOM_IN_OUT = 0xffff00,
COLOR_CHAT_ITEM = 0xff9419,
COLOR_CHAT_MY = 0x00b4ff,
COLOR_CHAT_YOUR = 0xa3d5ff,
COLOR_ROOM_INFO = 0x00ffff,
COLOR_RESULT_SCORE = 0xffcc00,
COLOR_RESULT_RATING = 0x00fcff,
COLOR_RESULT_POINT = 0x33ff00
}; """)
self.ExpectSuccess(__name__)
| gpl-2.0 |
colmmacc/s2n | tests/integrationv2/processes.py | 3 | 14984 | import time
import os
import select
import selectors
import subprocess
import threading
from common import Results, TimeoutException
from time import monotonic as _time
_PopenSelector = selectors.PollSelector
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
class _processCommunicator(object):
"""
This class allows greater control over stdin than using Popen.communicate().
Popen.communicate() closes stdin as soon as data is written. This causes
TLS clients (OpenSSL derivatives) to shut down before the handshake is complete.
To prevent a premature shutdown, we need to wait until the handshake is complete
before writing to stdin. To accomplish this we `poll` stdout for a send
marker. Once that marker is found, we can write input data to stdin. The
benefit of using `poll` and `os.read` is that we get non-blocking IO. Our timeouts
are much more reliable, and we don't risk deadlocking on a readline() call which
will never complete.
Another method is to read stdout line by line. This removes a lot of code that
registers and unregisters file descriptors with a selector. It would make reading
and writing sequential (as opposed to event based), which can be easier to read
and maintain. The downsides with this method exist in the current integration test
framework. We rely on sleeps and waits, and still hit hard to debug deadlocks from
time to time.
"""
def __init__(self, proc):
self.proc = proc
self.wait_for_marker = None
# If the process times out, communicate() is called once more to pick
# up any data remaining in stdout/stderr. This flags lets us know if
# we need to do initial setup on the file descriptors, or if it was done
# during the initial call.
self._communication_started = False
def wait_for(self, wait_for_marker, timeout=None):
"""
Wait for a specific marker in stdout.
If the marker is not seen, a timeout will be raised.
"""
self.wait_for_marker = wait_for_marker
stdout = None
stderr = None
try:
stdout, stderr = self._communicate(None, timeout=timeout)
finally:
self._communication_started = True
return (stdout, stderr)
def communicate(self, input_data=None, send_marker_list=None, close_marker=None, timeout=None):
"""
Communicates with the managed process. If send_marker_list is set, input_data will not be sent
until the marker is seen.
This method acts very similarly to the Popen.communicate method. The only differences are the
send_marker_list and close_marker parameters.
"""
self.wait_for_marker = None
stdout = None
stderr = None
try:
stdout, stderr = self._communicate(input_data, send_marker_list, close_marker, timeout)
finally:
self._communication_started = True
return (stdout, stderr)
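# Illustrative call sequence (marker strings assumed, not defined here): a caller
# typically runs wait_for('Listening') first, then
# communicate(input_data=[b'hello'], send_marker_list=['ready to send'], timeout=5)
# so that stdin is only written after the child prints its ready marker.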
def _communicate(self, input_data=None, send_marker_list=None, close_marker=None, timeout=None):
"""
This method will read and write data to a subprocess in a non-blocking manner.
The code is heavily based on Popen.communicate. There are a couple differences:
* STDIN is not registered for events until the read_to_send marker is found
* STDIN is only closed after all registered events have been processed (including
pending stdout/stderr events, allowing more data to be stored).
"""
if input_data is not None and self.proc.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
try:
self.proc.stdin.flush()
except BrokenPipeError:
pass # communicate() must ignore BrokenPipeError.
# The process' stdout and stderr are stored in a map, with two variable
# pointing to the file objects. This allows us to include stdout/stderr
# data in a timeout exception.
if not self._communication_started:
self._fileobj2output = {}
if self.proc.stdout:
self._fileobj2output[self.proc.stdout] = []
if self.proc.stderr:
self._fileobj2output[self.proc.stderr] = []
stdout = self._fileobj2output[self.proc.stdout]
stderr = self._fileobj2output[self.proc.stderr]
input_data_len = 0
input_data_offset = 0
input_data_sent = False
send_marker = None
if send_marker_list:
send_marker = send_marker_list.pop(0)
# Keeping track of the original timeout value, and the expected end
# time of the operation allow us to timeout while reads/writes are
# still pending. It also allows us to only wait for the remainder of
# the timeout after reads/writes have completed.
orig_timeout = timeout
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
with _PopenSelector() as selector:
if self.proc.stdout and not self.proc.stdout.closed:
selector.register(self.proc.stdout, selectors.EVENT_READ)
if self.proc.stderr and not self.proc.stderr.closed:
selector.register(self.proc.stderr, selectors.EVENT_READ)
while selector.get_map():
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
self._check_timeout(endtime, orig_timeout,
stdout, stderr,
skip_check_and_raise=True)
raise RuntimeError( # Impossible :)
'_check_timeout(..., skip_check_and_raise=True) '
'failed to raise TimeoutExpired.')
ready = selector.select(timeout)
self._check_timeout(endtime, orig_timeout, stdout, stderr)
for key, events in ready:
# STDIN is only registered to receive events after the send_marker is found.
if key.fileobj is self.proc.stdin:
chunk = input_view[input_data_offset :
input_data_offset + _PIPE_BUF]
try:
input_data_offset += os.write(key.fd, chunk)
except BrokenPipeError:
selector.unregister(key.fileobj)
else:
if input_data_offset >= input_data_len:
selector.unregister(key.fileobj)
input_data_sent = True
input_data_offset = 0
if send_marker_list:
send_marker = send_marker_list.pop(0)
elif key.fileobj in (self.proc.stdout, self.proc.stderr):
data = os.read(key.fd, 32768)
if not data:
selector.unregister(key.fileobj)
# fileobj2output[key.fileobj] is a list of data chunks
# that get joined later
self._fileobj2output[key.fileobj].append(data)
# If we are looking for, and find, the ready-to-send marker, then
# register STDIN to receive events. If there is no data to send,
# just mark input_send as true so we can close out STDIN.
if send_marker is not None and send_marker in str(data):
if self.proc.stdin and input_data:
selector.register(self.proc.stdin, selectors.EVENT_WRITE)
message = input_data.pop(0)
# Data destined for stdin is stored in a memoryview
input_view = memoryview(message)
input_data_len = len(message)
else:
input_data_sent = True
if self.wait_for_marker is not None and self.wait_for_marker in str(data):
selector.unregister(self.proc.stdout)
selector.unregister(self.proc.stderr)
return None, None
# If we have finished sending all our input, and have received the
# ready-to-send marker, we can close out stdin.
if self.proc.stdin and input_data_sent:
if close_marker is None or (close_marker and close_marker in str(data)):
input_data_sent = None
self.proc.stdin.close()
self.proc.wait(timeout=self._remaining_time(endtime))
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
return (stdout, stderr)
def _remaining_time(self, endtime):
"""Convenience for _communicate when computing timeouts."""
if endtime is None:
return None
else:
return endtime - _time()
def _check_timeout(self, endtime, orig_timeout, stdout_seq, stderr_seq,
skip_check_and_raise=False):
"""
Convenience for checking if a timeout has expired.
NOTE: This method is included here to prevent our custom _communicate method
from relying on a particular version of Python.
"""
if endtime is None:
return
if skip_check_and_raise or _time() > endtime:
raise subprocess.TimeoutExpired(
self.proc.args, orig_timeout,
output=b''.join(stdout_seq) if stdout_seq else None,
stderr=b''.join(stderr_seq) if stderr_seq else None)
class ManagedProcess(threading.Thread):
"""
A ManagedProcess is a thread that monitors a subprocess.
This class provides a single place to control process timeouts and cleanup.
The stdin/stdout/stderr and exit code are monitored, and results
are made available to the caller.
"""
def __init__(self, cmd_line, provider_set_ready_condition, wait_for_marker=None, send_marker_list=None, close_marker=None, timeout=5, data_source=None, env_overrides=dict()):
threading.Thread.__init__(self)
proc_env = os.environ.copy()
for key in env_overrides:
proc_env[key] = env_overrides[key]
self.proc_env = proc_env
# Command line to execute in the subprocess
self.cmd_line = cmd_line
# Total time to wait until killing the subprocess
self.timeout = timeout
# Condition variable indicating when results are ready to be collected
self.results_condition = threading.Condition()
self.results = None
# Condition variable indicating when this subprocess has been launched successfully
self.ready_condition = threading.Condition()
self.process_ready = False
self.provider_set_ready_condition = provider_set_ready_condition
# Indicates the process has completed some initial setup and is ready for testing
self.ready_to_test = wait_for_marker
self.close_marker = close_marker
self.data_source = data_source
self.send_marker_list = send_marker_list
if data_source is not None:
if type(data_source) is not list:
self.data_source = [data_source]
if send_marker_list is not None:
if type(send_marker_list) is not list:
self.send_marker_list = [send_marker_list]
def run(self):
with self.results_condition:
try:
proc = subprocess.Popen(self.cmd_line, env=self.proc_env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
self.proc = proc
except Exception as ex:
self.results = Results(None, None, None, ex)
raise ex
communicator = _processCommunicator(proc)
if self.ready_to_test is not None:
# Some processes won't be ready until they have emitted some string in stdout.
communicator.wait_for(self.ready_to_test, timeout=self.timeout)
# Let any threads waiting on process launch proceed
self.provider_set_ready_condition()
proc_results = None
try:
proc_results = communicator.communicate(input_data=self.data_source, send_marker_list=self.send_marker_list, close_marker=self.close_marker, timeout=self.timeout)
self.results = Results(proc_results[0], proc_results[1], proc.returncode, None)
except subprocess.TimeoutExpired as ex:
proc.kill()
wrapped_ex = TimeoutException(ex)
# Read any remaining output
proc_results = communicator.communicate()
self.results = Results(proc_results[0], proc_results[1], proc.returncode, wrapped_ex)
except Exception as ex:
self.results = Results(proc_results[0], proc_results[1], proc.returncode, ex)
raise ex
finally:
# This data is dumped to stdout so we capture this
# information no matter where a test fails.
print("Command line: {}".format(" ".join(self.cmd_line)))
print("Exit code: {}".format(proc.returncode))
print("Stdout: {}".format(proc_results[0].decode("utf-8", "backslashreplace")))
print("Stderr: {}".format(proc_results[1].decode("utf-8", "backslashreplace")))
def _process_ready(self):
"""Condition variable predicate"""
return self.process_ready is True
def _results_ready(self):
"""Condition variable predicate"""
return self.results is not None
def get_cmd_line(self):
return self.cmd_line
def launch(self):
"""
This method must be implemented by the subclass.
It should call the run function.
"""
raise NotImplementedError
def get_results(self, send_data=None):
"""
Block until the results are ready, or a timeout is reached.
Return the results, or raise the timeout exception.
"""
with self.results_condition:
result = self.results_condition.wait_for(self._results_ready, timeout=self.timeout)
if result is False:
raise Exception("Timeout")
yield self.results
| apache-2.0 |
i17c/selenium | py/selenium/webdriver/safari/service.py | 71 | 2879 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from os import devnull
import subprocess
from subprocess import PIPE
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
class Service(object):
"""
Object that manages the starting and stopping of the SafariDriver
"""
def __init__(self, executable_path, port=0, quiet=False):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to the SafariDriver
- port : Port the service is running on """
self.port = port
self.path = executable_path
if self.port == 0:
self.port = utils.free_port()
self.quiet = quiet
def start(self):
"""
Starts the SafariDriver Service.
:Exceptions:
- WebDriverException : Raised either when it can't start the service
or when it can't connect to the service
"""
kwargs = dict()
if self.quiet:
devnull_out = open(devnull, 'w')
kwargs.update(stdout=devnull_out,
stderr=devnull_out)
try:
self.process = subprocess.Popen(["java", "-jar", self.path, "-port", "%s" % self.port],
**kwargs)
except:
raise WebDriverException(
"SafariDriver executable needs to be available in the path.")
time.sleep(10)
count = 0
while not utils.is_connectable(self.port):
count += 1
time.sleep(1)
if count == 30:
raise WebDriverException("Can not connect to the SafariDriver")
@property
def service_url(self):
"""
Gets the url of the SafariDriver Service
"""
return "http://localhost:%d/wd/hub" % self.port
def stop(self):
"""
Tells the SafariDriver to stop and cleans up the process
"""
# If it's dead don't worry
if self.process is None:
return
self.process.kill()
self.process.wait()
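# Minimal usage sketch (illustrative only; the jar path below is an
# assumption, not part of this module):
#
#   service = Service("/path/to/selenium-server-standalone.jar", quiet=True)
#   try:
#       service.start()
#       print(service.service_url)  # e.g. http://localhost:<port>/wd/hub
#   finally:
#       service.stop()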
| apache-2.0 |
Tejal011089/fbd_erpnext | erpnext/stock/doctype/stock_entry/test_stock_entry.py | 6 | 22688 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, unittest
import frappe.defaults
from frappe.utils import flt, nowdate, nowtime, add_days
from erpnext.stock.doctype.serial_no.serial_no import *
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt \
import set_perpetual_inventory
from erpnext.stock.doctype.stock_ledger_entry.stock_ledger_entry import StockFreezeError
from erpnext.stock.stock_ledger import get_previous_sle
from erpnext.stock.doctype.stock_reconciliation.test_stock_reconciliation import create_stock_reconciliation
def get_sle(**args):
condition, values = "", []
for key, value in args.iteritems():
condition += " and " if condition else " where "
condition += "`{0}`=%s".format(key)
values.append(value)
return frappe.db.sql("""select * from `tabStock Ledger Entry` %s
order by timestamp(posting_date, posting_time) desc, name desc limit 1"""% condition,
values, as_dict=1)
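# For example (descriptive comment, not original code):
#   get_sle(item_code="_Test Item", warehouse="_Test Warehouse - _TC")
# returns the single most recent Stock Ledger Entry row for that
# item/warehouse pair, ordered by posting date/time descending.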
class TestStockEntry(unittest.TestCase):
def tearDown(self):
frappe.set_user("Administrator")
set_perpetual_inventory(0)
def test_fifo(self):
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
item_code = "_Test Item 2"
warehouse = "_Test Warehouse - _TC"
create_stock_reconciliation(item_code="_Test Item 2", warehouse="_Test Warehouse - _TC",
qty=0, rate=100)
make_stock_entry(item_code=item_code, target=warehouse, qty=1, incoming_rate=10)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[1, 10]], eval(sle.stock_queue))
# negative qty
make_stock_entry(item_code=item_code, source=warehouse, qty=2, incoming_rate=10)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[-1, 10]], eval(sle.stock_queue))
# further negative
make_stock_entry(item_code=item_code, source=warehouse, qty=1)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[-2, 10]], eval(sle.stock_queue))
# move stock to positive
make_stock_entry(item_code=item_code, target=warehouse, qty=3, incoming_rate=20)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[1, 20]], eval(sle.stock_queue))
# incoming entry with diff rate
make_stock_entry(item_code=item_code, target=warehouse, qty=1, incoming_rate=30)
sle = get_sle(item_code = item_code, warehouse = warehouse)[0]
self.assertEqual([[1, 20],[1, 30]], eval(sle.stock_queue))
frappe.db.set_default("allow_negative_stock", 0)
def test_auto_material_request(self):
self._test_auto_material_request("_Test Item")
def test_auto_material_request_for_variant(self):
manage_variant = frappe.new_doc("Manage Variants")
manage_variant.update({
"item_code": "_Test Variant Item",
"attributes": [
{
"attribute": "Test Size",
"attribute_value": "Small"
}
]
})
manage_variant.generate_combinations()
manage_variant.create_variants()
self._test_auto_material_request("_Test Variant Item-S")
def _test_auto_material_request(self, item_code):
item = frappe.get_doc("Item", item_code)
if item.variant_of:
template = frappe.get_doc("Item", item.variant_of)
else:
template = item
projected_qty, actual_qty = frappe.db.get_value("Bin", {"item_code": item_code,
"warehouse": "_Test Warehouse - _TC"}, ["projected_qty", "actual_qty"]) or [0, 0]
# stock entry reqd for auto-reorder
create_stock_reconciliation(item_code=item_code, warehouse="_Test Warehouse - _TC",
qty = actual_qty + abs(projected_qty) + 10, rate=100)
projected_qty = frappe.db.get_value("Bin", {"item_code": item_code,
"warehouse": "_Test Warehouse - _TC"}, "projected_qty") or 0
frappe.db.set_value("Stock Settings", None, "auto_indent", 1)
# update re-level qty so that it is more than projected_qty
if projected_qty >= template.reorder_levels[0].warehouse_reorder_level:
template.reorder_levels[0].warehouse_reorder_level += projected_qty
template.save()
from erpnext.stock.reorder_item import reorder_item
mr_list = reorder_item()
frappe.db.set_value("Stock Settings", None, "auto_indent", 0)
items = []
for mr in mr_list:
for d in mr.items:
items.append(d.item_code)
self.assertTrue(item_code in items)
def test_material_receipt_gl_entry(self):
set_perpetual_inventory()
mr = make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC",
qty=50, incoming_rate=100)
stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
"warehouse": mr.get("items")[0].t_warehouse})
self.check_stock_ledger_entries("Stock Entry", mr.name,
[["_Test Item", "_Test Warehouse - _TC", 50.0]])
self.check_gl_entries("Stock Entry", mr.name,
sorted([
[stock_in_hand_account, 5000.0, 0.0],
["Stock Adjustment - _TC", 0.0, 5000.0]
])
)
mr.cancel()
self.assertFalse(frappe.db.sql("""select * from `tabStock Ledger Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mr.name))
self.assertFalse(frappe.db.sql("""select * from `tabGL Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mr.name))
def test_material_issue_gl_entry(self):
set_perpetual_inventory()
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC",
qty=50, incoming_rate=100)
mi = make_stock_entry(item_code="_Test Item", source="_Test Warehouse - _TC", qty=40)
self.check_stock_ledger_entries("Stock Entry", mi.name,
[["_Test Item", "_Test Warehouse - _TC", -40.0]])
stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
"warehouse": "_Test Warehouse - _TC"})
stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
"voucher_no": mi.name}, "stock_value_difference"))
self.check_gl_entries("Stock Entry", mi.name,
sorted([
[stock_in_hand_account, 0.0, stock_value_diff],
["Stock Adjustment - _TC", stock_value_diff, 0.0]
])
)
mi.cancel()
self.assertFalse(frappe.db.sql("""select name from `tabStock Ledger Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mi.name))
self.assertFalse(frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mi.name))
def test_material_transfer_gl_entry(self):
set_perpetual_inventory()
create_stock_reconciliation(qty=100, rate=100)
mtn = make_stock_entry(item_code="_Test Item", source="_Test Warehouse - _TC",
target="_Test Warehouse 1 - _TC", qty=45)
self.check_stock_ledger_entries("Stock Entry", mtn.name,
[["_Test Item", "_Test Warehouse - _TC", -45.0], ["_Test Item", "_Test Warehouse 1 - _TC", 45.0]])
stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
"warehouse": mtn.get("items")[0].s_warehouse})
fixed_asset_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
"warehouse": mtn.get("items")[0].t_warehouse})
stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
"voucher_no": mtn.name, "warehouse": "_Test Warehouse - _TC"}, "stock_value_difference"))
self.check_gl_entries("Stock Entry", mtn.name,
sorted([
[stock_in_hand_account, 0.0, stock_value_diff],
[fixed_asset_account, stock_value_diff, 0.0],
])
)
mtn.cancel()
self.assertFalse(frappe.db.sql("""select * from `tabStock Ledger Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mtn.name))
self.assertFalse(frappe.db.sql("""select * from `tabGL Entry`
where voucher_type='Stock Entry' and voucher_no=%s""", mtn.name))
def test_repack_no_change_in_valuation(self):
set_perpetual_inventory(0)
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, incoming_rate=100)
make_stock_entry(item_code="_Test Item Home Desktop 100", target="_Test Warehouse - _TC",
qty=50, incoming_rate=100)
repack = frappe.copy_doc(test_records[3])
repack.posting_date = nowdate()
repack.posting_time = nowtime()
repack.insert()
repack.submit()
self.check_stock_ledger_entries("Stock Entry", repack.name,
[["_Test Item", "_Test Warehouse - _TC", -50.0],
["_Test Item Home Desktop 100", "_Test Warehouse - _TC", 1]])
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type='Stock Entry' and voucher_no=%s
order by account desc""", repack.name, as_dict=1)
self.assertFalse(gl_entries)
set_perpetual_inventory(0)
def test_repack_with_change_in_valuation(self):
set_perpetual_inventory()
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, incoming_rate=100)
repack = frappe.copy_doc(test_records[3])
repack.posting_date = nowdate()
repack.posting_time = nowtime()
repack.additional_operating_cost = 1000.0
repack.insert()
repack.submit()
stock_in_hand_account = frappe.db.get_value("Account", {"account_type": "Warehouse",
"warehouse": repack.get("items")[1].t_warehouse})
rm_stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
"voucher_no": repack.name, "item_code": "_Test Item"}, "stock_value_difference"))
fg_stock_value_diff = abs(frappe.db.get_value("Stock Ledger Entry", {"voucher_type": "Stock Entry",
"voucher_no": repack.name, "item_code": "_Test Item Home Desktop 100"}, "stock_value_difference"))
stock_value_diff = flt(fg_stock_value_diff - rm_stock_value_diff, 2)
self.check_gl_entries("Stock Entry", repack.name,
sorted([
[stock_in_hand_account, stock_value_diff, 0.0],
["Stock Adjustment - _TC", 0.0, stock_value_diff],
])
)
set_perpetual_inventory(0)
def check_stock_ledger_entries(self, voucher_type, voucher_no, expected_sle):
expected_sle.sort(key=lambda x: x[0])
# check stock ledger entries
sle = frappe.db.sql("""select item_code, warehouse, actual_qty
from `tabStock Ledger Entry` where voucher_type = %s
and voucher_no = %s order by item_code, warehouse, actual_qty""",
(voucher_type, voucher_no), as_list=1)
self.assertTrue(sle)
sle.sort(key=lambda x: x[0])
for i, sle in enumerate(sle):
self.assertEquals(expected_sle[i][0], sle[0])
self.assertEquals(expected_sle[i][1], sle[1])
self.assertEquals(expected_sle[i][2], sle[2])
def check_gl_entries(self, voucher_type, voucher_no, expected_gl_entries):
expected_gl_entries.sort(key=lambda x: x[0])
gl_entries = frappe.db.sql("""select account, debit, credit
from `tabGL Entry` where voucher_type=%s and voucher_no=%s
order by account asc, debit asc""", (voucher_type, voucher_no), as_list=1)
self.assertTrue(gl_entries)
gl_entries.sort(key=lambda x: x[0])
for i, gle in enumerate(gl_entries):
self.assertEquals(expected_gl_entries[i][0], gle[0])
self.assertEquals(expected_gl_entries[i][1], gle[1])
self.assertEquals(expected_gl_entries[i][2], gle[2])
def test_serial_no_not_reqd(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].serial_no = "ABCD"
se.insert()
self.assertRaises(SerialNoNotRequiredError, se.submit)
def test_serial_no_reqd(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 2
se.get("items")[0].transfer_qty = 2
se.insert()
self.assertRaises(SerialNoRequiredError, se.submit)
def test_serial_no_qty_more(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 2
se.get("items")[0].serial_no = "ABCD\nEFGH\nXYZ"
se.get("items")[0].transfer_qty = 2
se.insert()
self.assertRaises(SerialNoQtyError, se.submit)
def test_serial_no_qty_less(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 2
se.get("items")[0].serial_no = "ABCD"
se.get("items")[0].transfer_qty = 2
se.insert()
self.assertRaises(SerialNoQtyError, se.submit)
def test_serial_no_transfer_in(self):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 2
se.get("items")[0].serial_no = "ABCD\nEFGH"
se.get("items")[0].transfer_qty = 2
se.insert()
se.submit()
self.assertTrue(frappe.db.exists("Serial No", "ABCD"))
self.assertTrue(frappe.db.exists("Serial No", "EFGH"))
se.cancel()
self.assertFalse(frappe.db.get_value("Serial No", "ABCD", "warehouse"))
def test_serial_no_not_exists(self):
frappe.db.sql("delete from `tabSerial No` where name in ('ABCD', 'EFGH')")
make_serialized_item(target_warehouse="_Test Warehouse 1 - _TC")
se = frappe.copy_doc(test_records[0])
se.purpose = "Material Issue"
se.get("items")[0].item_code = "_Test Serialized Item With Series"
se.get("items")[0].qty = 2
se.get("items")[0].s_warehouse = "_Test Warehouse 1 - _TC"
se.get("items")[0].t_warehouse = None
se.get("items")[0].serial_no = "ABCD\nEFGH"
se.get("items")[0].transfer_qty = 2
se.insert()
self.assertRaises(SerialNoNotExistsError, se.submit)
def test_serial_duplicate(self):
se, serial_nos = self.test_serial_by_series()
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = "_Test Serialized Item With Series"
se.get("items")[0].qty = 1
se.get("items")[0].serial_no = serial_nos[0]
se.get("items")[0].transfer_qty = 1
se.insert()
self.assertRaises(SerialNoDuplicateError, se.submit)
def test_serial_by_series(self):
se = make_serialized_item()
serial_nos = get_serial_nos(se.get("items")[0].serial_no)
self.assertTrue(frappe.db.exists("Serial No", serial_nos[0]))
self.assertTrue(frappe.db.exists("Serial No", serial_nos[1]))
return se, serial_nos
def test_serial_item_error(self):
se, serial_nos = self.test_serial_by_series()
make_serialized_item("_Test Serialized Item", "ABCD\nEFGH")
se = frappe.copy_doc(test_records[0])
se.purpose = "Material Transfer"
se.get("items")[0].item_code = "_Test Serialized Item"
se.get("items")[0].qty = 1
se.get("items")[0].transfer_qty = 1
se.get("items")[0].serial_no = serial_nos[0]
se.get("items")[0].s_warehouse = "_Test Warehouse - _TC"
se.get("items")[0].t_warehouse = "_Test Warehouse 1 - _TC"
se.insert()
self.assertRaises(SerialNoItemError, se.submit)
def test_serial_move(self):
se = make_serialized_item()
serial_no = get_serial_nos(se.get("items")[0].serial_no)[0]
se = frappe.copy_doc(test_records[0])
se.purpose = "Material Transfer"
se.get("items")[0].item_code = "_Test Serialized Item With Series"
se.get("items")[0].qty = 1
se.get("items")[0].transfer_qty = 1
se.get("items")[0].serial_no = serial_no
se.get("items")[0].s_warehouse = "_Test Warehouse - _TC"
se.get("items")[0].t_warehouse = "_Test Warehouse 1 - _TC"
se.insert()
se.submit()
self.assertTrue(frappe.db.get_value("Serial No", serial_no, "warehouse"), "_Test Warehouse 1 - _TC")
se.cancel()
self.assertTrue(frappe.db.get_value("Serial No", serial_no, "warehouse"), "_Test Warehouse - _TC")
def test_serial_warehouse_error(self):
make_serialized_item(target_warehouse="_Test Warehouse 1 - _TC")
t = make_serialized_item()
serial_nos = get_serial_nos(t.get("items")[0].serial_no)
se = frappe.copy_doc(test_records[0])
se.purpose = "Material Transfer"
se.get("items")[0].item_code = "_Test Serialized Item With Series"
se.get("items")[0].qty = 1
se.get("items")[0].transfer_qty = 1
se.get("items")[0].serial_no = serial_nos[0]
se.get("items")[0].s_warehouse = "_Test Warehouse 1 - _TC"
se.get("items")[0].t_warehouse = "_Test Warehouse - _TC"
se.insert()
self.assertRaises(SerialNoWarehouseError, se.submit)
def test_serial_cancel(self):
se, serial_nos = self.test_serial_by_series()
se.cancel()
serial_no = get_serial_nos(se.get("items")[0].serial_no)[0]
self.assertFalse(frappe.db.get_value("Serial No", serial_no, "warehouse"))
def test_warehouse_company_validation(self):
set_perpetual_inventory(0)
frappe.get_doc("User", "test2@example.com")\
.add_roles("Sales User", "Sales Manager", "Stock User", "Stock Manager")
frappe.set_user("test2@example.com")
from erpnext.stock.utils import InvalidWarehouseCompany
st1 = frappe.copy_doc(test_records[0])
st1.get("items")[0].t_warehouse="_Test Warehouse 2 - _TC1"
st1.insert()
self.assertRaises(InvalidWarehouseCompany, st1.submit)
# permission tests
def test_warehouse_user(self):
set_perpetual_inventory(0)
frappe.defaults.add_default("Warehouse", "_Test Warehouse 1 - _TC", "test@example.com", "User Permission")
frappe.defaults.add_default("Warehouse", "_Test Warehouse 2 - _TC1", "test2@example.com", "User Permission")
test_user = frappe.get_doc("User", "test@example.com")
test_user.add_roles("Sales User", "Sales Manager", "Stock User")
test_user.remove_roles("Stock Manager")
frappe.get_doc("User", "test2@example.com")\
.add_roles("Sales User", "Sales Manager", "Stock User", "Stock Manager")
frappe.set_user("test@example.com")
st1 = frappe.copy_doc(test_records[0])
st1.company = "_Test Company 1"
st1.get("items")[0].t_warehouse="_Test Warehouse 2 - _TC1"
self.assertRaises(frappe.PermissionError, st1.insert)
frappe.set_user("test2@example.com")
st1 = frappe.copy_doc(test_records[0])
st1.company = "_Test Company 1"
st1.get("items")[0].t_warehouse="_Test Warehouse 2 - _TC1"
st1.insert()
st1.submit()
frappe.defaults.clear_default("Warehouse", "_Test Warehouse 1 - _TC",
"test@example.com", parenttype="User Permission")
frappe.defaults.clear_default("Warehouse", "_Test Warehouse 2 - _TC1",
"test2@example.com", parenttype="User Permission")
def test_freeze_stocks(self):
frappe.db.set_value('Stock Settings', None,'stock_auth_role', '')
# test freeze_stocks_upto
frappe.db.set_value("Stock Settings", None, "stock_frozen_upto", add_days(nowdate(), 5))
se = frappe.copy_doc(test_records[0]).insert()
self.assertRaises(StockFreezeError, se.submit)
frappe.db.set_value("Stock Settings", None, "stock_frozen_upto", '')
# test freeze_stocks_upto_days
frappe.db.set_value("Stock Settings", None, "stock_frozen_upto_days", 7)
se = frappe.copy_doc(test_records[0])
se.posting_date = add_days(nowdate(), -15)
se.insert()
self.assertRaises(StockFreezeError, se.submit)
frappe.db.set_value("Stock Settings", None, "stock_frozen_upto_days", 0)
def test_production_order(self):
bom_no, bom_operation_cost = frappe.db.get_value("BOM", {"item": "_Test FG Item 2",
"is_default": 1, "docstatus": 1}, ["name", "operating_cost"])
production_order = frappe.new_doc("Production Order")
production_order.update({
"company": "_Test Company",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"production_item": "_Test FG Item 2",
"bom_no": bom_no,
"qty": 1.0,
"stock_uom": "_Test UOM",
"wip_warehouse": "_Test Warehouse - _TC"
})
production_order.insert()
production_order.submit()
make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC", qty=50, incoming_rate=100)
stock_entry = frappe.new_doc("Stock Entry")
stock_entry.update({
"purpose": "Manufacture",
"production_order": production_order.name,
"bom_no": bom_no,
"fg_completed_qty": "1",
"additional_operating_cost": 1000
})
stock_entry.get_items()
rm_cost = 0
for d in stock_entry.get("items"):
if d.s_warehouse:
rm_cost += flt(d.amount)
fg_cost = filter(lambda x: x.item_code=="_Test FG Item 2", stock_entry.get("items"))[0].amount
self.assertEqual(fg_cost,
flt(rm_cost + bom_operation_cost + stock_entry.additional_operating_cost, 2))
def test_variant_production_order(self):
bom_no = frappe.db.get_value("BOM", {"item": "_Test Variant Item",
"is_default": 1, "docstatus": 1})
production_order = frappe.new_doc("Production Order")
production_order.update({
"company": "_Test Company",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"production_item": "_Test Variant Item-S",
"bom_no": bom_no,
"qty": 1.0,
"stock_uom": "_Test UOM",
"wip_warehouse": "_Test Warehouse - _TC"
})
production_order.insert()
production_order.submit()
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
stock_entry = frappe.get_doc(make_stock_entry(production_order.name, "Manufacture", 1))
stock_entry.insert()
self.assertTrue("_Test Variant Item-S" in [d.item_code for d in stock_entry.items])
def make_serialized_item(item_code=None, serial_no=None, target_warehouse=None):
se = frappe.copy_doc(test_records[0])
se.get("items")[0].item_code = item_code or "_Test Serialized Item With Series"
se.get("items")[0].serial_no = serial_no
se.get("items")[0].qty = 2
se.get("items")[0].transfer_qty = 2
if target_warehouse:
se.get("items")[0].t_warehouse = target_warehouse
se.insert()
se.submit()
return se
def make_stock_entry(**args):
from erpnext.accounts.utils import get_fiscal_year
s = frappe.new_doc("Stock Entry")
args = frappe._dict(args)
if args.posting_date:
s.posting_date = args.posting_date
if args.posting_time:
s.posting_time = args.posting_time
if not args.purpose:
if args.source and args.target:
s.purpose = "Material Transfer"
elif args.source:
s.purpose = "Material Issue"
else:
s.purpose = "Material Receipt"
else:
s.purpose = args.purpose
s.company = args.company or "_Test Company"
s.fiscal_year = get_fiscal_year(s.posting_date)[0]
s.purchase_receipt_no = args.purchase_receipt_no
s.delivery_note_no = args.delivery_note_no
s.sales_invoice_no = args.sales_invoice_no
s.difference_account = args.difference_account or "Stock Adjustment - _TC"
s.append("items", {
"item_code": args.item or args.item_code or "_Test Item",
"s_warehouse": args.from_warehouse or args.source,
"t_warehouse": args.to_warehouse or args.target,
"qty": args.qty,
"incoming_rate": args.incoming_rate,
"expense_account": args.expense_account or "Stock Adjustment - _TC",
"conversion_factor": 1.0,
"cost_center": "_Test Cost Center - _TC"
})
if not args.do_not_save:
s.insert()
if not args.do_not_submit:
s.submit()
return s
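# Illustrative call (added, not part of the original tests): because only
# `target` is given, purpose defaults to "Material Receipt", and the entry is
# inserted and submitted unless do_not_save/do_not_submit are passed:
#   make_stock_entry(item_code="_Test Item", target="_Test Warehouse - _TC",
#                    qty=10, incoming_rate=100)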
def get_qty_after_transaction(**args):
args = frappe._dict(args)
last_sle = get_previous_sle({
"item_code": args.item_code or "_Test Item",
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"posting_date": args.posting_date or nowdate(),
"posting_time": args.posting_time or nowtime()
})
return flt(last_sle.get("qty_after_transaction"))
test_records = frappe.get_test_records('Stock Entry')
| agpl-3.0 |
zhuanxuhit/deep-learning | embeddings/utils.py | 150 | 2194 | import re
from collections import Counter
import numpy as np  # required by get_batches below
def preprocess(text):
# Replace punctuation with tokens so we can use them in our model
text = text.lower()
text = text.replace('.', ' <PERIOD> ')
text = text.replace(',', ' <COMMA> ')
text = text.replace('"', ' <QUOTATION_MARK> ')
text = text.replace(';', ' <SEMICOLON> ')
text = text.replace('!', ' <EXCLAMATION_MARK> ')
text = text.replace('?', ' <QUESTION_MARK> ')
text = text.replace('(', ' <LEFT_PAREN> ')
text = text.replace(')', ' <RIGHT_PAREN> ')
text = text.replace('--', ' <HYPHENS> ')
text = text.replace('?', ' <QUESTION_MARK> ')
# text = text.replace('\n', ' <NEW_LINE> ')
text = text.replace(':', ' <COLON> ')
words = text.split()
# Remove all words with 5 or fewer occurences
word_counts = Counter(words)
trimmed_words = [word for word in words if word_counts[word] > 5]
return trimmed_words
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: A list where each item is a tuple of (batch of input, batch of target).
"""
n_batches = int(len(int_text) / (batch_size * seq_length))
# Drop the last few characters to make only full batches
xdata = np.array(int_text[: n_batches * batch_size * seq_length])
ydata = np.array(int_text[1: n_batches * batch_size * seq_length + 1])
x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1)
y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1)
return list(zip(x_batches, y_batches))
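# Worked example (illustrative): with int_text = list(range(13)),
# batch_size=2 and seq_length=3, n_batches is 2, so get_batches returns two
# (x, y) pairs of shape (2, 3) each:
#   x[0] == [[0, 1, 2], [6, 7, 8]] and y[0] == [[1, 2, 3], [7, 8, 9]].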
def create_lookup_tables(words):
"""
Create lookup tables for vocabulary
:param words: Input list of words
:return: A tuple of dicts (vocab_to_int, int_to_vocab): the first maps each word to an integer id, the second maps ids back to words.
"""
word_counts = Counter(words)
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
return vocab_to_int, int_to_vocab
| mit |
MOSAIC-UA/802.11ah-ns3 | ns-3/.waf-1.8.12-f00e5b53f6bbeab1384a38c9cc5d51f7/waflib/Tools/cxx.py | 20 | 1217 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
from waflib import TaskGen,Task
from waflib.Tools import c_preproc
from waflib.Tools.ccroot import link_task,stlink_task
@TaskGen.extension('.cpp','.cc','.cxx','.C','.c++')
def cxx_hook(self,node):
return self.create_compiled_task('cxx',node)
if not'.c'in TaskGen.task_gen.mappings:
TaskGen.task_gen.mappings['.c']=TaskGen.task_gen.mappings['.cpp']
class cxx(Task.Task):
run_str='${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${CPPFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT[0].abspath()}'
vars=['CXXDEPS']
ext_in=['.h']
scan=c_preproc.scan
class cxxprogram(link_task):
run_str='${LINK_CXX} ${LINKFLAGS} ${CXXLNK_SRC_F}${SRC} ${CXXLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LDFLAGS}'
vars=['LINKDEPS']
ext_out=['.bin']
inst_to='${BINDIR}'
class cxxshlib(cxxprogram):
inst_to='${LIBDIR}'
class cxxstlib(stlink_task):
pass
| gpl-2.0 |
noba3/KoTos | addons/script.module.youtube.dl/lib/youtube_dl/extractor/zdf.py | 12 | 6292 | # coding: utf-8
from __future__ import unicode_literals
import functools
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
OnDemandPagedList,
xpath_text,
)
def extract_from_xml_url(ie, video_id, xml_url):
doc = ie._download_xml(
xml_url, video_id,
note='Downloading video info',
errnote='Failed to download video info')
title = doc.find('.//information/title').text
description = xpath_text(doc, './/information/detail', 'description')
duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration'))
uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader')
uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id')
upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date'))
def xml_to_format(fnode):
video_url = fnode.find('url').text
is_available = 'http://www.metafilegenerator' not in video_url
format_id = fnode.attrib['basetype']
format_m = re.match(r'''(?x)
(?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
(?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
''', format_id)
ext = format_m.group('container')
proto = format_m.group('proto').lower()
quality = xpath_text(fnode, './quality', 'quality')
abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000)
vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000)
width = int_or_none(xpath_text(fnode, './width', 'width'))
height = int_or_none(xpath_text(fnode, './height', 'height'))
filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize'))
format_note = ''
if not format_note:
format_note = None
return {
'format_id': format_id + '-' + quality,
'url': video_url,
'ext': ext,
'acodec': format_m.group('acodec'),
'vcodec': format_m.group('vcodec'),
'abr': abr,
'vbr': vbr,
'width': width,
'height': height,
'filesize': filesize,
'format_note': format_note,
'protocol': proto,
'_available': is_available,
}
def xml_to_thumbnails(fnode):
thumbnails = []
for node in fnode:
thumbnail_url = node.text
if not thumbnail_url:
continue
thumbnail = {
'url': thumbnail_url,
}
if 'key' in node.attrib:
m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key'])
if m:
thumbnail['width'] = int(m.group(1))
thumbnail['height'] = int(m.group(2))
thumbnails.append(thumbnail)
return thumbnails
thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage'))
format_nodes = doc.findall('.//formitaeten/formitaet')
formats = list(filter(
lambda f: f['_available'],
map(xml_to_format, format_nodes)))
ie._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'thumbnails': thumbnails,
'uploader': uploader,
'uploader_id': uploader_id,
'upload_date': upload_date,
'formats': formats,
}
class ZDFIE(InfoExtractor):
_VALID_URL = r'(?:zdf:|zdf:video:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/(.*beitrag/(?:video/)?))(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
_TEST = {
'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt',
'info_dict': {
'id': '2037704',
'ext': 'webm',
'title': 'ZDFspezial - Ende des Machtpokers',
'description': 'Union und SPD haben sich auf einen Koalitionsvertrag geeinigt. Aber was bedeutet das für die Bürger? Sehen Sie hierzu das ZDFspezial "Ende des Machtpokers - Große Koalition für Deutschland".',
'duration': 1022,
'uploader': 'spezial',
'uploader_id': '225948',
'upload_date': '20131127',
},
'skip': 'Videos on ZDF.de are depublicised in short order',
}
def _real_extract(self, url):
video_id = self._match_id(url)
xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
return extract_from_xml_url(self, video_id, xml_url)
class ZDFChannelIE(InfoExtractor):
_VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/)(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.zdf.de/ZDFmediathek#/kanaluebersicht/1586442/sendung/Titanic',
'info_dict': {
'id': '1586442',
},
'playlist_count': 3,
}
_PAGE_SIZE = 50
def _fetch_page(self, channel_id, page):
offset = page * self._PAGE_SIZE
xml_url = (
'http://www.zdf.de/ZDFmediathek/xmlservice/web/aktuellste?ak=web&offset=%d&maxLength=%d&id=%s'
% (offset, self._PAGE_SIZE, channel_id))
doc = self._download_xml(
xml_url, channel_id,
note='Downloading channel info',
errnote='Failed to download channel info')
title = doc.find('.//information/title').text
description = doc.find('.//information/detail').text
for asset in doc.findall('.//teasers/teaser'):
a_type = asset.find('./type').text
a_id = asset.find('./details/assetId').text
if a_type not in ('video', 'topic'):
continue
yield {
'_type': 'url',
'playlist_title': title,
'playlist_description': description,
'url': 'zdf:%s:%s' % (a_type, a_id),
}
def _real_extract(self, url):
channel_id = self._match_id(url)
entries = OnDemandPagedList(
functools.partial(self._fetch_page, channel_id), self._PAGE_SIZE)
return {
'_type': 'playlist',
'id': channel_id,
'entries': entries,
}
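# Descriptive note (added): OnDemandPagedList invokes the partial
# _fetch_page(channel_id, page) lazily as playlist entries are consumed, so
# only the 50-entry pages (_PAGE_SIZE) that are actually needed are fetched.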
| gpl-2.0 |
bruderstein/PythonScript | PythonLib/full/unittest/test/testmock/testmagicmethods.py | 6 | 16243 | import math
import unittest
import os
from asyncio import iscoroutinefunction
from unittest.mock import AsyncMock, Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest.TestCase):
def test_deleting_magic_methods(self):
mock = Mock()
self.assertFalse(hasattr(mock, '__getitem__'))
mock.__getitem__ = Mock()
self.assertTrue(hasattr(mock, '__getitem__'))
del mock.__getitem__
self.assertFalse(hasattr(mock, '__getitem__'))
def test_magicmock_del(self):
mock = MagicMock()
# before using getitem
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
mock = MagicMock()
# this time use it first
mock['foo']
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
def test_magic_method_wrapping(self):
mock = Mock()
def f(self, name):
return self, 'fish'
mock.__getitem__ = f
self.assertIsNot(mock.__getitem__, f)
self.assertEqual(mock['foo'], (mock, 'fish'))
self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
mock.__getitem__ = mock
self.assertIs(mock.__getitem__, mock)
def test_magic_methods_isolated_between_mocks(self):
mock1 = Mock()
mock2 = Mock()
mock1.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock1), [])
self.assertRaises(TypeError, lambda: list(mock2))
def test_repr(self):
mock = Mock()
self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
mock.__repr__ = lambda s: 'foo'
self.assertEqual(repr(mock), 'foo')
def test_str(self):
mock = Mock()
self.assertEqual(str(mock), object.__str__(mock))
mock.__str__ = lambda s: 'foo'
self.assertEqual(str(mock), 'foo')
def test_dict_methods(self):
mock = Mock()
self.assertRaises(TypeError, lambda: mock['foo'])
def _del():
del mock['foo']
def _set():
mock['foo'] = 3
self.assertRaises(TypeError, _del)
self.assertRaises(TypeError, _set)
_dict = {}
def getitem(s, name):
return _dict[name]
def setitem(s, name, value):
_dict[name] = value
def delitem(s, name):
del _dict[name]
mock.__setitem__ = setitem
mock.__getitem__ = getitem
mock.__delitem__ = delitem
self.assertRaises(KeyError, lambda: mock['foo'])
mock['foo'] = 'bar'
self.assertEqual(_dict, {'foo': 'bar'})
self.assertEqual(mock['foo'], 'bar')
del mock['foo']
self.assertEqual(_dict, {})
def test_numeric(self):
original = mock = Mock()
mock.value = 0
self.assertRaises(TypeError, lambda: mock + 3)
def add(self, other):
mock.value += other
return self
mock.__add__ = add
self.assertEqual(mock + 3, mock)
self.assertEqual(mock.value, 3)
del mock.__add__
def iadd(mock):
mock += 3
self.assertRaises(TypeError, iadd, mock)
mock.__iadd__ = add
mock += 6
self.assertEqual(mock, original)
self.assertEqual(mock.value, 9)
self.assertRaises(TypeError, lambda: 3 + mock)
mock.__radd__ = add
self.assertEqual(7 + mock, mock)
self.assertEqual(mock.value, 16)
def test_division(self):
original = mock = Mock()
mock.value = 32
self.assertRaises(TypeError, lambda: mock / 2)
def truediv(self, other):
mock.value /= other
return self
mock.__truediv__ = truediv
self.assertEqual(mock / 2, mock)
self.assertEqual(mock.value, 16)
del mock.__truediv__
def itruediv(mock):
mock /= 4
self.assertRaises(TypeError, itruediv, mock)
mock.__itruediv__ = truediv
mock /= 8
self.assertEqual(mock, original)
self.assertEqual(mock.value, 2)
self.assertRaises(TypeError, lambda: 8 / mock)
mock.__rtruediv__ = truediv
self.assertEqual(0.5 / mock, mock)
self.assertEqual(mock.value, 4)
def test_hash(self):
mock = Mock()
# test delegation
self.assertEqual(hash(mock), Mock.__hash__(mock))
def _hash(s):
return 3
mock.__hash__ = _hash
self.assertEqual(hash(mock), 3)
def test_nonzero(self):
m = Mock()
self.assertTrue(bool(m))
m.__bool__ = lambda s: False
self.assertFalse(bool(m))
def test_comparison(self):
mock = Mock()
def comp(s, o):
return True
mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
self.assertTrue(mock < 3)
self.assertTrue(mock > 3)
self.assertTrue(mock <= 3)
self.assertTrue(mock >= 3)
self.assertRaises(TypeError, lambda: MagicMock() < object())
self.assertRaises(TypeError, lambda: object() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > object())
self.assertRaises(TypeError, lambda: object() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= object())
self.assertRaises(TypeError, lambda: object() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= object())
self.assertRaises(TypeError, lambda: object() >= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
def test_equality(self):
for mock in Mock(), MagicMock():
self.assertEqual(mock == mock, True)
self.assertIsInstance(mock == mock, bool)
self.assertEqual(mock != mock, False)
self.assertIsInstance(mock != mock, bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
def eq(self, other):
return other == 3
mock.__eq__ = eq
self.assertTrue(mock == 3)
self.assertFalse(mock == 4)
def ne(self, other):
return other == 3
mock.__ne__ = ne
self.assertTrue(mock != 3)
self.assertFalse(mock != 4)
mock = MagicMock()
mock.__eq__.return_value = True
self.assertIsInstance(mock == 3, bool)
self.assertEqual(mock == 3, True)
mock.__ne__.return_value = False
self.assertIsInstance(mock != 3, bool)
self.assertEqual(mock != 3, False)
def test_len_contains_iter(self):
mock = Mock()
self.assertRaises(TypeError, len, mock)
self.assertRaises(TypeError, iter, mock)
self.assertRaises(TypeError, lambda: 'foo' in mock)
mock.__len__ = lambda s: 6
self.assertEqual(len(mock), 6)
mock.__contains__ = lambda s, o: o == 3
self.assertIn(3, mock)
self.assertNotIn(6, mock)
mock.__iter__ = lambda s: iter('foobarbaz')
self.assertEqual(list(mock), list('foobarbaz'))
def test_magicmock(self):
mock = MagicMock()
mock.__iter__.return_value = iter([1, 2, 3])
self.assertEqual(list(mock), [1, 2, 3])
getattr(mock, '__bool__').return_value = False
self.assertFalse(hasattr(mock, '__nonzero__'))
self.assertFalse(bool(mock))
for entry in _magics:
self.assertTrue(hasattr(mock, entry))
self.assertFalse(hasattr(mock, '__imaginary__'))
def test_magic_mock_equality(self):
mock = MagicMock()
self.assertIsInstance(mock == object(), bool)
self.assertIsInstance(mock != object(), bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
self.assertEqual(mock == mock, True)
self.assertEqual(mock != mock, False)
def test_asyncmock_defaults(self):
mock = AsyncMock()
self.assertEqual(int(mock), 1)
self.assertEqual(complex(mock), 1j)
self.assertEqual(float(mock), 1.0)
self.assertNotIn(object(), mock)
self.assertEqual(len(mock), 0)
self.assertEqual(list(mock), [])
self.assertEqual(hash(mock), object.__hash__(mock))
self.assertEqual(str(mock), object.__str__(mock))
self.assertTrue(bool(mock))
self.assertEqual(round(mock), mock.__round__())
self.assertEqual(math.trunc(mock), mock.__trunc__())
self.assertEqual(math.floor(mock), mock.__floor__())
self.assertEqual(math.ceil(mock), mock.__ceil__())
self.assertTrue(iscoroutinefunction(mock.__aexit__))
self.assertTrue(iscoroutinefunction(mock.__aenter__))
self.assertIsInstance(mock.__aenter__, AsyncMock)
self.assertIsInstance(mock.__aexit__, AsyncMock)
# in Python 3 oct and hex use __index__
# so these tests are for __index__ in py3k
self.assertEqual(oct(mock), '0o1')
self.assertEqual(hex(mock), '0x1')
# how to test __sizeof__ ?
def test_magicmock_defaults(self):
mock = MagicMock()
self.assertEqual(int(mock), 1)
self.assertEqual(complex(mock), 1j)
self.assertEqual(float(mock), 1.0)
self.assertNotIn(object(), mock)
self.assertEqual(len(mock), 0)
self.assertEqual(list(mock), [])
self.assertEqual(hash(mock), object.__hash__(mock))
self.assertEqual(str(mock), object.__str__(mock))
self.assertTrue(bool(mock))
self.assertEqual(round(mock), mock.__round__())
self.assertEqual(math.trunc(mock), mock.__trunc__())
self.assertEqual(math.floor(mock), mock.__floor__())
self.assertEqual(math.ceil(mock), mock.__ceil__())
self.assertTrue(iscoroutinefunction(mock.__aexit__))
self.assertTrue(iscoroutinefunction(mock.__aenter__))
self.assertIsInstance(mock.__aenter__, AsyncMock)
self.assertIsInstance(mock.__aexit__, AsyncMock)
# in Python 3 oct and hex use __index__
# so these tests are for __index__ in py3k
self.assertEqual(oct(mock), '0o1')
self.assertEqual(hex(mock), '0x1')
# how to test __sizeof__ ?
def test_magic_methods_fspath(self):
mock = MagicMock()
expected_path = mock.__fspath__()
mock.reset_mock()
self.assertEqual(os.fspath(mock), expected_path)
mock.__fspath__.assert_called_once()
def test_magic_methods_and_spec(self):
class Iterable(object):
def __iter__(self): pass
mock = Mock(spec=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_magic_methods_and_spec_set(self):
class Iterable(object):
def __iter__(self): pass
mock = Mock(spec_set=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec_set=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec_set=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_setting_unsupported_magic_method(self):
mock = MagicMock()
def set_setattr():
mock.__setattr__ = lambda self, name: None
self.assertRaisesRegex(AttributeError,
"Attempting to set unsupported magic method '__setattr__'.",
set_setattr
)
def test_attributes_and_return_value(self):
mock = MagicMock()
attr = mock.foo
def _get_type(obj):
# the type of every mock (or magicmock) is a custom subclass
# so the real type is the second in the mro
return type(obj).__mro__[1]
self.assertEqual(_get_type(attr), MagicMock)
returned = mock()
self.assertEqual(_get_type(returned), MagicMock)
def test_magic_methods_are_magic_mocks(self):
mock = MagicMock()
self.assertIsInstance(mock.__getitem__, MagicMock)
mock[1][2].__getitem__.return_value = 3
self.assertEqual(mock[1][2][3], 3)
def test_magic_method_reset_mock(self):
mock = MagicMock()
str(mock)
self.assertTrue(mock.__str__.called)
mock.reset_mock()
self.assertFalse(mock.__str__.called)
def test_dir(self):
# overriding the default implementation
for mock in Mock(), MagicMock():
def _dir(self):
return ['foo']
mock.__dir__ = _dir
self.assertEqual(dir(mock), ['foo'])
def test_bound_methods(self):
m = Mock()
# XXXX should this be an expected failure instead?
# this seems like it should work, but is hard to do without introducing
# other api inconsistencies. Failure message could be better though.
m.__iter__ = [3].__iter__
self.assertRaises(TypeError, iter, m)
def test_magic_method_type(self):
class Foo(MagicMock):
pass
foo = Foo()
self.assertIsInstance(foo.__int__, Foo)
def test_descriptor_from_class(self):
m = MagicMock()
type(m).__str__.return_value = 'foo'
self.assertEqual(str(m), 'foo')
def test_iterable_as_iter_return_value(self):
m = MagicMock()
m.__iter__.return_value = [1, 2, 3]
self.assertEqual(list(m), [1, 2, 3])
self.assertEqual(list(m), [1, 2, 3])
m.__iter__.return_value = iter([4, 5, 6])
self.assertEqual(list(m), [4, 5, 6])
self.assertEqual(list(m), [])
def test_matmul(self):
m = MagicMock()
self.assertIsInstance(m @ 1, MagicMock)
m.__matmul__.return_value = 42
m.__rmatmul__.return_value = 666
m.__imatmul__.return_value = 24
self.assertEqual(m @ 1, 42)
self.assertEqual(1 @ m, 666)
m @= 24
self.assertEqual(m, 24)
def test_divmod_and_rdivmod(self):
m = MagicMock()
self.assertIsInstance(divmod(5, m), MagicMock)
m.__divmod__.return_value = (2, 1)
self.assertEqual(divmod(m, 2), (2, 1))
m = MagicMock()
foo = divmod(2, m)
self.assertIsInstance(foo, MagicMock)
foo_direct = m.__divmod__(2)
self.assertIsInstance(foo_direct, MagicMock)
bar = divmod(m, 2)
self.assertIsInstance(bar, MagicMock)
bar_direct = m.__rdivmod__(2)
self.assertIsInstance(bar_direct, MagicMock)
# http://bugs.python.org/issue23310
# Check if you can change behaviour of magic methods in MagicMock init
def test_magic_in_initialization(self):
m = MagicMock(**{'__str__.return_value': "12"})
self.assertEqual(str(m), "12")
def test_changing_magic_set_in_initialization(self):
m = MagicMock(**{'__str__.return_value': "12"})
m.__str__.return_value = "13"
self.assertEqual(str(m), "13")
m = MagicMock(**{'__str__.return_value': "12"})
m.configure_mock(**{'__str__.return_value': "14"})
self.assertEqual(str(m), "14")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
TNick/pylearn2 | pylearn2/utils/utlc.py | 49 | 7347 | """Several utilities for experimenting upon utlc datasets"""
# Standard library imports
import logging
import os
import inspect
import zipfile
from tempfile import TemporaryFile
# Third-party imports
import numpy
import theano
from pylearn2.datasets.utlc import load_ndarray_dataset, load_sparse_dataset
from pylearn2.utils import subdict, sharedX
logger = logging.getLogger(__name__)
##################################################
# Shortcuts and auxiliary functions
##################################################
def getboth(dict1, dict2, key, default=None):
"""
Try to retrieve key from dict1 if exists, otherwise try with dict2.
If the key is not found in any of them, raise an exception.
Parameters
----------
dict1 : dict
WRITEME
dict2 : dict
WRITEME
key : WRITEME
default : WRITEME
Returns
-------
WRITEME
"""
try:
return dict1[key]
except KeyError:
if default is None:
return dict2[key]
else:
return dict2.get(key, default)
##################################################
# Datasets loading and contest facilities
##################################################
def load_data(conf):
"""
Loads a specified dataset according to the parameters in the dictionary
Parameters
----------
conf : WRITEME
Returns
-------
WRITEME
"""
logger.info('... loading dataset')
# Special case for sparse format
if conf.get('sparse', False):
expected = inspect.getargspec(load_sparse_dataset)[0][1:]
data = load_sparse_dataset(conf['dataset'], **subdict(conf, expected))
valid, test = data[1:3]
# Sparse TERRY data on LISA servers contains an extra null first row in
# valid and test subsets.
if conf['dataset'] == 'terry':
valid = valid[1:]
test = test[1:]
assert valid.shape[0] == test.shape[0] == 4096, \
'Sparse TERRY data loaded has wrong number of examples'
if len(data) == 3:
return [data[0], valid, test]
else:
return [data[0], valid, test, data[3]]
# Load as the usual ndarray
expected = inspect.getargspec(load_ndarray_dataset)[0][1:]
data = load_ndarray_dataset(conf['dataset'], **subdict(conf, expected))
# Special case for on-the-fly normalization
if conf.get('normalize_on_the_fly', False):
return data
# Allocate shared variables
def shared_dataset(data_x):
"""Function that loads the dataset into shared variables"""
if conf.get('normalize', True):
return sharedX(data_x, borrow=True)
else:
return theano.shared(theano._asarray(data_x), borrow=True)
return map(shared_dataset, data)
def save_submission(conf, valid_repr, test_repr):
"""
Create a submission file given a configuration dictionary and a
representation for valid and test.
Parameters
----------
conf : WRITEME
valid_repr : WRITEME
test_repr : WRITEME
"""
logger.info('... creating zipfile')
# Ensure the given directory is correct
submit_dir = conf['savedir']
if not os.path.exists(submit_dir):
os.makedirs(submit_dir)
elif not os.path.isdir(submit_dir):
raise IOError('savedir %s is not a directory' % submit_dir)
basename = os.path.join(submit_dir, conf['dataset'] + '_' + conf['expname'])
# If there are too many features, output kernel matrices instead
if (valid_repr.shape[1] > valid_repr.shape[0]):
valid_repr = numpy.dot(valid_repr, valid_repr.T)
test_repr = numpy.dot(test_repr, test_repr.T)
# Quantize data
valid_repr = numpy.floor((valid_repr / valid_repr.max())*999)
test_repr = numpy.floor((test_repr / test_repr.max())*999)
# Store the representations in two temporary files
valid_file = TemporaryFile()
test_file = TemporaryFile()
numpy.savetxt(valid_file, valid_repr, fmt="%.3f")
numpy.savetxt(test_file, test_repr, fmt="%.3f")
# Reread those files and put them together in a .zip
valid_file.seek(0)
test_file.seek(0)
submission = zipfile.ZipFile(basename + ".zip", "w",
compression=zipfile.ZIP_DEFLATED)
submission.writestr(basename + '_valid.prepro', valid_file.read())
submission.writestr(basename + '_final.prepro', test_file.read())
submission.close()
valid_file.close()
test_file.close()
def create_submission(conf, transform_valid, transform_test=None, features=None):
"""
Create a submission file given a configuration dictionary and a
computation function.
Note that it always reload the datasets to ensure valid & test
are not permuted.
Parameters
----------
conf : WRITEME
transform_valid : WRITEME
transform_test : WRITEME
features : WRITEME
"""
if transform_test is None:
transform_test = transform_valid
# Load the dataset, without permuting valid and test
kwargs = subdict(conf, ['dataset', 'normalize', 'normalize_on_the_fly', 'sparse'])
kwargs.update(randomize_valid=False, randomize_test=False)
valid_set, test_set = load_data(kwargs)[1:3]
# Sparse datasets are not stored as Theano shared vars.
if not conf.get('sparse', False):
valid_set = valid_set.get_value(borrow=True)
test_set = test_set.get_value(borrow=True)
# Prefilter features, if needed.
if features is not None:
valid_set = valid_set[:, features]
test_set = test_set[:, features]
# Valid and test representations
valid_repr = transform_valid(valid_set)
test_repr = transform_test(test_set)
# Convert into text info
save_submission(conf, valid_repr, test_repr)
##################################################
# Proxies for representation evaluations
##################################################
def compute_alc(valid_repr, test_repr):
"""
Returns the ALC of the valid set VS test set
Note: This proxy won't work in the case of transductive learning
(This is an assumption) but it seems to be a good proxy in the
normal case (i.e only train on training set)
Parameters
----------
valid_repr : WRITEME
test_repr : WRITEME
Returns
-------
WRITEME
"""
# Concatenate the sets, and give different one hot labels for valid and test
n_valid = valid_repr.shape[0]
n_test = test_repr.shape[0]
_labvalid = numpy.hstack((numpy.ones((n_valid, 1)),
numpy.zeros((n_valid, 1))))
_labtest = numpy.hstack((numpy.zeros((n_test, 1)),
numpy.ones((n_test, 1))))
dataset = numpy.vstack((valid_repr, test_repr))
label = numpy.vstack((_labvalid, _labtest))
logger.info('... computing the ALC')
raise NotImplementedError("This got broken by embed no longer being "
"where it used to be (if it even still exists, I haven't "
"looked for it)")
# return embed.score(dataset, label)
def lookup_alc(data, transform):
"""
.. todo::
WRITEME
"""
valid_repr = transform(data[1].get_value(borrow=True))
test_repr = transform(data[2].get_value(borrow=True))
return compute_alc(valid_repr, test_repr)
| bsd-3-clause |
QuLogic/vispy | vispy/app/_default_app.py | 21 | 2422 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from .application import Application
# Initialize default app
# Only for use within *this* module.
# One should always call use_app() to obtain the default app.
default_app = None
def use_app(backend_name=None, call_reuse=True):
""" Get/create the default Application object
It is safe to call this function multiple times, as long as
backend_name is None or matches the already selected backend.
Parameters
----------
backend_name : str | None
The name of the backend application to use. If not specified, Vispy
tries to select a backend automatically. See ``vispy.use()`` for
details.
call_reuse : bool
Whether to call the backend's `reuse()` function (True by default).
Not implemented by default, but some backends need it. For example,
the notebook backends need to inject some JavaScript in a notebook as
soon as `use_app()` is called.
"""
global default_app
# If we already have a default_app, raise error or return
if default_app is not None:
names = default_app.backend_name.lower().replace('(', ' ').strip(') ')
names = [name for name in names.split(' ') if name]
if backend_name and backend_name.lower() not in names:
raise RuntimeError('Can only select a backend once, already using '
'%s.' % names)
else:
if call_reuse:
default_app.reuse()
return default_app # Current backend matches backend_name
# Create default app
default_app = Application(backend_name)
return default_app
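# Typical usage (illustrative; 'glfw' is just an example backend name):
#   app = use_app('glfw')   # selects the backend once per process
#   use_app()               # later calls return the same Application
#   use_app('pyqt5')        # a different name now raises RuntimeError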
def create():
"""Create the native application.
"""
use_app(call_reuse=False)
return default_app.create()
def run():
"""Enter the native GUI event loop.
"""
use_app(call_reuse=False)
return default_app.run()
def quit():
"""Quit the native GUI event loop.
"""
use_app(call_reuse=False)
return default_app.quit()
def process_events():
"""Process all pending GUI events
If the mainloop is not running, this should be done regularly to
keep the visualization interactive and to keep the event system going.
"""
use_app(call_reuse=False)
return default_app.process_events()
| bsd-3-clause |
fishtown-analytics/dbt | test/integration/041_presto_test/test_simple_presto_view.py | 1 | 2230 | from test.integration.base import DBTIntegrationTest, FakeArgs, use_profile
import random
import time
class TestBasePrestoRun(DBTIntegrationTest):
@property
def schema(self):
return "presto_test_41"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
'config-version': 2,
'data-paths': ['data'],
'macro-paths': ['macros'],
'seeds': {
'quote_columns': False,
},
}
@property
def profile_config(self):
return self.presto_profile()
def assert_nondupes_pass(self):
# The 'dupe' model should fail, but all others should pass
test_results = self.run_dbt(['test'], expect_pass=False)
for result in test_results:
if 'dupe' in result.node.name:
self.assertIsNone(result.error)
self.assertFalse(result.skipped)
self.assertTrue(result.status > 0)
# assert that actual tests pass
else:
self.assertIsNone(result.error)
self.assertFalse(result.skipped)
# status = # of failing rows
self.assertEqual(result.status, 0)
class TestSimplePrestoRun(TestBasePrestoRun):
def setUp(self):
super().setUp()
for conn in self.adapter.connections.in_use.values():
conn.transaction_open
@use_profile('presto')
def test__presto_simple_run(self):
# make sure seed works twice. Full-refresh is a no-op
self.run_dbt(['seed'])
self.run_dbt(['seed', '--full-refresh'])
results = self.run_dbt()
self.assertEqual(len(results), 2)
self.assert_nondupes_pass()
class TestUnderscorePrestoRun(TestBasePrestoRun):
prefix = "_test{}{:04}".format(int(time.time()), random.randint(0, 9999))
@use_profile('presto')
def test_presto_run_twice(self):
self.run_dbt(['seed'])
results = self.run_dbt()
self.assertEqual(len(results), 2)
self.assert_nondupes_pass()
results = self.run_dbt()
self.assertEqual(len(results), 2)
self.assert_nondupes_pass()
| apache-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/lib/arrayterator.py | 52 | 7282 | """
A buffered iterator for big arrays.
This module solves the problem of iterating over a big file-based array
without having to read it into memory. The `Arrayterator` class wraps
an array object, and when iterated it will return sub-arrays with at most
a user-specified number of elements.
"""
from __future__ import division, absolute_import, print_function
from operator import mul
from functools import reduce
from numpy.compat import long
__all__ = ['Arrayterator']
class Arrayterator(object):
"""
Buffered iterator for big arrays.
`Arrayterator` creates a buffered iterator for reading big arrays in small
contiguous blocks. The class is useful for objects stored in the
file system. It allows iteration over the object *without* reading
everything in memory; instead, small blocks are read and iterated over.
`Arrayterator` can be used with any object that supports multidimensional
slices. This includes NumPy arrays, but also variables from
Scientific.IO.NetCDF or pynetcdf for example.
Parameters
----------
var : array_like
The object to iterate over.
buf_size : int, optional
The buffer size. If `buf_size` is supplied, the maximum amount of
data that will be read into memory is `buf_size` elements.
Default is None, which will read as many element as possible
into memory.
Attributes
----------
var
buf_size
start
stop
step
shape
flat
See Also
--------
ndenumerate : Multidimensional array iterator.
flatiter : Flat array iterator.
memmap : Create a memory-map to an array stored in a binary file on disk.
Notes
-----
The algorithm works by first finding a "running dimension", along which
the blocks will be extracted. Given an array of dimensions
``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
first dimension will be used. If, on the other hand,
``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
Blocks are extracted along this dimension, and when the last block is
returned the process continues from the next dimension, until all
elements have been read.
Examples
--------
>>> import numpy as np
>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
>>> a_itor = np.lib.arrayterator.Arrayterator(a, 2)
>>> a_itor.shape
(3, 4, 5, 6)
Now we can iterate over ``a_itor``, and it will return arrays of size
two. Since `buf_size` was smaller than any dimension, the first
dimension will be iterated over first:
>>> for subarr in a_itor:
... if not subarr.all():
... print subarr, subarr.shape
...
[[[[0 1]]]] (1, 1, 1, 2)
"""
def __init__(self, var, buf_size=None):
self.var = var
self.buf_size = buf_size
self.start = [0 for dim in var.shape]
self.stop = [dim for dim in var.shape]
self.step = [1 for dim in var.shape]
def __getattr__(self, attr):
return getattr(self.var, attr)
def __getitem__(self, index):
"""
Return a new arrayterator.
"""
# Fix index, handling ellipsis and incomplete slices.
if not isinstance(index, tuple):
index = (index,)
fixed = []
length, dims = len(index), len(self.shape)
for slice_ in index:
if slice_ is Ellipsis:
fixed.extend([slice(None)] * (dims-length+1))
length = len(fixed)
elif isinstance(slice_, (int, long)):
fixed.append(slice(slice_, slice_+1, 1))
else:
fixed.append(slice_)
index = tuple(fixed)
if len(index) < dims:
index += (slice(None),) * (dims-len(index))
# Return a new arrayterator object.
out = self.__class__(self.var, self.buf_size)
for i, (start, stop, step, slice_) in enumerate(
zip(self.start, self.stop, self.step, index)):
out.start[i] = start + (slice_.start or 0)
out.step[i] = step * (slice_.step or 1)
out.stop[i] = start + (slice_.stop or stop-start)
out.stop[i] = min(stop, out.stop[i])
return out
def __array__(self):
"""
Return corresponding data.
"""
slice_ = tuple(slice(*t) for t in zip(
self.start, self.stop, self.step))
return self.var[slice_]
@property
def flat(self):
"""
A 1-D flat iterator for Arrayterator objects.
This iterator returns elements of the array to be iterated over in
`Arrayterator` one by one. It is similar to `flatiter`.
See Also
--------
`Arrayterator`
flatiter
Examples
--------
>>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
>>> a_itor = np.lib.arrayterator.Arrayterator(a, 2)
>>> for subarr in a_itor.flat:
... if not subarr:
... print subarr, type(subarr)
...
0 <type 'numpy.int32'>
"""
for block in self:
for value in block.flat:
yield value
@property
def shape(self):
"""
The shape of the array to be iterated over.
For an example, see `Arrayterator`.
"""
return tuple(((stop-start-1)//step+1) for start, stop, step in
zip(self.start, self.stop, self.step))
def __iter__(self):
# Skip arrays with degenerate dimensions
if [dim for dim in self.shape if dim <= 0]:
raise StopIteration
start = self.start[:]
stop = self.stop[:]
step = self.step[:]
ndims = len(self.var.shape)
while True:
count = self.buf_size or reduce(mul, self.shape)
# iterate over each dimension, looking for the
# running dimension (ie, the dimension along which
# the blocks will be built from)
rundim = 0
for i in range(ndims-1, -1, -1):
# if count is zero we ran out of elements to read
# along higher dimensions, so we read only a single position
if count == 0:
stop[i] = start[i]+1
elif count <= self.shape[i]:
# limit along this dimension
stop[i] = start[i] + count*step[i]
rundim = i
else:
# read everything along this dimension
stop[i] = self.stop[i]
stop[i] = min(self.stop[i], stop[i])
count = count//self.shape[i]
# yield a block
slice_ = tuple(slice(*t) for t in zip(start, stop, step))
yield self.var[slice_]
# Update start position, taking care of overflow to
# other dimensions
start[rundim] = stop[rundim] # start where we stopped
for i in range(ndims-1, 0, -1):
if start[i] >= self.stop[i]:
start[i] = self.start[i]
start[i-1] += self.step[i-1]
if start[0] >= self.stop[0]:
raise StopIteration
| mit |
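A minimal usage sketch of the buffered, running-dimension iteration described in the Notes above, assuming NumPy is installed (the class is exposed as np.lib.Arrayterator; the docstring's np.lib.arrayterator path refers to this submodule). Note that under PEP 479 (Python 3.7+) the raise StopIteration statements in __iter__ would need to be plain return; current NumPy releases include that fix.
import numpy as np

a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
a_itor = np.lib.Arrayterator(a, buf_size=12)

# 12 elements fit a whole last axis (length 5) but not a whole (4, 5) slice,
# so blocks are cut along the middle dimension and each holds at most 12 elements
for block in a_itor:
    assert block.size <= 12
    print(block.shape)        # (1, 2, 5) for this array and buffer size

# the flat iterator yields scalars one by one, reading blocks lazily behind the scenes
first_ten = [value for _, value in zip(range(10), np.lib.Arrayterator(a, 2).flat)]
print(first_ten)              # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]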
nihlaeth/Nagios_check_slackpkg | check_slackpkg_nonpriv.py | 1 | 1673 | #!/usr/bin/env python
"""Nagios module for monitoring available updates via slackpkg."""
import subprocess
import sys
import os
# pylint: disable=invalid-name
# run check-updates to poll mirror for changes
result = []
try:
result = subprocess.check_output("myslackpkg check-updates", shell=True).split("\n")
except (OSError, subprocess.CalledProcessError) as error:
print "Failed to check for updates: %s" % error
sys.exit(3)
updates = "idk"
for line in result:
if "good news" in line:
updates = "no"
elif "News on" in line:
updates = "yes"
if updates == "idk":
print "Error parsing slackpkg check-updates status"
sys.exit(3)
elif updates == "yes":
# fetch updated package list
try:
        # use '> /dev/null 2>&1' rather than the bash-only '&>', which a strict
        # /bin/sh would parse as backgrounding the command and losing its exit status
        _ = subprocess.check_output("myslackpkg update > /dev/null 2>&1", shell=True)
except (OSError, subprocess.CalledProcessError) as error:
print "Failed to update package list: %s" % error
sys.exit(3)
# Now the packages list is up to date, check if we need to upgrade anything
result = []
devnull = open(os.devnull, 'w')
try:
result = subprocess.check_output([
"myslackpkg",
"upgrade-all"], stderr=devnull).split("\n")
except (OSError, subprocess.CalledProcessError) as error:
print "Failed to check for upgrades: %s" % error
sys.exit(3)
packages = []
for line in result:
if ".txz" in line:
packages.append(line.strip())
if "update gpg" in line:
print "Error: need up-to-date gpg key!"
sys.exit(3)
if len(packages) == 0:
print "OK: everything up-to-date"
sys.exit(0)
else:
print "Updates available: " + " ".join(packages)
sys.exit(2)
| gpl-3.0 |
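The script above follows the standard Nagios plugin convention of printing a single status line and exiting with 0 (OK), 1 (WARNING), 2 (CRITICAL) or 3 (UNKNOWN). A small generic sketch of that convention; the nagios_exit helper is hypothetical and not part of the plugin:
import sys

def nagios_exit(code, message):
    # 0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN -- the codes the plugin above relies on
    labels = {0: 'OK', 1: 'WARNING', 2: 'CRITICAL', 3: 'UNKNOWN'}
    print('%s: %s' % (labels.get(code, 'UNKNOWN'), message))
    sys.exit(code)

# e.g. nagios_exit(2, 'Updates available: ' + ' '.join(packages))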
vshtanko/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
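The header notes that the hyperparameters were tuned by grid search without reproducing it. Below is a hedged sketch of such a search over the pipeline: the parameter ranges are illustrative assumptions, not the values the authors used, and in scikit-learn releases where sklearn.grid_search and sklearn.cross_validation were removed, the same classes live in sklearn.model_selection.
from sklearn.grid_search import GridSearchCV  # sklearn.model_selection.GridSearchCV in newer releases

param_grid = {
    'rbm__learning_rate': [0.01, 0.06, 0.1],
    'rbm__n_iter': [10, 20, 40],
    'rbm__n_components': [50, 100, 200],
    'logistic__C': [1.0, 100.0, 6000.0],
}
search = GridSearchCV(classifier, param_grid, cv=3, verbose=1)
search.fit(X_train, Y_train)
print(search.best_params_)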
haphaeu/yoshimi | sql/data_analysis/database.py | 1 | 3122 | from os import path
from sqlalchemy import (create_engine,
Column,
String,
Integer,
Boolean,
Table,
ForeignKey)
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
database_filename = 'twitter.sqlite3'
directory = path.abspath(path.dirname(__file__))
database_filepath = path.join(directory, database_filename)
engine_url = 'sqlite:///{}'.format(database_filepath)
engine = create_engine(engine_url)
# Our database class objects are going to inherit from
# this class
Base = declarative_base(bind=engine)
# create a configured “Session” class
Session = sessionmaker(bind=engine, autoflush=False)
# Create a Session
session = Session()
hashtag_tweet = Table('hashtag_tweet', Base.metadata,
Column('hashtag_id', Integer, ForeignKey('hashtags.id'), nullable=False),
Column('tweet_id', Integer, ForeignKey('tweets.id'), nullable=False))
class Tweet(Base):
__tablename__ = 'tweets'
id = Column(Integer, primary_key=True)
tid = Column(String(100), nullable=False)
tweet = Column(String(300), nullable=False)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
coordinates = Column(String(50), nullable=True)
user = relationship('User', backref='tweets')
created_at = Column(String(100), nullable=False)
favorite_count = Column(Integer)
in_reply_to_screen_name = Column(String)
in_reply_to_status_id = Column(Integer)
in_reply_to_user_id = Column(Integer)
lang = Column(String)
quoted_status_id = Column(Integer)
retweet_count = Column(Integer)
source = Column(String)
is_retweet = Column(Boolean)
hashtags = relationship('Hashtag',
secondary='hashtag_tweet',
back_populates='tweets')
def __repr__(self):
return '<Tweet {}>'.format(self.id)
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
uid = Column(String(50), nullable=False)
name = Column(String(100), nullable=False)
screen_name = Column(String)
created_at = Column(String)
# Nullable
description = Column(String)
followers_count = Column(Integer)
friends_count = Column(Integer)
statuses_count = Column(Integer)
favourites_count = Column(Integer)
listed_count = Column(Integer)
geo_enabled = Column(Boolean)
lang = Column(String)
def __repr__(self):
return '<User {}>'.format(self.id)
class Hashtag(Base):
__tablename__ = 'hashtags'
id = Column(Integer, primary_key=True)
text = Column(String(200), nullable=False)
tweets = relationship('Tweet',
secondary='hashtag_tweet',
back_populates='hashtags')
def __repr__(self):
return '<Hashtag {}>'.format(self.text)
def init_db():
Base.metadata.create_all()
if not path.isfile(database_filepath):
init_db()
| lgpl-3.0 |
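A minimal usage sketch for the module above; the import path assumes it is importable as database, and all field values are made up. It creates the schema, links a Hashtag to a Tweet through the hashtag_tweet association table, and queries it back.
from database import session, init_db, Tweet, User, Hashtag

init_db()

user = User(uid='42', name='Example User', screen_name='example')
tag = Hashtag(text='python')
tweet = Tweet(tid='1001', tweet='hello #python', user=user,
              created_at='Mon Jan 01 00:00:00 +0000 2018')
tweet.hashtags.append(tag)

session.add(tweet)
session.commit()

# count tweets carrying the #python hashtag via the many-to-many relationship
print(session.query(Tweet).filter(Tweet.hashtags.any(Hashtag.text == 'python')).count())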
cherusk/ansible | lib/ansible/executor/playbook_executor.py | 57 | 12811 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils._text import to_native, to_text
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
from ansible.utils.path import makedirs_safe
from ansible.utils.ssh_functions import check_for_controlpersist
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class PlaybookExecutor:
'''
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
'''
def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self.passwords = passwords
self._unreachable_hosts = dict()
if options.listhosts or options.listtasks or options.listtags or options.syntax:
self._tqm = None
else:
self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)
# Note: We run this here to cache whether the default ansible ssh
# executable supports control persist. Sometime in the future we may
# need to enhance this to check that ansible_ssh_executable specified
# in inventory is also cached. We can't do this caching at the point
# where it is used (in task_executor) because that is post-fork and
# therefore would be discarded after every task.
check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
def run(self):
'''
Run the given playbook, based on the settings in the play which
may limit the runs to serialized groups, etc.
'''
result = 0
entrylist = []
entry = {}
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', pb)
i = 1
plays = pb.get_plays()
display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))
for play in plays:
if play._included_path is not None:
self._loader.set_basedir(play._included_path)
else:
self._loader.set_basedir(pb._basedir)
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
if play.vars_prompt:
for var in play.vars_prompt:
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in self._variable_manager.extra_vars:
if self._tqm:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
else: # we are either in --list-<option> or syntax check
play.vars[vname] = default
# Create a temporary copy of the play here, so we can run post_validate
# on it without the templating changes affecting the original object.
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
if self._options.syntax:
continue
if self._tqm is None:
# we are just doing a listing
entry['plays'].append(new_play)
else:
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
previously_failed = len(self._tqm._failed_hosts)
previously_unreachable = len(self._tqm._unreachable_hosts)
break_play = False
# we are actually running plays
batches = self._get_serialized_batches(new_play)
if len(batches) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
for batch in batches:
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
result = self._tqm.run(play=play)
# break the play if the result equals the special return code
if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
# check the number of failures here, to see if they're above the maximum
# failure percentage allowed, or if any errors are fatal. If either of those
# conditions are met, we break out, otherwise we only break out if the entire
# batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
if len(batch) == failed_hosts_count:
break_play = True
break
# update the previous counts so they don't accumulate incorrectly
# over multiple serial batches
previously_failed += len(self._tqm._failed_hosts) - previously_failed
previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
# save the unreachable hosts from this batch
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
if break_play:
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# send the stats callback for this playbook
if self._tqm is not None:
if C.RETRY_FILES_ENABLED:
retries = set(self._tqm._failed_hosts.keys())
retries.update(self._tqm._unreachable_hosts.keys())
retries = sorted(retries)
if len(retries) > 0:
if C.RETRY_FILES_SAVE_PATH:
basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)
elif playbook_path:
basedir = os.path.dirname(os.path.abspath(playbook_path))
else:
basedir = '~/'
(retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
filename = os.path.join(basedir, "%s.retry" % retry_name)
if self._generate_retry_inventory(filename, retries):
display.display("\tto retry, use: --limit @%s\n" % filename)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._tqm.cleanup()
if self._loader:
self._loader.cleanup_all_tmp_files()
if self._options.syntax:
display.display("No issues encountered")
return result
return result
def _get_serialized_batches(self, play):
'''
Returns a list of hosts, subdivided into batches based on
the serial size specified in the play.
'''
# make sure we have a unique list of hosts
all_hosts = self._inventory.get_hosts(play.hosts)
all_hosts_len = len(all_hosts)
# the serial value can be listed as a scalar or a list of
# scalars, so we make sure it's a list here
serial_batch_list = play.serial
if len(serial_batch_list) == 0:
serial_batch_list = [-1]
cur_item = 0
serialized_batches = []
while len(all_hosts) > 0:
# get the serial value from current item in the list
serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)
# if the serial count was not specified or is invalid, default to
# a list of all hosts, otherwise grab a chunk of the hosts equal
# to the current serial item size
if serial <= 0:
serialized_batches.append(all_hosts)
break
else:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batches.append(play_hosts)
# increment the current batch list item number, and if we've hit
# the end keep using the last element until we've consumed all of
# the hosts in the inventory
cur_item += 1
if cur_item > len(serial_batch_list) - 1:
cur_item = len(serial_batch_list) - 1
return serialized_batches
def _generate_retry_inventory(self, retry_path, replay_hosts):
'''
Called when a playbook run fails. It generates an inventory which allows
re-running on ONLY the failed hosts. This may duplicate some variable
information in group_vars/host_vars but that is ok, and expected.
'''
try:
makedirs_safe(os.path.dirname(retry_path))
with open(retry_path, 'w') as fd:
for x in replay_hosts:
fd.write("%s\n" % x)
except Exception as e:
display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_native(e)))
return False
return True
| gpl-3.0 |
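A standalone sketch, in plain Python with no Ansible imports, of the batching rule _get_serialized_batches implements: each entry in the play's serial list carves off a batch of hosts, the last entry is reused until the inventory is exhausted, and a non-positive size means "everything remaining in one batch". Percentage handling via pct_to_int is omitted here.
def serialized_batches(hosts, serial_batch_list):
    hosts = list(hosts)
    if not serial_batch_list:
        serial_batch_list = [-1]
    batches, cur = [], 0
    while hosts:
        size = serial_batch_list[cur]
        if size <= 0:                    # unspecified or invalid: one batch with the rest
            batches.append(hosts)
            break
        batches.append(hosts[:size])
        hosts = hosts[size:]
        cur = min(cur + 1, len(serial_batch_list) - 1)
    return batches

print(serialized_batches(['h1', 'h2', 'h3', 'h4', 'h5'], [1, 2]))
# -> [['h1'], ['h2', 'h3'], ['h4', 'h5']]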