repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
felipeacsi/python-acoustics | tests/standards/test_iec_61672_1_2013.py | 2 | 2692 | import pytest
import numpy as np
from acoustics.standards.iec_61672_1_2013 import *
from scipy.signal import freqresp
def signal_fs():
    """Return a 3 s, 400 Hz, unit-amplitude sine sampled at 4 kHz.

    Returns:
        tuple: ``(x, fs)`` — the signal as a 1-D numpy array and the
        sample frequency in Hz.
    """
    sample_rate = 4000.0
    frequency = 400.0
    seconds = 3.0
    t = np.arange(int(seconds * sample_rate)) / sample_rate
    return np.sin(2.0 * np.pi * frequency * t), sample_rate
def test_fast_level():
    """Test whether integration with time-constant FAST gives the correct level.
    Note that the reference sound pressure is used.
    In this test the amplitude of the sine is 1, which means the mean squared $MS$ is 0.5
    With a reference pressure $p_r$ of 2.0e-5 the level should be 91 decibel
    .. math:: L = 10 \cdot \\log_{10}{\\left(\\frac{MS}{p_r^2} \\right)}
    .. math:: L = 10 \cdot \\log_{10}{\\left(\\frac{0.5}{(2e-5)^2} \\right)} = 91
    """
    x, fs = signal_fs()
    times, levels = fast_level(x, fs)
    # Mean level of a unit-amplitude sine: 10*log10(0.5 / (2e-5)**2) ~= 91 dB.
    assert (abs(levels.mean() - 91) < 0.05)
    x *= 4.0
    # Quadrupling the amplitude adds 20*log10(4) ~= 12 dB -> 103 dB.
    times, levels = fast_level(x, fs)
    assert (abs(levels.mean() - 103) < 0.05)
def test_slow_level():
    """Test whether integration with time-constant SLOW gives the correct level.

    A steady sine has the same time-averaged level for any time weighting,
    so the expected values (91 dB, then 103 dB after a 4x gain) match
    ``test_fast_level``.
    """
    x, fs = signal_fs()
    # BUG FIX: this test previously called fast_level() twice, so the SLOW
    # time-constant path was never exercised. Use slow_level() instead.
    times, levels = slow_level(x, fs)
    assert (abs(levels.mean() - 91) < 0.05)
    x *= 4.0  # +20*log10(4) ~= 12 dB
    times, levels = slow_level(x, fs)
    assert (abs(levels.mean() - 103) < 0.05)
def test_time_weighted_sound_level():
    """Time-weighted level of a unit sine should be ~91 dB re 2e-5 Pa."""
    signal, sample_rate = signal_fs()
    integration_time = 0.125  # the FAST time constant, in seconds
    _times, levels = time_weighted_sound_level(signal, sample_rate, integration_time)
    assert abs(levels.mean() - 91) < 0.05
    signal *= 4.0  # quadrupling the amplitude adds ~12 dB
    _times, levels = time_weighted_sound_level(signal, sample_rate, integration_time)
    assert abs(levels.mean() - 103) < 0.05
def test_time_averaged_sound_level():
    """Time-averaged level of a unit sine should be ~91 dB re 2e-5 Pa."""
    signal, sample_rate = signal_fs()
    averaging_time = 0.125  # the FAST time constant, in seconds
    _times, levels = time_averaged_sound_level(signal, sample_rate, averaging_time)
    assert abs(levels.mean() - 91) < 0.05
    signal *= 4.0  # quadrupling the amplitude adds ~12 dB
    _times, levels = time_averaged_sound_level(signal, sample_rate, averaging_time)
    assert abs(levels.mean() - 103) < 0.05
class TestWeighting():
    """Check the tabulated A/C/Z weighting values against both the analytic
    weighting functions and the s-domain weighting systems."""

    @pytest.fixture(params=['A', 'C', 'Z'])
    def weighting(self, request):
        # Parametrized fixture: each test below runs once per weighting curve.
        return request.param

    def test_weighting_functions(self, weighting):
        # The analytic weighting formula should reproduce the tabulated values
        # at the nominal third-octave centre frequencies within 0.3 dB.
        frequencies = NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES
        values = WEIGHTING_VALUES[weighting]
        function_values = WEIGHTING_FUNCTIONS[weighting](frequencies)
        assert (np.abs(values - function_values).max() < 0.3)

    def test_weighting_systems(self, weighting):
        # The magnitude response of the continuous-time weighting system
        # (evaluated at w = 2*pi*f) should match the tabulated values too.
        frequencies = NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES
        values = WEIGHTING_VALUES[weighting]
        w, H = freqresp((WEIGHTING_SYSTEMS[weighting]()), w=2.0 * np.pi * frequencies)
        results = 20.0 * np.log10(np.abs(H))
        assert (np.abs(values - results).max() < 0.3)
| bsd-3-clause |
wzbozon/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    """Cluster a hand-built block-structured affinity matrix and check the
    expected two-cluster split for every solver/label-assignment combo."""
    # Two blocks of mutually similar points (0-2 and 3-6) weakly coupled
    # through the 0.2 entries.
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            # Dense and sparse inputs must give the same result.
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Cluster ids are arbitrary; normalize so sample 0 is label 1.
                if labels[0] == 0:
                    labels = 1 - labels
                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
                # The fitted model must survive a pickle round-trip.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    # Test the amg mode of SpectralClustering
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # pyamg is an optional dependency: exercise the 'amg' solver when it is
    # installed, otherwise assert that requesting it raises ValueError.
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    """spectral_clustering must reject an eigen_solver it does not know."""
    blob_centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, _ = make_blobs(n_samples=100, centers=blob_centers,
                      cluster_std=1., random_state=42)
    # Turn pairwise distances into a sparse similarity matrix.
    distances = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    """spectral_clustering must reject an assign_labels it does not know."""
    blob_centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, _ = make_blobs(n_samples=100, centers=blob_centers,
                      cluster_std=1., random_state=42)
    # Turn pairwise distances into a sparse similarity matrix.
    distances = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    """Two well-separated blobs fed as a sparse precomputed affinity must be
    recovered perfectly (adjusted Rand index of 1)."""
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)
    S = rbf_kernel(X, gamma=1)
    # Threshold tiny similarities to zero so the matrix is genuinely sparse.
    S = np.maximum(S - 1e-4, 0)
    S = sparse.coo_matrix(S)
    labels = SpectralClustering(random_state=0, n_clusters=2,
                                affinity='precomputed').fit(S).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    # With so few tightly-packed samples the kNN graph is disconnected,
    # which should emit a warning but still cluster perfectly.
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)
    # Default rbf affinity.
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
    X = check_random_state(10).rand(10, 5) * 10
    kernels_available = kernel_metrics()
    # Smoke-test every named kernel: only the label shape is checked here.
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)
    # A callable affinity is accepted too.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})    # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    # Test the discretize using a noise assignment matrix
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # random class labels
            # NOTE(review): RandomState.random_integers() and np.float are
            # deprecated/removed in modern NumPy (use randint() and float);
            # fine for the NumPy versions this sklearn vintage targets, but
            # verify before upgrading.
            y_true = random_state.random_integers(0, n_class, n_samples)
            y_true = np.array(y_true, np.float)
            # noise class assignment matrix
            # One-hot indicator of the true labels...
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            # ...perturbed with Gaussian noise before discretization.
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            # discretize should still mostly recover the original assignment.
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
matthaywardwebdesign/rethinkdb | external/v8_3.30.33.16/tools/testrunner/local/commands.py | 65 | 5069 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import signal
import subprocess
import sys
import tempfile
import time
from ..local import utils
from ..objects import output
def KillProcessWithID(pid):
  """Forcefully terminate the process with the given pid.

  On Windows the whole process tree is killed (taskkill /T /F); elsewhere a
  plain SIGTERM is sent to the single process.
  """
  if utils.IsWindows():
    os.popen('taskkill /T /F /PID %d' % pid)
  else:
    os.kill(pid, signal.SIGTERM)
# Exponential back-off parameters for polling a child process (seconds).
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25

SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h


def Win32SetErrorMode(mode):
  """Set the Windows process error mode, returning the previous mode.

  Returns SEM_INVALID_VALUE when the Win32 API is unavailable (non-Windows
  platforms, or a ctypes without windll).
  """
  prev_error_mode = SEM_INVALID_VALUE
  try:
    import ctypes
    prev_error_mode = \
        ctypes.windll.kernel32.SetErrorMode(mode) #@UndefinedVariable
  except (ImportError, AttributeError):
    # BUG FIX: on non-Windows systems `import ctypes` succeeds but ctypes has
    # no `windll`, raising AttributeError, which the old `except ImportError`
    # did not catch. Swallow both and fall through to the sentinel.
    pass
  return prev_error_mode
def RunProcess(verbose, timeout, args, **rest):
  """Run `args` as a subprocess, polling until it exits or `timeout` elapses.

  Returns a tuple (exit_code, timed_out). When the deadline is hit the
  process is killed and its exit code after the kill is returned.
  NOTE: this file is Python 2 (print statement below).
  """
  if verbose: print "#", " ".join(args)
  popen_args = args
  prev_error_mode = SEM_INVALID_VALUE
  if utils.IsWindows():
    # Windows needs a single command-line string rather than an argv list.
    popen_args = subprocess.list2cmdline(args)
    # Try to change the error mode to avoid dialogs on fatal errors. Don't
    # touch any existing error mode flags by merging the existing error mode.
    # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
    error_mode = SEM_NOGPFAULTERRORBOX
    prev_error_mode = Win32SetErrorMode(error_mode)
    Win32SetErrorMode(error_mode | prev_error_mode)
  process = subprocess.Popen(
    shell=utils.IsWindows(),
    args=popen_args,
    **rest
  )
  # Restore the error mode now that the child has inherited the safe one.
  if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
    Win32SetErrorMode(prev_error_mode)
  # Compute the end time - if the process crosses this limit we
  # consider it timed out.
  if timeout is None: end_time = None
  else: end_time = time.time() + timeout
  timed_out = False
  # Repeatedly check the exit code from the process in a
  # loop and keep track of whether or not it times out.
  exit_code = None
  sleep_time = INITIAL_SLEEP_TIME
  while exit_code is None:
    if (not end_time is None) and (time.time() >= end_time):
      # Kill the process and wait for it to exit.
      KillProcessWithID(process.pid)
      exit_code = process.wait()
      timed_out = True
    else:
      exit_code = process.poll()
      # Exponential back-off so short-lived processes are detected quickly
      # without busy-waiting on long-running ones.
      time.sleep(sleep_time)
      sleep_time = sleep_time * SLEEP_TIME_FACTOR
      if sleep_time > MAX_SLEEP_TIME:
        sleep_time = MAX_SLEEP_TIME
  return (exit_code, timed_out)
def PrintError(string):
  """Write `string` plus a trailing newline to stderr."""
  sys.stderr.write("%s\n" % string)
def CheckedUnlink(name):
  """Delete `name`, retrying with increasing delays; log if all tries fail.

  NOTE(review): `e` after the loop relies on Python 2 scoping of the
  except-clause variable; only reached after 30 failed attempts.
  """
  # On Windows, when run with -jN in parallel processes,
  # OS often fails to unlink the temp file. Not sure why.
  # Need to retry.
  # Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
  retry_count = 0
  while retry_count < 30:
    try:
      os.unlink(name)
      return
    except OSError, e:
      retry_count += 1
      # Back off a little longer on each failed attempt.
      time.sleep(retry_count * 0.1)
  PrintError("os.unlink() " + str(e))
def Execute(args, verbose=False, timeout=None):
  """Run a command, capturing stdout/stderr via temp files.

  Empty argv entries are dropped. Returns an output.Output with the exit
  code, timeout flag and the captured streams. Uses Python 2's file().
  """
  try:
    args = [ c for c in args if c != "" ]
    # Capture through temp files because the child inherits raw fds.
    (fd_out, outname) = tempfile.mkstemp()
    (fd_err, errname) = tempfile.mkstemp()
    (exit_code, timed_out) = RunProcess(
      verbose,
      timeout,
      args=args,
      stdout=fd_out,
      stderr=fd_err
    )
  finally:
    # TODO(machenbach): A keyboard interrupt before the assignment to
    # fd_out|err can lead to reference errors here.
    os.close(fd_out)
    os.close(fd_err)
    out = file(outname).read()
    errors = file(errname).read()
    CheckedUnlink(outname)
    CheckedUnlink(errname)
  return output.Output(exit_code, timed_out, out, errors)
| agpl-3.0 |
udayinfy/openerp-7.0 | gap_analysis_project_long_term/gap_analysis_project_long_term.py | 4 | 10840 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.
# Author: Yannick Gouin <yannick.gouin@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
import time
from osv import fields, osv
from tools.translate import _
from tools import ustr
#import tools
class gap_analysis(osv.Model):
    _inherit = "gap_analysis"
    _name = "gap_analysis"

    def generate_project(self, cr, uid, ids, context=None):
        """Generate a project (phases + tasks) from each Gap Analysis.

        For every kept line flagged ``to_project`` a main task is created,
        plus one sub-task per non-zero workload bucket: testing [TEST],
        development [DEV], technical analysis [TECH] and functional
        analysis [FUNC]. Lines sharing the same ``phase`` code are grouped
        under a single project phase whose duration is the sum of their
        total times.

        Returns an act_window opening the last project created, or True
        when no project was created.
        """
        project_pool = self.pool.get('project.project')
        phase_pool = self.pool.get('project.phase')
        gapline_pool = self.pool.get('gap_analysis.line')
        task_pool = self.pool.get('project.task')
        uom_hour = self.pool.get('product.uom').search(cr, uid, [('name', '=', _('Hour(s)'))], context=context)[0]
        # BUG FIX: initialize so the act_window check below cannot raise
        # NameError when `ids` is empty.
        project_id = False
        for gap in self.browse(cr, uid, ids, context=context):
            partner_id = gap.partner_id and gap.partner_id.id or False
            # BUG FIX: the assignment was duplicated in the original code.
            notes = gap.note or ''
            project_vals = {
                'name': gap.name,
                'description': notes,
                'user_id': gap.user_id.id,
                'partner_id': partner_id,
                'gap_analysis_id': gap.id,
            }
            project_id = project_pool.create(cr, uid, project_vals, context=context)
            phases = {}  # phase code -> project.phase id, created lazily
            for gap_line in gap.gap_lines:
                if gap_line.to_project and gap_line.keep:
                    time4dev = 0
                    time4tech = 0
                    time4fct = 0
                    time4test = gap_line.testing or 0
                    if gap_line.effort:
                        if gap_line.effort.unknown:
                            time4dev = gap_line.duration_wk
                        else:
                            time4dev = gap_line.effort.duration
                    for workload in gap_line.workloads:
                        if workload.type.category == "Technical Analysis":
                            time4tech += workload.duration
                        else:
                            time4fct += workload.duration
                    # CREATE PROJECT PHASES (once per distinct phase code).
                    phase = gap_line.phase or '0'
                    phase = phase.upper()
                    if phase not in phases:
                        gapline_ids = gapline_pool.search(cr, uid, [('gap_id', '=', gap.id), ('phase', 'ilike', phase), ('keep', '=', True), ('to_project', '=', True)])
                        duration_hour = 0
                        if gapline_ids:
                            for l in gapline_pool.browse(cr, uid, gapline_ids):
                                duration_hour += l.total_time
                        phase_vals = {
                            'name': gap.name + " - " + phase,
                            'project_id': project_id,
                            'duration': duration_hour,
                            'product_uom': uom_hour,
                            'previous_phase_ids': [],  # TODO
                            'next_phase_ids': [],  # TODO
                        }
                        phases[phase] = phase_pool.create(cr, uid, phase_vals, context=context)
                    # Create Tasks: a zero-hours parent task plus one child
                    # task per non-empty workload bucket.
                    if time4dev > 0 or time4tech > 0 or time4fct > 0 or time4test > 0:
                        maintask_vals = {
                            'name': gap_line.functionality.name[0:100],
                            'code_gap': gap_line.code or "",
                            'project_id': project_id,
                            'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
                            'partner_id': partner_id,
                            'phase_id': phases[phase],
                            'gap_category_id': gap_line.category and gap_line.category.id or False,
                            'user_id': gap.user_functional and gap.user_functional.id or False,
                            'gap_line_id': gap_line.id,
                            'to_report': True,
                            'org_planned_hours': 0,
                            'planned_hours': 0,
                            'remaining_hours': 0,
                        }
                        maintask_id = task_pool.create(cr, uid, maintask_vals, context=context)
                        maintask_id = [int(maintask_id)]
                        if time4test > 0:
                            task_vals4test = {
                                'name': gap_line.functionality.name[0:100] + " [TEST]",
                                'code_gap': gap_line.code or "",
                                'project_id': project_id,
                                'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
                                'partner_id': partner_id,
                                'org_planned_hours': time4test,
                                'planned_hours': time4test,
                                'remaining_hours': time4test,
                                'phase_id': phases[phase],
                                'parent_ids': [(6, 0, maintask_id)],
                                'gap_category_id': gap_line.category and gap_line.category.id or False,
                                'user_id': gap.user_test and gap.user_test.id or False,
                                'gap_line_id': gap_line.id,
                            }
                            task_pool.create(cr, uid, task_vals4test, context=context)
                        if time4dev > 0:
                            task_vals4dev = {
                                'name': gap_line.functionality.name[0:100] + " [DEV]",
                                'code_gap': gap_line.code or "",
                                'project_id': project_id,
                                'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
                                'partner_id': partner_id,
                                'org_planned_hours': time4dev,
                                'planned_hours': time4dev,
                                'remaining_hours': time4dev,
                                'phase_id': phases[phase],
                                'parent_ids': [(6, 0, maintask_id)],
                                'gap_category_id': gap_line.category and gap_line.category.id or False,
                                'user_id': gap.user_dev and gap.user_dev.id or False,
                                'gap_line_id': gap_line.id,
                            }
                            task_pool.create(cr, uid, task_vals4dev, context=context)
                        if time4tech > 0:
                            task_vals4tech = {
                                'name': gap_line.functionality.name[0:100] + " [TECH]",
                                'code_gap': gap_line.code or "",
                                'project_id': project_id,
                                'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
                                'partner_id': partner_id,
                                'org_planned_hours': time4tech,
                                'planned_hours': time4tech,
                                'remaining_hours': time4tech,
                                'phase_id': phases[phase],
                                'parent_ids': [(6, 0, maintask_id)],
                                'gap_category_id': gap_line.category and gap_line.category.id or False,
                                'user_id': gap.user_technical and gap.user_technical.id or False,
                                'gap_line_id': gap_line.id,
                            }
                            task_pool.create(cr, uid, task_vals4tech, context=context)
                        if time4fct > 0:
                            task_vals4fct = {
                                'name': gap_line.functionality.name[0:100] + " [FUNC]",
                                'code_gap': gap_line.code or "",
                                'project_id': project_id,
                                'notes': ustr(gap_line.functionality.description or gap_line.functionality.name),
                                'partner_id': partner_id,
                                'org_planned_hours': time4fct,
                                'planned_hours': time4fct,
                                'remaining_hours': time4fct,
                                'phase_id': phases[phase],
                                'parent_ids': [(6, 0, maintask_id)],
                                'gap_category_id': gap_line.category and gap_line.category.id or False,
                                'user_id': gap.user_functional and gap.user_functional.id or False,
                                'gap_line_id': gap_line.id,
                            }
                            task_pool.create(cr, uid, task_vals4fct, context=context)
        if project_id:
            # Open the (last) generated project in form view.
            return {
                'type': 'ir.actions.act_window',
                'name': "Generated Project",
                'view_mode': 'form',
                'view_id': False,
                'view_type': 'form',
                'res_model': 'project.project',
                'res_id': project_id,
                'context': context
            }
        return True
class gap_analysis_line(osv.Model):
    """Adds a free-form phase code to gap analysis lines; used by
    generate_project() above to group lines into project phases."""
    _inherit = "gap_analysis.line"
    _name = "gap_analysis.line"
    _columns = {
        'phase': fields.char('Phase', size=4, help='Specify the Phase where the functionality will be done.', required=True),
    }
    _defaults = {
        # BUG FIX: default must be a string to match the char field —
        # generate_project() calls .upper() on this value.
        'phase': '1',
    }
    _order = 'phase asc, critical desc, effort asc'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
2ndQuadrant/ansible | lib/ansible/modules/system/facter.py | 125 | 1321 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: facter
short_description: Runs the discovery program I(facter) on the remote system
description:
- Runs the I(facter) discovery program
(U(https://github.com/puppetlabs/facter)) on the remote system, returning
JSON data that can be useful for inventory purposes.
version_added: "0.2"
requirements:
- facter
- ruby-json
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Example command-line invocation
ansible www.example.net -m facter
'''
import json
from ansible.module_utils.basic import AnsibleModule
def main():
    """Locate facter, run it with --json, and return its output as facts."""
    module = AnsibleModule(
        argument_spec=dict()
    )
    # Puppet AIO packages install facter outside the default PATH.
    facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
    rc, out, err = module.run_command([facter_path, "--json"], check_rc=True)
    # facter's JSON output becomes the module result verbatim.
    module.exit_json(**json.loads(out))


if __name__ == '__main__':
    main()
| gpl-3.0 |
yask123/django | tests/test_client_regress/views.py | 143 | 5161 | import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.test import Client
from django.test.client import CONTENT_TYPE_RE
from django.test.utils import setup_test_environment
from django.utils.six.moves.urllib.parse import urlencode
class CustomTestException(Exception):
    """Raised by test views to signal a deliberate, identifiable failure."""
    pass
def no_template_view(request):
    """A simple view that expects a GET request and returns a plain
    HttpResponse without rendering any template."""
    return HttpResponse("No template used. Sample content: twice once twice. Content ends.")
def staff_only_view(request):
    """A view only staff may visit; other users trigger CustomTestException."""
    if not request.user.is_staff:
        raise CustomTestException()
    return HttpResponse('')
def get_view(request):
    """A simple login-protected view (wrapped with login_required below)."""
    return HttpResponse("Hello world")
get_view = login_required(get_view)
def request_data(request, template='base.html', data='sausage'):
    """A simple view that returns the request data in the context."""
    context = {'data': data}
    # Expose the 'foo'/'bar' parameters from both GET and POST.
    for source, prefix in ((request.GET, 'get'), (request.POST, 'post')):
        for key in ('foo', 'bar'):
            context['%s-%s' % (prefix, key)] = source.get(key)
    return render_to_response(template, context)
def view_with_argument(request, name):
    """A view that takes a string argument
    The purpose of this view is to check that if a space is provided in
    the argument, the test framework unescapes the %20 before passing
    the value to the view.
    """
    # 'Arthur Dent' only matches if the %20 was unescaped to a space.
    if name == 'Arthur Dent':
        return HttpResponse('Hi, Arthur')
    else:
        return HttpResponse('Howdy, %s' % name)
def nested_view(request):
    """
    A view that uses test client to call another view.
    """
    # Re-initialize the test environment so the nested Client works even
    # though we are already inside a request.
    setup_test_environment()
    c = Client()
    c.get("/no_template_view/")
    return render_to_response('base.html', {'nested': 'yes'})
def login_protected_redirect_view(request):
    """A login-protected view that redirects all requests to the GET view."""
    return HttpResponseRedirect('/get_view/')
login_protected_redirect_view = login_required(login_protected_redirect_view)
def redirect_to_self_with_changing_query_view(request):
    """Redirect to self with a query string that changes on every hop."""
    query = request.GET.copy()
    # String concatenation, not addition: 'counter' grows by one digit per
    # redirect, so each redirect target URL is distinct.
    query['counter'] += '0'
    return HttpResponseRedirect('/redirect_to_self_with_changing_query_view/?%s' % urlencode(query))
def set_session_view(request):
    """A view that sets a session variable (read back by check_session_view)."""
    request.session['session_var'] = 'YES'
    return HttpResponse('set_session')
def check_session_view(request):
    """A view that reads a session variable; responds 'NO' when unset."""
    return HttpResponse(request.session.get('session_var', 'NO'))
def request_methods_view(request):
    """A view that echoes the HTTP request method back in the body."""
    return HttpResponse('request method: %s' % request.method)
def return_unicode(request):
    """Render a template containing non-ASCII content."""
    return render_to_response('unicode.html')
def return_undecodable_binary(request):
    """Return raw bytes that cannot be decoded as text (a PDF header)."""
    return HttpResponse(
        b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document http://www.reportlab.com'
    )
def return_json_response(request):
    """Return a minimal JsonResponse payload."""
    return JsonResponse({'key': 'value'})
def return_json_file(request):
    "A view that parses and returns a JSON string as a file."
    # Use the charset from the request's Content-Type header if present,
    # otherwise fall back to the project default.
    match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
    if match:
        charset = match.group(1)
    else:
        charset = settings.DEFAULT_CHARSET

    # This just checks that the uploaded data is JSON
    obj_dict = json.loads(request.body.decode(charset))
    obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder, ensure_ascii=False)
    # Serve it back as an attachment in the same charset it arrived in.
    response = HttpResponse(obj_json.encode(charset), status=200,
                            content_type='application/json; charset=%s' % charset)
    response['Content-Disposition'] = 'attachment; filename=testfile.json'
    return response
def check_headers(request):
    "A view that responds with value of the X-ARG-CHECK header"
    # Custom HTTP headers appear in META prefixed with HTTP_ and upper-cased.
    return HttpResponse('HTTP_X_ARG_CHECK: %s' % request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))
def body(request):
    "A view that is requested with GET and accesses request.body. Refs #14753."
    return HttpResponse(request.body)
def read_all(request):
    "A view that is requested with accesses request.read()."
    return HttpResponse(request.read())
def read_buffer(request):
    "A view that is requested with accesses request.read(LARGE_BUFFER)."
    # The buffer size exceeds any request body used in the tests.
    return HttpResponse(request.read(99999))
def request_context_view(request):
    """Render with a RequestContext so the template can see the request."""
    # Special attribute that won't be present on a plain HttpRequest
    request.special_path = request.path
    return render_to_response('request_context.html', context_instance=RequestContext(request, {}))
def render_template_multiple_times(request):
    """A view that renders a template multiple times."""
    # Concatenate two renders so tests can assert on repeated template use.
    return HttpResponse(
        render_to_string('base.html') + render_to_string('base.html'))
| bsd-3-clause |
amite/ghostblog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/latex.py | 96 | 13931 | # -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
    """Escape *text* for use inside the LaTeX ``Verbatim`` environment.

    Backslash and braces are first mapped to placeholder control bytes so
    that the replacement macros themselves are not re-escaped; every
    special character then becomes a ``\\<prefix>Zxx{}`` macro (the macros
    are defined by STYLE_TEMPLATE).
    """
    # Stage 1: hide the characters that occur inside the replacement macros.
    staged = (text.replace('\\', '\x00')
                  .replace('{', '\x01')
                  .replace('}', '\x02'))
    # Stage 2: expand the placeholders and the remaining specials, in the
    # same order as the original chained replaces.
    replacements = (
        ('\x00', r'\%sZbs{}'), ('\x01', r'\%sZob{}'), ('\x02', r'\%sZcb{}'),
        ('^', r'\%sZca{}'), ('_', r'\%sZus{}'), ('&', r'\%sZam{}'),
        ('<', r'\%sZlt{}'), ('>', r'\%sZgt{}'), ('#', r'\%sZsh{}'),
        ('%', r'\%sZpc{}'), ('$', r'\%sZdl{}'), ('-', r'\%sZhy{}'),
        ("'", r'\%sZsq{}'), ('"', r'\%sZdq{}'), ('~', r'\%sZti{}'),
    )
    for char, template in replacements:
        staged = staged.replace(char, template % commandprefix)
    return staged
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
    """Return the short class name for a token type.

    Unknown subtypes fall back to the nearest ancestor that has a standard
    short form, with the unmatched tail appended verbatim.
    """
    fname = STANDARD_TYPES.get(ttype)
    if fname:
        return fname
    aname = ''
    # Walk up the token hierarchy until a standard short name is found,
    # accumulating the unmatched trailing components in `aname`.
    while fname is None:
        aname = ttype[-1] + aname
        ttype = ttype.parent
        fname = STANDARD_TYPES.get(ttype)
    return fname + aname
class LatexFormatter(Formatter):
    r"""
    Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
    standard packages.

    Without the `full` option, code is formatted as one ``Verbatim``
    environment, like this:

    .. sourcecode:: latex

        \begin{Verbatim}[commandchars=\\{\}]
            \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
                \PY{k}{pass}
        \end{Verbatim}

    The special command used here (``\PY``) and all the other macros it needs
    are output by the `get_style_defs` method.

    With the `full` option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The `get_style_defs()` method of a `LatexFormatter` returns a string
    containing ``\def`` commands defining the macros needed inside the
    ``Verbatim`` environments.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `full`
        Tells the formatter to output a "full" document, i.e. a complete
        self-contained document (default: ``False``).

    `title`
        If `full` is true, the title that should be used to caption the
        document (default: ``''``).

    `docclass`
        If the `full` option is enabled, this is the document class to use
        (default: ``'article'``).

    `preamble`
        If the `full` option is enabled, this can be further preamble commands,
        e.g. ``\usepackage`` (default: ``''``).

    `linenos`
        If set to ``True``, output line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `verboptions`
        Additional options given to the Verbatim environment (see the *fancyvrb*
        docs for possible values) (default: ``''``).

    `commandprefix`
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: ``'PY'``).
        *New in Pygments 0.7.*

        *New in Pygments 0.10:* the default is now ``'PY'`` instead of ``'C'``.

    `texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
        in comment tokens is not escaped so that LaTeX can render it (default:
        ``False``). *New in Pygments 1.2.*

    `mathescape`
        If set to ``True``, enables LaTeX math mode escape in comments. That
        is, ``'$...$'`` inside a comment will trigger math mode (default:
        ``False``). *New in Pygments 1.2.*
    """
    name = 'LaTeX'
    aliases = ['latex', 'tex']
    filenames = ['*.tex']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.docclass = options.get('docclass', 'article')
        self.preamble = options.get('preamble', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.verboptions = options.get('verboptions', '')
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.commandprefix = options.get('commandprefix', 'PY')
        self.texcomments = get_bool_opt(options, 'texcomments', False)
        self.mathescape = get_bool_opt(options, 'mathescape', False)
        self._create_stylesheet()

    def _create_stylesheet(self):
        # Build two lookup tables from the selected style:
        #   * ttype2name maps token types to their short command names
        #   * cmd2def maps those names to the LaTeX macro bodies
        t2n = self.ttype2name = {Token: ''}
        c2d = self.cmd2def = {}
        cp = self.commandprefix

        def rgbcolor(col):
            # Convert a 6-digit hex color string to an "r,g,b" triple of
            # floats in [0, 1]; a missing color means white.
            if col:
                return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
                                 for i in (0, 2, 4)])
            else:
                return '1,1,1'

        for ttype, ndef in self.style:
            name = _get_ttype_name(ttype)
            cmndef = ''
            if ndef['bold']:
                cmndef += r'\let\$$@bf=\textbf'
            if ndef['italic']:
                cmndef += r'\let\$$@it=\textit'
            if ndef['underline']:
                cmndef += r'\let\$$@ul=\underline'
            if ndef['roman']:
                cmndef += r'\let\$$@ff=\textrm'
            if ndef['sans']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['mono']:
                # Fixed: monospace styles must use the typewriter family
                # (\texttt); \textsf is the sans-serif family and was a bug.
                cmndef += r'\let\$$@ff=\texttt'
            if ndef['color']:
                cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
                           rgbcolor(ndef['color']))
            if ndef['border']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
                           (rgbcolor(ndef['border']),
                            rgbcolor(ndef['bgcolor'])))
            elif ndef['bgcolor']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\colorbox[rgb]{%s}{\strut ##1}}' %
                           rgbcolor(ndef['bgcolor']))
            if cmndef == '':
                # Style entry adds nothing; no macro needed for it.
                continue
            cmndef = cmndef.replace('$$', cp)
            t2n[ttype] = name
            c2d[name] = cmndef

    def get_style_defs(self, arg=''):
        """
        Return the command sequences needed to define the commands
        used to format text in the verbatim environment. ``arg`` is ignored.
        """
        cp = self.commandprefix
        styles = []
        for name, definition in self.cmd2def.items():
            styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
                          (cp, name, definition))
        return STYLE_TEMPLATE % {'cp': self.commandprefix,
                                 'styles': '\n'.join(styles)}

    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            # Buffer the highlighted code so it can be embedded in the
            # full-document template at the end.
            realoutfile = outfile
            outfile = StringIO()

        outfile.write(r'\begin{Verbatim}[commandchars=\\\{\}')
        if self.linenos:
            start, step = self.linenostart, self.linenostep
            outfile.write(',numbers=left' +
                          (start and ',firstnumber=%d' % start or '') +
                          (step and ',stepnumber=%d' % step or ''))
        if self.mathescape or self.texcomments:
            # Re-enable $, ^ and _ as special characters inside the
            # Verbatim environment so LaTeX markup in comments works.
            outfile.write(r',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
        if self.verboptions:
            outfile.write(',' + self.verboptions)
        outfile.write(']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in range(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]
                    value = value[len(start):]
                    start = escape_tex(start, self.commandprefix)
                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, self.commandprefix)
                        in_math = not in_math
                    value = '$'.join(parts)
                else:
                    value = escape_tex(value, self.commandprefix)
            else:
                value = escape_tex(value, self.commandprefix)
            # Collect the style names from the token type up to the root;
            # unknown types fall back to their computed aggregate name.
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                # Wrap every physical line separately so line breaks stay
                # outside the style command.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write('\\end{Verbatim}\n')

        if self.full:
            realoutfile.write(DOC_TEMPLATE %
                dict(docclass = self.docclass,
                     preamble = self.preamble,
                     title = self.title,
                     encoding = self.encoding or 'latin1',
                     styledefs = self.get_style_defs(),
                     code = outfile.getvalue()))
| mit |
zverevalexei/trex-http-proxy | trex_client/external_libs/pyzmq-14.5.0/python3/cel59/64bit/zmq/tests/test_z85.py | 43 | 2232 | # -*- coding: utf8 -*-
"""Test Z85 encoding
confirm values and roundtrip with test values from the reference implementation.
"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from unittest import TestCase
from zmq.utils import z85
class TestZ85(TestCase):
    """Round-trip the four CURVE reference keys through Z85.

    Expected encodings come from the Z85 reference implementation.
    """

    def _assert_roundtrip(self, raw, expected):
        # Encode, compare against the reference text, then decode the
        # result and compare against the original bytes.
        encoded = z85.encode(raw)
        self.assertEqual(encoded, expected)
        self.assertEqual(z85.decode(encoded), raw)

    def test_client_public(self):
        key = (b"\xBB\x88\x47\x1D\x65\xE2\x65\x9B"
               b"\x30\xC5\x5A\x53\x21\xCE\xBB\x5A"
               b"\xAB\x2B\x70\xA3\x98\x64\x5C\x26"
               b"\xDC\xA2\xB2\xFC\xB4\x3F\xC5\x18")
        self._assert_roundtrip(key, b"Yne@$w-vo<fVvi]a<NY6T1ed:M$fCG*[IaLV{hID")

    def test_client_secret(self):
        key = (b"\x7B\xB8\x64\xB4\x89\xAF\xA3\x67"
               b"\x1F\xBE\x69\x10\x1F\x94\xB3\x89"
               b"\x72\xF2\x48\x16\xDF\xB0\x1B\x51"
               b"\x65\x6B\x3F\xEC\x8D\xFD\x08\x88")
        self._assert_roundtrip(key, b"D:)Q[IlAW!ahhC2ac:9*A}h:p?([4%wOTJ%JR%cs")

    def test_server_public(self):
        key = (b"\x54\xFC\xBA\x24\xE9\x32\x49\x96"
               b"\x93\x16\xFB\x61\x7C\x87\x2B\xB0"
               b"\xC1\xD1\xFF\x14\x80\x04\x27\xC5"
               b"\x94\xCB\xFA\xCF\x1B\xC2\xD6\x52")
        self._assert_roundtrip(key, b"rq:rM>}U?@Lns47E1%kR.o@n%FcmmsL/@{H8]yf7")

    def test_server_secret(self):
        key = (b"\x8E\x0B\xDD\x69\x76\x28\xB9\x1D"
               b"\x8F\x24\x55\x87\xEE\x95\xC5\xB0"
               b"\x4D\x48\x96\x3F\x79\x25\x98\x77"
               b"\xB4\x9C\xD9\x06\x3A\xEA\xD3\xB7")
        self._assert_roundtrip(key, b"JTKVSB%%)wK0E.X)V>+}o?pNmC{O&4W4b!Ni{Lh6")
| mit |
liuyi1112/flask | tests/test_blueprints.py | 143 | 18147 | # -*- coding: utf-8 -*-
"""
tests.blueprints
~~~~~~~~~~~~~~~~
Blueprints (and currently modules)
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
from flask._compat import text_type
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound
def test_blueprint_specific_error_handling():
    """A blueprint's own error handler wins over the app handler; a
    blueprint without one falls back to the app-level handler."""
    frontend = flask.Blueprint('frontend', __name__)
    backend = flask.Blueprint('backend', __name__)
    # sideend deliberately registers no 403 handler of its own.
    sideend = flask.Blueprint('sideend', __name__)
    @frontend.errorhandler(403)
    def frontend_forbidden(e):
        return 'frontend says no', 403
    @frontend.route('/frontend-no')
    def frontend_no():
        flask.abort(403)
    @backend.errorhandler(403)
    def backend_forbidden(e):
        return 'backend says no', 403
    @backend.route('/backend-no')
    def backend_no():
        flask.abort(403)
    @sideend.route('/what-is-a-sideend')
    def sideend_no():
        flask.abort(403)
    app = flask.Flask(__name__)
    app.register_blueprint(frontend)
    app.register_blueprint(backend)
    app.register_blueprint(sideend)
    # App-level fallback used only when the blueprint has no handler.
    @app.errorhandler(403)
    def app_forbidden(e):
        return 'application itself says no', 403
    c = app.test_client()
    assert c.get('/frontend-no').data == b'frontend says no'
    assert c.get('/backend-no').data == b'backend says no'
    assert c.get('/what-is-a-sideend').data == b'application itself says no'
def test_blueprint_specific_user_error_handling():
class MyDecoratorException(Exception):
pass
class MyFunctionException(Exception):
pass
blue = flask.Blueprint('blue', __name__)
@blue.errorhandler(MyDecoratorException)
def my_decorator_exception_handler(e):
assert isinstance(e, MyDecoratorException)
return 'boom'
def my_function_exception_handler(e):
assert isinstance(e, MyFunctionException)
return 'bam'
blue.register_error_handler(MyFunctionException, my_function_exception_handler)
@blue.route('/decorator')
def blue_deco_test():
raise MyDecoratorException()
@blue.route('/function')
def blue_func_test():
raise MyFunctionException()
app = flask.Flask(__name__)
app.register_blueprint(blue)
c = app.test_client()
assert c.get('/decorator').data == b'boom'
assert c.get('/function').data == b'bam'
def test_blueprint_url_definitions():
bp = flask.Blueprint('test', __name__)
@bp.route('/foo', defaults={'baz': 42})
def foo(bar, baz):
return '%s/%d' % (bar, baz)
@bp.route('/bar')
def bar(bar):
return text_type(bar)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})
c = app.test_client()
assert c.get('/1/foo').data == b'23/42'
assert c.get('/2/foo').data == b'19/42'
assert c.get('/1/bar').data == b'23'
assert c.get('/2/bar').data == b'19'
def test_blueprint_url_processors():
bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')
@bp.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', flask.g.lang_code)
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code')
@bp.route('/')
def index():
return flask.url_for('.about')
@bp.route('/about')
def about():
return flask.url_for('.index')
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
assert c.get('/de/').data == b'/de/about'
assert c.get('/de/about').data == b'/de/'
def test_templates_and_static(test_apps):
from blueprintapp import app
c = app.test_client()
rv = c.get('/')
assert rv.data == b'Hello from the Frontend'
rv = c.get('/admin/')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/index2')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/static/test.txt')
assert rv.data.strip() == b'Admin File'
rv.close()
rv = c.get('/admin/static/css/test.css')
assert rv.data.strip() == b'/* nested file */'
rv.close()
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
expected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
expected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
rv = c.get('/admin/static/css/test.css')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == expected_max_age
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
with app.test_request_context():
assert flask.url_for('admin.static', filename='test.txt') == '/admin/static/test.txt'
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
assert e.name == 'missing.html'
else:
assert 0, 'expected exception'
with flask.Flask(__name__).test_request_context():
assert flask.render_template('nested/nested.txt') == 'I\'m nested'
def test_default_static_cache_timeout():
app = flask.Flask(__name__)
class MyBlueprint(flask.Blueprint):
def get_send_file_max_age(self, filename):
return 100
blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
app.register_blueprint(blueprint)
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
with app.test_request_context():
unexpected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
unexpected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
rv = blueprint.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 100
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
def test_templates_list(test_apps):
from blueprintapp import app
templates = sorted(app.jinja_env.list_templates())
assert templates == ['admin/index.html', 'frontend/index.html']
def test_dotted_names():
frontend = flask.Blueprint('myapp.frontend', __name__)
backend = flask.Blueprint('myapp.backend', __name__)
@frontend.route('/fe')
def frontend_index():
return flask.url_for('myapp.backend.backend_index')
@frontend.route('/fe2')
def frontend_page2():
return flask.url_for('.frontend_index')
@backend.route('/be')
def backend_index():
return flask.url_for('myapp.frontend.frontend_index')
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
c = app.test_client()
assert c.get('/fe').data.strip() == b'/be'
assert c.get('/fe2').data.strip() == b'/fe'
assert c.get('/be').data.strip() == b'/fe'
def test_dotted_names_from_app():
app = flask.Flask(__name__)
app.testing = True
test = flask.Blueprint('test', __name__)
@app.route('/')
def app_index():
return flask.url_for('test.index')
@test.route('/test/')
def index():
return flask.url_for('app_index')
app.register_blueprint(test)
with app.test_client() as c:
rv = c.get('/')
assert rv.data == b'/test/'
def test_empty_url_defaults():
    """A route with defaults and a parameterized route can share a view."""
    bp = flask.Blueprint('bp', __name__)

    @bp.route('/', defaults={'page': 1})
    @bp.route('/page/<int:page>')
    def something(page):
        return str(page)

    app = flask.Flask(__name__)
    app.register_blueprint(bp)

    client = app.test_client()
    # Bare URL uses the default page, explicit URL passes the converter value.
    assert client.get('/').data == b'1'
    assert client.get('/page/2').data == b'2'
def test_route_decorator_custom_endpoint():
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
@bp.route('/bar', endpoint='bar')
def foo_bar():
return flask.request.endpoint
@bp.route('/bar/123', endpoint='123')
def foo_bar_foo():
return flask.request.endpoint
@bp.route('/bar/foo')
def bar_foo():
return flask.request.endpoint
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.request.endpoint
c = app.test_client()
assert c.get('/').data == b'index'
assert c.get('/py/foo').data == b'bp.foo'
assert c.get('/py/bar').data == b'bp.bar'
assert c.get('/py/bar/123').data == b'bp.123'
assert c.get('/py/bar/foo').data == b'bp.bar_foo'
def test_route_decorator_custom_endpoint_with_dots():
    """Endpoint names containing dots must be rejected on blueprints."""
    bp = flask.Blueprint('bp', __name__)
    @bp.route('/foo')
    def foo():
        return flask.request.endpoint
    # A dotted endpoint on the route decorator must raise AssertionError.
    try:
        @bp.route('/bar', endpoint='bar.bar')
        def foo_bar():
            return flask.request.endpoint
    except AssertionError:
        pass
    else:
        raise AssertionError('expected AssertionError not raised')
    try:
        @bp.route('/bar/123', endpoint='bar.123')
        def foo_bar_foo():
            return flask.request.endpoint
    except AssertionError:
        pass
    else:
        raise AssertionError('expected AssertionError not raised')
    def foo_foo_foo():
        pass
    # The same restriction applies to add_url_rule and to deferred
    # decorator application.
    pytest.raises(
        AssertionError,
        lambda: bp.add_url_rule(
            '/bar/123', endpoint='bar.123', view_func=foo_foo_foo
        )
    )
    pytest.raises(
        AssertionError,
        bp.route('/bar/123', endpoint='bar.123'),
        lambda: None
    )
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    c = app.test_client()
    assert c.get('/py/foo').data == b'bp.foo'
    # The dotted-endpoint rules above were never actually registered.
    rv = c.get('/py/bar')
    assert rv.status_code == 404
    rv = c.get('/py/bar/123')
    assert rv.status_code == 404
def test_template_filter():
    """A blueprint can register an app-wide template filter by decorator."""
    bp = flask.Blueprint('bp', __name__)

    @bp.app_template_filter()
    def my_reverse(s):
        return s[::-1]

    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')

    # The filter is registered under the function's name and is callable.
    filters = app.jinja_env.filters
    assert 'my_reverse' in filters
    assert filters['my_reverse'] == my_reverse
    assert filters['my_reverse']('abcd') == 'dcba'
def test_add_template_filter():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('strrev')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_add_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'strrev')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
def super_reverse(s):
return s[::-1]
bp.add_app_template_filter(super_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'super_reverse')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_test():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_add_template_test():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_add_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
def boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
| bsd-3-clause |
richardcs/ansible | lib/ansible/modules/cloud/linode/linode_v4.py | 12 | 8497 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
}
DOCUMENTATION = """
---
module: linode_v4
short_description: Manage instances on the Linode cloud.
description: Manage instances on the Linode cloud.
version_added: "2.8"
requirements:
- python >= 2.7
- linode_api4 >= 2.0.0
author:
- Luke Murphy (@lwm)
notes:
- No Linode resizing is currently implemented. This module will, in time,
replace the current Linode module which uses deprecated API bindings on the
Linode side.
options:
region:
description:
- The region of the instance. This is a required parameter only when
creating Linode instances. See
U(https://developers.linode.com/api/v4#tag/Regions).
required: false
type: str
image:
description:
- The image of the instance. This is a required parameter only when
creating Linode instances. See
U(https://developers.linode.com/api/v4#tag/Images).
type: str
required: false
type:
description:
- The type of the instance. This is a required parameter only when
creating Linode instances. See
U(https://developers.linode.com/api/v4#tag/Linode-Types).
type: str
required: false
label:
description:
- The instance label. This label is used as the main determiner for
idempotence for the module and is therefore mandatory.
type: str
required: true
group:
description:
- The group that the instance should be marked under. Please note, that
group labelling is deprecated but still supported. The encouraged
method for marking instances is to use tags.
type: str
required: false
tags:
description:
- The tags that the instance should be marked under. See
U(https://developers.linode.com/api/v4#tag/Tags).
required: false
type: list
root_pass:
description:
- The password for the root user. If not specified, one will be
generated. This generated password will be available in the task
success JSON.
required: false
type: str
authorized_keys:
description:
- A list of SSH public key parts to deploy for the root user.
required: false
type: list
state:
description:
- The desired instance state.
type: str
choices:
- present
- absent
required: true
access_token:
description:
- The Linode API v4 access token. It may also be specified by exposing
the C(LINODE_ACCESS_TOKEN) environment variable. See
U(https://developers.linode.com/api/v4#section/Access-and-Authentication).
required: true
"""
EXAMPLES = """
- name: Create a new Linode.
linode_v4:
label: new-linode
type: g6-nanode-1
region: eu-west
image: linode/debian9
root_pass: passw0rd
authorized_keys:
- "ssh-rsa ..."
state: present
- name: Delete that new Linode.
linode_v4:
label: new-linode
state: absent
"""
RETURN = """
instance:
description: The instance description in JSON serialized form.
returned: Always.
type: dict
sample: {
"root_pass": "foobar", # if auto-generated
"alerts": {
"cpu": 90,
"io": 10000,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
}
},
"created": "2018-09-26T08:12:33",
"group": "Foobar Group",
"hypervisor": "kvm",
"id": 10480444,
"image": "linode/centos7",
"ipv4": [
"130.132.285.233"
],
"ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
"label": "lin-foo",
"region": "eu-west",
"specs": {
"disk": 25600,
"memory": 1024,
"transfer": 1000,
"vcpus": 1
},
"status": "running",
"tags": [],
"type": "g6-nanode-1",
"updated": "2018-09-26T10:10:14",
"watchdog_enabled": true
}
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.linode import get_user_agent
try:
from linode_api4 import Instance, LinodeClient
HAS_LINODE_DEPENDENCY = True
except ImportError:
HAS_LINODE_DEPENDENCY = False
def create_linode(module, client, **kwargs):
    """Creates a Linode instance and handles return format.

    Fails the module on any API or response-parsing error; otherwise
    returns the raw JSON description of the new instance.
    """
    # Dropping a None root_pass lets the API generate one; the generated
    # password comes back alongside the instance (tuple case below).
    if kwargs['root_pass'] is None:
        kwargs.pop('root_pass')
    try:
        response = client.linode.instance_create(**kwargs)
    except Exception as exception:
        module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
    try:
        # A tuple response is (instance, generated_root_pass); surface the
        # generated password in the returned JSON.
        if isinstance(response, tuple):
            instance, root_pass = response
            instance_json = instance._raw_json
            instance_json.update({'root_pass': root_pass})
            return instance_json
        else:
            return response._raw_json
    except TypeError:
        module.fail_json(msg='Unable to parse Linode instance creation'
                            ' response. Please raise a bug against this'
                            ' module on https://github.com/ansible/ansible/issues'
                        )
def maybe_instance_from_label(module, client):
    """Return the instance whose label matches the module's, or None."""
    try:
        matches = client.linode.instances(
            Instance.label == module.params['label']
        )
        return matches[0]
    except IndexError:
        # No instance carries this label.
        return None
    except Exception as exception:
        module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
def initialise_module():
    """Initialise the module parameter specification.

    Returns a configured AnsibleModule; `label` and `state` drive
    idempotence, region/image/type are only required together when
    creating a new instance.
    """
    return AnsibleModule(
        argument_spec=dict(
            label=dict(type='str', required=True),
            state=dict(
                type='str',
                required=True,
                choices=['present', 'absent']
            ),
            # Token is secret: kept out of logs and may come from the
            # LINODE_ACCESS_TOKEN environment variable.
            access_token=dict(
                type='str',
                required=True,
                no_log=True,
                fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
            ),
            authorized_keys=dict(type='list', required=False),
            group=dict(type='str', required=False),
            image=dict(type='str', required=False),
            region=dict(type='str', required=False),
            root_pass=dict(type='str', required=False, no_log=True),
            tags=dict(type='list', required=False),
            type=dict(type='str', required=False),
        ),
        supports_check_mode=False,
        required_one_of=(
            ['state', 'label'],
        ),
        required_together=(
            ['region', 'image', 'type'],
        )
    )
def build_client(module):
    """Build a LinodeClient.

    Authenticates with the validated access token and tags API requests
    with this module's user agent string.
    """
    return LinodeClient(
        module.params['access_token'],
        user_agent=get_user_agent('linode_v4_module')
    )
def main():
    """Module entrypoint.

    Dispatches on (desired state, instance existence) to create, delete,
    or report no change.
    """
    module = initialise_module()
    if not HAS_LINODE_DEPENDENCY:
        module.fail_json(msg='The linode_v4 module requires the linode_api4 package')
    client = build_client(module)
    instance = maybe_instance_from_label(module, client)
    if module.params['state'] == 'present' and instance is not None:
        # Already present: idempotent no-op.
        module.exit_json(changed=False, instance=instance._raw_json)
    elif module.params['state'] == 'present' and instance is None:
        instance_json = create_linode(
            module, client,
            authorized_keys=module.params['authorized_keys'],
            group=module.params['group'],
            image=module.params['image'],
            label=module.params['label'],
            region=module.params['region'],
            root_pass=module.params['root_pass'],
            tags=module.params['tags'],
            ltype=module.params['type'],
        )
        module.exit_json(changed=True, instance=instance_json)
    elif module.params['state'] == 'absent' and instance is not None:
        instance.delete()
        module.exit_json(changed=True, instance=instance._raw_json)
    elif module.params['state'] == 'absent' and instance is None:
        # Already absent: idempotent no-op.
        module.exit_json(changed=False, instance={})
if __name__ == "__main__":
    main()
| gpl-3.0 |
jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/tensorflow/contrib/quantization/python/array_ops.py | 178 | 1156 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
# pylint: enable=unused-import
| mit |
edensparkles/FIRSTAID | FIRST_AID/venv/Lib/site-packages/click/_compat.py | 66 | 20706 | import re
import io
import os
import sys
import codecs
from weakref import WeakKeyDictionary
PY2 = sys.version_info[0] == 2
WIN = sys.platform.startswith('win')
DEFAULT_COLUMNS = 80
_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
def get_filesystem_encoding():
    """Return the OS filesystem encoding, falling back to the interpreter default."""
    fs_encoding = sys.getfilesystemencoding()
    if fs_encoding:
        return fs_encoding
    return sys.getdefaultencoding()
def _make_text_stream(stream, encoding, errors):
    """Wrap a binary *stream* in a line-buffered, non-detaching text wrapper.

    Falls back to the stream's best-guess encoding and 'replace' error
    handling when the caller did not specify them.
    """
    if encoding is None:
        encoding = get_best_encoding(stream)
    if errors is None:
        errors = 'replace'
    return _NonClosingTextIOWrapper(stream, encoding, errors,
                                    line_buffering=True)
def is_ascii_encoding(encoding):
    """Return True if *encoding* resolves to the ASCII codec."""
    try:
        codec_info = codecs.lookup(encoding)
    except LookupError:
        # Unknown codec names simply do not count as ASCII.
        return False
    return codec_info.name == 'ascii'
def get_best_encoding(stream):
    """Return the stream's encoding, upgrading ASCII (or missing) to UTF-8."""
    rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
    # An ASCII encoding is almost always a misconfiguration (e.g. a bare
    # POSIX locale), so treat it as UTF-8 instead.
    if is_ascii_encoding(rv):
        return 'utf-8'
    return rv
class _NonClosingTextIOWrapper(io.TextIOWrapper):
    """TextIOWrapper that detaches instead of closing on destruction.

    This allows wrapping the shared sys.std* streams without shutting
    them down when the wrapper is garbage collected.
    """

    def __init__(self, stream, encoding, errors, **extra):
        # _FixupStream fills in readable()/writable()/seekable() probes
        # that io.TextIOWrapper requires but raw streams may lack.
        self._stream = stream = _FixupStream(stream)
        io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)

    # The io module is a place where the Python 3 text behavior
    # was forced upon Python 2, so we need to unbreak
    # it to look like Python 2.
    if PY2:
        def write(self, x):
            # Bytes (and native str) bypass the text layer and go
            # straight to the underlying buffer.
            if isinstance(x, str) or is_bytes(x):
                try:
                    self.flush()
                except Exception:
                    pass
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)

        def writelines(self, lines):
            for line in lines:
                self.write(line)

    def __del__(self):
        # Detach (not close) so the wrapped stream survives us.
        try:
            self.detach()
        except Exception:
            pass

    def isatty(self):
        # https://bitbucket.org/pypy/pypy/issue/1803
        return self._stream.isatty()
class _FixupStream(object):
    """The new io interface needs more from streams than streams
    traditionally implement.  As such, this fix-up code is necessary in
    some circumstances.
    """

    def __init__(self, stream):
        self._stream = stream

    def __getattr__(self, name):
        # Everything not overridden below is proxied to the real stream.
        return getattr(self._stream, name)

    def read1(self, size):
        f = getattr(self._stream, 'read1', None)
        if f is not None:
            return f(size)
        # We only dispatch to readline instead of read in Python 2 as we
        # do not want cause problems with the different implementation
        # of line buffering.
        if PY2:
            return self._stream.readline(size)
        return self._stream.read(size)

    def readable(self):
        x = getattr(self._stream, 'readable', None)
        if x is not None:
            return x()
        # Fall back to probing: a readable stream accepts a 0-byte read.
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True

    def writable(self):
        x = getattr(self._stream, 'writable', None)
        if x is not None:
            return x()
        # Probe with empty text first, then empty bytes.
        try:
            self._stream.write('')
        except Exception:
            try:
                self._stream.write(b'')
            except Exception:
                return False
        return True

    def seekable(self):
        x = getattr(self._stream, 'seekable', None)
        if x is not None:
            return x()
        # Seeking to the current position is a harmless probe.
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
# Python 2 / Python 3 compatibility shims.  Each branch defines the same
# set of names (text_type, is_bytes, std-stream accessors, ...) with the
# semantics appropriate for that interpreter.
if PY2:
    text_type = unicode
    bytes = str
    raw_input = raw_input
    string_types = (str, unicode)
    iteritems = lambda x: x.iteritems()
    range_type = xrange

    def is_bytes(x):
        return isinstance(x, (buffer, bytearray))

    _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')

    # For Windows, we need to force stdout/stdin/stderr to binary if it's
    # fetched for that.  This obviously is not the most correct way to do
    # it as it changes global state.  Unfortunately, there does not seem to
    # be a clear better way to do it as just reopening the file in binary
    # mode does not change anything.
    #
    # An option would be to do what Python 3 does and to open the file as
    # binary only, patch it back to the system, and then use a wrapper
    # stream that converts newlines.  It's not quite clear what's the
    # correct option here.
    #
    # This code also lives in _winconsole for the fallback to the console
    # emulation stream.
    if WIN:
        import msvcrt

        def set_binary_mode(f):
            try:
                fileno = f.fileno()
            except Exception:
                pass
            else:
                msvcrt.setmode(fileno, os.O_BINARY)
            return f
    else:
        # POSIX streams are binary-safe already; no-op.
        set_binary_mode = lambda x: x

    def isidentifier(x):
        return _identifier_re.search(x) is not None

    def get_binary_stdin():
        return set_binary_mode(sys.stdin)

    def get_binary_stdout():
        return set_binary_mode(sys.stdout)

    def get_binary_stderr():
        return set_binary_mode(sys.stderr)

    def get_text_stdin(encoding=None, errors=None):
        # Prefer the Windows console emulation stream when available.
        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
        if rv is not None:
            return rv
        return _make_text_stream(sys.stdin, encoding, errors)

    def get_text_stdout(encoding=None, errors=None):
        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
        if rv is not None:
            return rv
        return _make_text_stream(sys.stdout, encoding, errors)

    def get_text_stderr(encoding=None, errors=None):
        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
        if rv is not None:
            return rv
        return _make_text_stream(sys.stderr, encoding, errors)

    def filename_to_ui(value):
        # Byte filenames are decoded for display with replacement chars.
        if isinstance(value, bytes):
            value = value.decode(get_filesystem_encoding(), 'replace')
        return value
else:
    import io
    text_type = str
    raw_input = input
    string_types = (str,)
    range_type = range
    isidentifier = lambda x: x.isidentifier()
    iteritems = lambda x: iter(x.items())

    def is_bytes(x):
        return isinstance(x, (bytes, memoryview, bytearray))
def _is_binary_reader(stream, default=False):
try:
return isinstance(stream.read(0), bytes)
except Exception:
return default
# This happens in some cases where the stream was already
# closed. In this case, we assume the default.
def _is_binary_writer(stream, default=False):
try:
stream.write(b'')
except Exception:
try:
stream.write('')
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream):
    """Return a binary reader for *stream*, or None if none can be found."""
    # We need to figure out if the given stream is already binary.
    # This can happen because the official docs recommend detaching
    # the streams to get binary streams.  Some code might do this, so
    # we need to deal with this case explicitly.
    if _is_binary_reader(stream, False):
        return stream

    buf = getattr(stream, 'buffer', None)

    # Same situation here; this time we assume that the buffer is
    # actually binary in case it's closed.
    if buf is not None and _is_binary_reader(buf, True):
        return buf
    # Implicitly returns None when no binary reader exists.


def _find_binary_writer(stream):
    """Return a binary writer for *stream*, or None if none can be found."""
    # We need to figure out if the given stream is already binary.
    # This can happen because the official docs recommend detatching
    # the streams to get binary streams.  Some code might do this, so
    # we need to deal with this case explicitly.
    if _is_binary_writer(stream, False):
        return stream

    buf = getattr(stream, 'buffer', None)

    # Same situation here; this time we assume that the buffer is
    # actually binary in case it's closed.
    if buf is not None and _is_binary_writer(buf, True):
        return buf
    # Implicitly returns None when no binary writer exists.
def _stream_is_misconfigured(stream):
    """A stream is misconfigured if its encoding is ASCII."""
    # If the stream does not have an encoding set, we assume it's set
    # to ASCII.  This appears to happen in certain unittest
    # environments.  It's not quite clear what the correct behavior is
    # but this at least will force Click to recover somehow.
    return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
def _is_compatible_text_stream(stream, encoding, errors):
stream_encoding = getattr(stream, 'encoding', None)
stream_errors = getattr(stream, 'errors', None)
# Perfect match.
if stream_encoding == encoding and stream_errors == errors:
return True
# Otherwise, it's only a compatible stream if we did not ask for
# an encoding.
if encoding is None:
return stream_encoding is not None
return False
def _force_correct_text_reader(text_reader, encoding, errors):
    """Return a reader honouring *encoding*/*errors*, rewrapping if necessary."""
    if _is_binary_reader(text_reader, False):
        binary_reader = text_reader
    else:
        # If there is no target encoding set, we need to verify that the
        # reader is not actually misconfigured.
        if encoding is None and not _stream_is_misconfigured(text_reader):
            return text_reader

        if _is_compatible_text_stream(text_reader, encoding, errors):
            return text_reader

        # If the reader has no encoding, we try to find the underlying
        # binary reader for it.  If that fails because the environment is
        # misconfigured, we silently go with the same reader because this
        # is too common to happen.  In that case, mojibake is better than
        # exceptions.
        binary_reader = _find_binary_reader(text_reader)
        if binary_reader is None:
            return text_reader

    # At this point, we default the errors to replace instead of strict
    # because nobody handles those errors anyways and at this point
    # we're so fundamentally fucked that nothing can repair it.
    if errors is None:
        errors = 'replace'
    return _make_text_stream(binary_reader, encoding, errors)


def _force_correct_text_writer(text_writer, encoding, errors):
    """Return a writer honouring *encoding*/*errors*, rewrapping if necessary."""
    if _is_binary_writer(text_writer, False):
        binary_writer = text_writer
    else:
        # If there is no target encoding set, we need to verify that the
        # writer is not actually misconfigured.
        if encoding is None and not _stream_is_misconfigured(text_writer):
            return text_writer

        if _is_compatible_text_stream(text_writer, encoding, errors):
            return text_writer

        # If the writer has no encoding, we try to find the underlying
        # binary writer for it.  If that fails because the environment is
        # misconfigured, we silently go with the same writer because this
        # is too common to happen.  In that case, mojibake is better than
        # exceptions.
        binary_writer = _find_binary_writer(text_writer)
        if binary_writer is None:
            return text_writer

    # At this point, we default the errors to replace instead of strict
    # because nobody handles those errors anyways and at this point
    # we're so fundamentally fucked that nothing can repair it.
    if errors is None:
        errors = 'replace'
    return _make_text_stream(binary_writer, encoding, errors)
def get_binary_stdin():
    """Return sys.stdin as a binary stream; raise if none can be found."""
    reader = _find_binary_reader(sys.stdin)
    if reader is None:
        raise RuntimeError('Was not able to determine binary '
                           'stream for sys.stdin.')
    return reader


def get_binary_stdout():
    """Return sys.stdout as a binary stream; raise if none can be found."""
    writer = _find_binary_writer(sys.stdout)
    if writer is None:
        raise RuntimeError('Was not able to determine binary '
                           'stream for sys.stdout.')
    return writer


def get_binary_stderr():
    """Return sys.stderr as a binary stream; raise if none can be found."""
    writer = _find_binary_writer(sys.stderr)
    if writer is None:
        raise RuntimeError('Was not able to determine binary '
                           'stream for sys.stderr.')
    return writer


def get_text_stdin(encoding=None, errors=None):
    """Return a text stdin; Windows console streams take precedence."""
    rv = _get_windows_console_stream(sys.stdin, encoding, errors)
    if rv is not None:
        return rv
    return _force_correct_text_reader(sys.stdin, encoding, errors)


def get_text_stdout(encoding=None, errors=None):
    """Return a text stdout; Windows console streams take precedence."""
    rv = _get_windows_console_stream(sys.stdout, encoding, errors)
    if rv is not None:
        return rv
    return _force_correct_text_writer(sys.stdout, encoding, errors)


def get_text_stderr(encoding=None, errors=None):
    """Return a text stderr; Windows console streams take precedence."""
    rv = _get_windows_console_stream(sys.stderr, encoding, errors)
    if rv is not None:
        return rv
    return _force_correct_text_writer(sys.stderr, encoding, errors)


def filename_to_ui(value):
    """Decode a filename into text that is safe to display."""
    if isinstance(value, bytes):
        value = value.decode(get_filesystem_encoding(), 'replace')
    else:
        # Round-trip through UTF-8 so surrogates left over from
        # surrogateescape-decoded filenames become replacement chars.
        value = value.encode('utf-8', 'surrogateescape') \
            .decode('utf-8', 'replace')
    return value
def get_streerror(e, default=None):
    """Return a textual error message for the exception *e*.

    Prefers the OS-level ``strerror`` attribute, then *default*, then
    ``str(e)``; byte messages are decoded to text.
    """
    if hasattr(e, 'strerror'):
        msg = e.strerror
    elif default is not None:
        msg = default
    else:
        msg = str(e)
    # Normalise byte messages (possible on Python 2 / odd platforms).
    return msg.decode('utf-8', 'replace') if isinstance(msg, bytes) else msg
def open_stream(filename, mode='r', encoding=None, errors='strict',
                atomic=False):
    """Open *filename* (or '-' for a standard stream) for reading/writing.

    Returns a ``(stream, should_close)`` tuple; the flag is False for the
    shared standard streams, which callers must not close.  With
    ``atomic=True`` writes go to a temp file that replaces the target on
    close.
    """
    # Standard streams first.  These are simple because they don't need
    # special handling for the atomic flag.  It's entirely ignored.
    if filename == '-':
        if 'w' in mode:
            if 'b' in mode:
                return get_binary_stdout(), False
            return get_text_stdout(encoding=encoding, errors=errors), False
        if 'b' in mode:
            return get_binary_stdin(), False
        return get_text_stdin(encoding=encoding, errors=errors), False

    # Non-atomic writes directly go out through the regular open functions.
    if not atomic:
        if encoding is None:
            return open(filename, mode), True
        return io.open(filename, mode, encoding=encoding, errors=errors), True

    # Some usability stuff for atomic writes
    if 'a' in mode:
        raise ValueError(
            'Appending to an existing file is not supported, because that '
            'would involve an expensive `copy`-operation to a temporary '
            'file. Open the file in normal `w`-mode and copy explicitly '
            'if that\'s what you\'re after.'
        )
    if 'x' in mode:
        raise ValueError('Use the `overwrite`-parameter instead.')
    if 'w' not in mode:
        raise ValueError('Atomic writes only make sense with `w`-mode.')

    # Atomic writes are more complicated.  They work by opening a file
    # as a proxy in the same folder and then using the fdopen
    # functionality to wrap it in a Python file.  Then we wrap it in an
    # atomic file that moves the file over on close.
    import tempfile
    fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
                                        prefix='.__atomic-write')
    if encoding is not None:
        f = io.open(fd, mode, encoding=encoding, errors=errors)
    else:
        f = os.fdopen(fd, mode)
    return _AtomicFile(f, tmp_filename, filename), True
# Used in a destructor call, needs extra protection from interpreter cleanup.
if hasattr(os, 'replace'):
    _replace = os.replace
    _can_replace = True
else:
    _replace = os.rename
    # On Windows, rename() cannot atomically overwrite an existing file.
    _can_replace = not WIN


class _AtomicFile(object):
    """File proxy that moves a temp file over the real target on close."""

    def __init__(self, f, tmp_filename, real_filename):
        self._f = f
        self._tmp_filename = tmp_filename
        self._real_filename = real_filename
        self.closed = False

    @property
    def name(self):
        # Present the final destination, not the hidden temp file.
        return self._real_filename

    def close(self, delete=False):
        # NOTE(review): the *delete* flag passed in by __exit__ is
        # currently ignored -- the temp file is always moved into place,
        # even after an exception.  Confirm whether failed writes should
        # instead be discarded.
        if self.closed:
            return
        self._f.close()
        if not _can_replace:
            # Fall back to remove-then-rename where replace is unavailable.
            try:
                os.remove(self._real_filename)
            except OSError:
                pass
        _replace(self._tmp_filename, self._real_filename)
        self.closed = True

    def __getattr__(self, name):
        # Delegate everything else (write, flush, ...) to the real file.
        return getattr(self._f, name)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close(delete=exc_type is not None)

    def __repr__(self):
        return repr(self._f)
# Filled in below on Windows when colorama is importable; otherwise they
# stay None and callers must check before use.
auto_wrap_for_ansi = None
colorama = None
get_winterm_size = None


def strip_ansi(value):
    """Remove ANSI escape sequences from *value*."""
    return _ansi_re.sub('', value)


def should_strip_ansi(stream=None, color=None):
    """Decide whether ANSI codes should be stripped for *stream*."""
    # With no explicit color choice, strip unless the stream is a tty.
    if color is None:
        if stream is None:
            stream = sys.stdin
        return not isatty(stream)
    return not color
# If we're on Windows, we provide transparent integration through
# colorama.  This will make ANSI colors through the echo function
# work automatically.
if WIN:
    # Windows has a smaller terminal
    DEFAULT_COLUMNS = 79

    from ._winconsole import _get_windows_console_stream

    def _get_argv_encoding():
        import locale
        return locale.getpreferredencoding()

    if PY2:
        def raw_input(prompt=''):
            # Echo the prompt through the cached text stdout so it is
            # encoded consistently with other output.
            sys.stderr.flush()
            if prompt:
                stdout = _default_text_stdout()
                stdout.write(prompt)
            stdin = _default_text_stdin()
            return stdin.readline().rstrip('\r\n')

    try:
        import colorama
    except ImportError:
        pass
    else:
        # One cached wrapper per stream; weak keys so streams can die.
        _ansi_stream_wrappers = WeakKeyDictionary()

        def auto_wrap_for_ansi(stream, color=None):
            """This function wraps a stream so that calls through colorama
            are issued to the win32 console API to recolor on demand.  It
            also ensures to reset the colors if a write call is interrupted
            to not destroy the console afterwards.
            """
            try:
                cached = _ansi_stream_wrappers.get(stream)
            except Exception:
                cached = None
            if cached is not None:
                return cached
            strip = should_strip_ansi(stream, color)
            ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
            rv = ansi_wrapper.stream
            _write = rv.write

            def _safe_write(s):
                try:
                    return _write(s)
                except:
                    # Reset console colors before propagating the error.
                    ansi_wrapper.reset_all()
                    raise

            rv.write = _safe_write
            try:
                _ansi_stream_wrappers[stream] = rv
            except Exception:
                pass
            return rv

        def get_winterm_size():
            # Console window extent in (columns, rows).
            win = colorama.win32.GetConsoleScreenBufferInfo(
                colorama.win32.STDOUT).srWindow
            return win.Right - win.Left, win.Bottom - win.Top
else:
    def _get_argv_encoding():
        return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()

    # No Windows console emulation off-Windows.
    _get_windows_console_stream = lambda *x: None
def term_len(x):
    """Length of *x* as displayed on a terminal (ANSI sequences excluded)."""
    return len(strip_ansi(x))
def isatty(stream):
    """Safely ask *stream* whether it is attached to a terminal."""
    try:
        result = stream.isatty()
    except Exception:
        # Streams without a working isatty() count as non-ttys.
        return False
    return result
def _make_cached_stream_func(src_func, wrapper_func):
cache = WeakKeyDictionary()
def func():
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
cache[stream] = rv
except Exception:
pass
return rv
return func
# Lazily-created default text streams, cached one wrapper per stream.
_default_text_stdin = _make_cached_stream_func(
    lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(
    lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(
    lambda: sys.stderr, get_text_stderr)


# Public lookup tables mapping stream names to opener functions.
binary_streams = {
    'stdin': get_binary_stdin,
    'stdout': get_binary_stdout,
    'stderr': get_binary_stderr,
}

text_streams = {
    'stdin': get_text_stdin,
    'stdout': get_text_stdout,
    'stderr': get_text_stderr,
}
| mit |
Dawny33/luigi | test/contrib/hadoop_jar_test.py | 6 | 3207 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
import tempfile
import shlex
from helpers import unittest
from luigi.contrib.hadoop_jar import HadoopJarJobError, HadoopJarJobTask
from mock import patch, MagicMock
class TestHadoopJarJob(HadoopJarJobTask):
    # Minimal job whose jar path is injected as a parameter.
    path = luigi.Parameter()

    def jar(self):
        return self.path


class TestMissingJarJob(HadoopJarJobTask):
    # Deliberately provides no jar() -- used to exercise the error path.
    pass


class TestRemoteHadoopJarJob(TestHadoopJarJob):
    # Complete ssh configuration: host, key file and username.
    def ssh(self):
        return {"host": "myhost", "key_file": "file", "username": "user"}


class TestRemoteMissingJarJob(TestHadoopJarJob):
    # ssh configuration missing "username" -- used to exercise the error path.
    def ssh(self):
        return {"host": "myhost", "key_file": "file"}


class TestRemoteHadoopJarTwoParamJob(TestRemoteHadoopJarJob):
    # Second parameter so the generated task_id contains multiple values.
    param2 = luigi.Parameter()
class HadoopJarJobTaskTest(unittest.TestCase):
    """Unit tests for HadoopJarJobTask with the hadoop job runner mocked out."""

    @patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
    def test_good(self, mock_job):
        # A task pointing at an existing jar file runs without raising.
        mock_job.return_value = None
        with tempfile.NamedTemporaryFile() as temp_file:
            task = TestHadoopJarJob(temp_file.name)
            task.run()

    @patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
    def test_missing_jar(self, mock_job):
        # No jar() implementation -> HadoopJarJobError.
        mock_job.return_value = None
        task = TestMissingJarJob()
        self.assertRaises(HadoopJarJobError, task.run)

    @patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
    def test_remote_job(self, mock_job):
        # A complete ssh configuration runs without raising.
        mock_job.return_value = None
        with tempfile.NamedTemporaryFile() as temp_file:
            task = TestRemoteHadoopJarJob(temp_file.name)
            task.run()

    @patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
    def test_remote_job_with_space_in_task_id(self, mock_job):
        # The task id must survive shell quoting in the generated command.
        with tempfile.NamedTemporaryFile() as temp_file:
            def check_space(arr, task_id):
                # Assert that the task id appears intact inside the
                # 'hadoop jar' invocation after shell tokenisation.
                for a in arr:
                    if a.startswith('hadoop jar'):
                        found = False
                        for x in shlex.split(a):
                            if task_id in x:
                                found = True
                        if not found:
                            raise AssertionError
            task = TestRemoteHadoopJarTwoParamJob(temp_file.name, 'test')
            mock_job.side_effect = lambda x: check_space(x, task.task_id)
            task.run()

    @patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
    def test_remote_job_missing_config(self, mock_job):
        # ssh configuration without a username -> HadoopJarJobError.
        mock_job.return_value = None
        with tempfile.NamedTemporaryFile() as temp_file:
            task = TestRemoteMissingJarJob(temp_file.name)
            self.assertRaises(HadoopJarJobError, task.run)
| apache-2.0 |
Alex-Diez/python-tdd-katas | old-katas/prime-factors/day-9.py | 1 | 1042 | # -*- codeing: utf-8 -*-
class PrimeFactor(object):
    """Decompose positive integers into their prime factors."""

    def generate(self, n):
        """Return the prime factors of *n* in ascending order.

        Repeated factors appear once per occurrence (e.g. 8 -> [2, 2, 2]).
        For n <= 1 an empty list is returned.
        """
        primes = []
        candidate = 2
        while n > 1:
            while n % candidate == 0:
                primes.append(candidate)
                # Floor division keeps n an int on Python 3; the original
                # '/=' silently converted n to float, risking precision
                # loss for large inputs.
                n //= candidate
            candidate += 1
        return primes
import unittest
class PrimeFactorTest(unittest.TestCase):
    """Kata test cases covering factorisations of 1 through 9."""

    def setUp(self):
        self.primeFactor = PrimeFactor()

    def test_one(self):
        # 1 has no prime factors.
        self.assertEqual([], self.primeFactor.generate(1))

    def test_two(self):
        self.assertEqual([2], self.primeFactor.generate(2))

    def test_three(self):
        self.assertEqual([3], self.primeFactor.generate(3))

    def test_four(self):
        # Repeated factors appear once per occurrence.
        self.assertEqual([2, 2], self.primeFactor.generate(4))

    def test_six(self):
        self.assertEqual([2, 3], self.primeFactor.generate(6))

    def test_eighth(self):
        self.assertEqual([2, 2, 2], self.primeFactor.generate(8))

    def test_nine(self):
        self.assertEqual([3, 3], self.primeFactor.generate(9))
| mit |
piffey/ansible | test/units/plugins/lookup/test_ini.py | 119 | 2642 | # -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.plugins.lookup.ini import _parse_params
class TestINILookup(unittest.TestCase):
    """Tests for the ini lookup plugin's old-style parameter parser."""

    # Currently there isn't a new-style
    old_style_params_data = (
        # Simple case
        dict(
            term=u'keyA section=sectionA file=/path/to/file',
            expected=[u'keyA', u'section=sectionA', u'file=/path/to/file'],
        ),
        # Values containing spaces must stay attached to their key.
        dict(
            term=u'keyB section=sectionB with space file=/path/with/embedded spaces and/file',
            expected=[u'keyB', u'section=sectionB with space', u'file=/path/with/embedded spaces and/file'],
        ),
        # Embedded '=' characters inside values must not split the pair.
        dict(
            term=u'keyC section=sectionC file=/path/with/equals/cn=com.ansible',
            expected=[u'keyC', u'section=sectionC', u'file=/path/with/equals/cn=com.ansible'],
        ),
        dict(
            term=u'keyD section=sectionD file=/path/with space and/equals/cn=com.ansible',
            expected=[u'keyD', u'section=sectionD', u'file=/path/with space and/equals/cn=com.ansible'],
        ),
        # Non-ASCII paths must survive parsing unchanged.
        dict(
            term=u'keyE section=sectionE file=/path/with/unicode/くらとみ/file',
            expected=[u'keyE', u'section=sectionE', u'file=/path/with/unicode/くらとみ/file'],
        ),
        dict(
            term=u'keyF section=sectionF file=/path/with/utf 8 and spaces/くらとみ/file',
            expected=[u'keyF', u'section=sectionF', u'file=/path/with/utf 8 and spaces/くらとみ/file'],
        ),
    )

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_parse_parameters(self):
        # Each term must tokenise into exactly the expected pieces.
        for testcase in self.old_style_params_data:
            # print(testcase)
            params = _parse_params(testcase['term'])
            self.assertEqual(params, testcase['expected'])
| gpl-3.0 |
wwitzel3/awx | awx/main/tests/functional/test_session.py | 1 | 4484 | from importlib import import_module
import pytest
import re
from django.conf import settings
from django.test.utils import override_settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.auth import SESSION_KEY
import mock
from awx.api.versioning import reverse
class AlwaysPassBackend(object):
    """Authentication backend stub that accepts any credentials.

    Whatever user object tests store on the class attribute is handed
    back from every ``authenticate()`` call.
    """

    # User object to return from authenticate(); assigned by tests.
    user = None

    def authenticate(self, **credentials):
        # Credentials are deliberately ignored.
        return AlwaysPassBackend.user

    @classmethod
    def get_backend_path(cls):
        # Dotted import path, as Django's AUTHENTICATION_BACKENDS expects.
        return '%s.%s' % (cls.__module__, cls.__name__)
@pytest.mark.django_db
@pytest.mark.parametrize('accept, status', [
    ['*/*', 200],
    ['text/html', 200],
    ['application/json', 406]
])
def test_login_json_not_allowed(get, accept, status):
    """The login endpoint is HTML-only; JSON requests get 406 Not Acceptable."""
    get(
        '/api/login/',
        HTTP_ACCEPT=accept,
        expect=status
    )
@pytest.mark.skip(reason="Needs Update - CA")
@pytest.mark.django_db
def test_session_create_delete(admin, post, get):
    """Logging in sets a session cookie; logging out destroys the session row."""
    AlwaysPassBackend.user = admin
    with override_settings(
        AUTHENTICATION_BACKENDS=(AlwaysPassBackend.get_backend_path(),),
        SESSION_COOKIE_NAME='session_id'
    ):
        response = post(
            '/api/login/',
            data={'username': admin.username, 'password': admin.password, 'next': '/api/'},
            expect=302, middleware=SessionMiddleware(), format='multipart'
        )
        assert 'session_id' in response.cookies
        # Extract the raw session key out of the Set-Cookie header.
        session_key = re.findall(r'session_id=[a-zA-z0-9]+',
                                 str(response.cookies['session_id']))[0][len('session_id=') :]
        session = Session.objects.get(session_key=session_key)
        # The persisted session must reference the logged-in user.
        assert int(session.get_decoded()[SESSION_KEY]) == admin.pk
        response = get(
            '/api/logout/', middleware=SessionMiddleware(),
            cookies={'session_id': session_key}, expect=302
        )
        # Logout must remove the session record entirely.
        assert not Session.objects.filter(session_key=session_key).exists()
@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_sessions_unlimited(emit, admin):
    """Without a per-user cap, no sessions are evicted and nothing is notified."""
    assert Session.objects.count() == 0
    for i in range(5):
        store = import_module(settings.SESSION_ENGINE).SessionStore()
        store.create_model_instance({SESSION_KEY: admin.pk}).save()
        assert Session.objects.count() == i + 1
    assert emit.call_count == 0


@pytest.mark.django_db
@mock.patch('awx.main.consumers.emit_channel_notification')
def test_session_overlimit(emit, admin, alice):
    # If SESSIONS_PER_USER=3, only persist the three most recently created sessions
    assert Session.objects.count() == 0
    with override_settings(SESSIONS_PER_USER=3):
        created = []
        for i in range(5):
            store = import_module(settings.SESSION_ENGINE).SessionStore()
            session = store.create_model_instance({SESSION_KEY: admin.pk})
            session.save()
            created.append(session.session_key)
        # Only the three newest survive, in creation order.
        assert [s.pk for s in Session.objects.all()] == created[-3:]
        assert emit.call_count == 2  # 2 of 5 sessions were evicted
        # Each eviction notifies the user's control channel.
        emit.assert_called_with(
            'control-limit_reached_{}'.format(admin.pk),
            {'reason': 'limit_reached', 'group_name': 'control'}
        )
        # Allow sessions for a different user to be saved
        store = import_module(settings.SESSION_ENGINE).SessionStore()
        store.create_model_instance({SESSION_KEY: alice.pk}).save()
        assert Session.objects.count() == 4
@pytest.mark.skip(reason="Needs Update - CA")
@pytest.mark.django_db
def test_password_update_clears_sessions(admin, alice, post, patch):
    """Changing a user's password invalidates that user's existing sessions."""
    AlwaysPassBackend.user = alice
    with override_settings(
        AUTHENTICATION_BACKENDS=(AlwaysPassBackend.get_backend_path(),),
        SESSION_COOKIE_NAME='session_id'
    ):
        response = post(
            '/api/login/',
            data={'username': alice.username, 'password': alice.password, 'next': '/api/'},
            expect=302, middleware=SessionMiddleware(), format='multipart'
        )
        # Extract the raw session key out of the Set-Cookie header.
        session_key = re.findall(
            r'session_id=[a-zA-z0-9]+',
            str(response.cookies['session_id'])
        )[0][len('session_id=') :]
        assert Session.objects.filter(session_key=session_key).exists()
        # Admin changes alice's password via the API...
        patch(
            reverse('api:user_detail', kwargs={'pk': alice.pk}), admin,
            data={'password': 'new_password'}, expect=200
        )
        # ...which must destroy alice's session.
        assert not Session.objects.filter(session_key=session_key).exists()
| apache-2.0 |
shakamunyi/nova | nova/api/openstack/compute/plugins/v3/deferred_delete.py | 7 | 2997 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The deferred instance delete extension."""
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
ALIAS = 'os-deferred-delete'
# Policy check bound to this extension's alias.
authorize = extensions.extension_authorizer('compute',
                                            'v3:' + ALIAS)


class DeferredDeleteController(wsgi.Controller):
    """Server actions for restoring or force-deleting soft-deleted instances."""

    def __init__(self, *args, **kwargs):
        super(DeferredDeleteController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    @wsgi.response(202)
    @extensions.expected_errors((404, 409, 403))
    @wsgi.action('restore')
    def _restore(self, req, id, body):
        """Restore a previously deleted instance."""
        context = req.environ["nova.context"]
        authorize(context)
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.restore(context, instance)
        except exception.QuotaError as error:
            # Restoring consumes quota again; report it as 403 Forbidden.
            raise webob.exc.HTTPForbidden(explanation=error.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'restore', id)

    @wsgi.response(202)
    @extensions.expected_errors((404, 409))
    @wsgi.action('forceDelete')
    def _force_delete(self, req, id, body):
        """Force delete of instance before deferred cleanup."""
        context = req.environ["nova.context"]
        authorize(context)
        instance = common.get_instance(self.compute_api, context, id,
                                       want_objects=True)
        try:
            self.compute_api.force_delete(context, instance)
        except exception.InstanceIsLocked as e:
            # Locked instances cannot be force-deleted; report 409 Conflict.
            raise webob.exc.HTTPConflict(explanation=e.format_message())
class DeferredDelete(extensions.V3APIExtensionBase):
    """Instance deferred delete."""

    name = "DeferredDelete"
    alias = "os-deferred-delete"
    version = 1

    def get_controller_extensions(self):
        # Attach the controller's actions to the existing 'servers' resource.
        controller = DeferredDeleteController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]

    def get_resources(self):
        # This extension adds no new top-level resources.
        return []
| apache-2.0 |
rkibria/yapyg | demo/demo_squares.py | 1 | 5627 | # Copyright (c) 2015 Raihan Kibria
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import random
from yapyg import factory
from yapyg import tiles
from yapyg import entities
from yapyg import collisions
from yapyg import debug
from yapyg_widgets.screen_widget import ScreenWidget
from yapyg_movers import physical_mover
from yapyg_helpers import entities_helpers
from physics_params import *
def _insert_demo_entity(state, ent_name, texture, pos, collision):
    """Insert a single-texture sprite entity at pose *pos* with the given
    collision shape. Shared by all three body types in create()."""
    entities.insert(state,
            ent_name,
            {
                    "*": {
                            "textures": (texture,),
                    },
            },
            pos,
            collision=collision)
def create(screen_width, screen_height, tile_size):
    """Build and return the demo state: a 10x10 tiled background, screen
    border walls, and four randomly chosen physical bodies (squares,
    circles or 1x0.5 rectangles) dropped with a small random tilt.
    """
    state = factory.create(screen_width, screen_height, tile_size)
    BOTTOM_Y = 0.5
    BORDER_THICKNESS = 2.0
    BORDER_OFFSET = 0.1
    WALLS_COLOR = (0.3, 0.45, 1)
    tiles.add_tile_def(state, " ", ("assets/img/tiles/grid_double.png",))
    # 10x10 empty tile area (fix: outer and inner loops used the same
    # variable name, shadowing each other).
    tiles.set_area(state, [[" " for x in xrange(10)] for y in xrange(10)])
    entities_helpers.create_screen_wall(state, "000_screenbox", BORDER_THICKNESS, BORDER_OFFSET, BOTTOM_Y, color=WALLS_COLOR)
    # collisions.set_handler(state, collision_handler)
    for i in xrange(4):
        objtype = random.randint(0, 2)
        ent_name = "%d" % i
        ent_mass = 1.0
        # Random tilt in [-10, 10] degrees.
        angle = random.randint(0, 20) - 10.0
        # Alternate between the two texture color variants.
        variant = "_2" if i % 2 else ""
        if objtype == 0:
            ent_name = "square_" + ent_name
            _insert_demo_entity(state, ent_name,
                    "assets/img/sprites/half_square%s.png" % variant,
                    (0.5 + i * 0.75, 5, angle,),
                    (("rectangle", 0, 0, 0.5, 0.5),))
        elif objtype == 1:
            ent_name = "circle_" + ent_name
            _insert_demo_entity(state, ent_name,
                    "assets/img/sprites/half_ball%s.png" % variant,
                    (0.5 + i * 0.75, 4.0, angle),
                    (("circle", 0.25, 0.25, 0.25),))
        elif objtype == 2:
            ent_mass = 2.0  # rectangles are twice as heavy
            ent_name = "rect_" + ent_name
            _insert_demo_entity(state, ent_name,
                    "assets/img/sprites/one_by_half_rectangle%s.png" % variant,
                    (0.5 + i * 0.75, 3.0, 90 + angle,),
                    (("rectangle", 0, 0, 1.0, 0.5),))
        physical_mover.add(state,
                ent_name,
                ent_mass,
                0,
                0,
                0.0,
                YAPYG_STD_GRAVITY,
                YAPYG_STD_FRICTION,
                YAPYG_STD_INELASTICITY,
                0,
                YAPYG_STD_ROT_FRICTION,
                YAPYG_STD_ROT_DECAY,
                YAPYG_STD_STICKYNESS,
                )
    return state
def collision_handler(state, collisions_list):
    """Debug hook: show each colliding entity pair on debug line 0."""
    for pair in collisions_list:
        first_name, second_name = pair
        debug.set_line(state, 0, "collision: %s <-> %s" % (first_name, second_name,))
| mit |
ArcherSys/ArcherSys | eclipse/plugins/org.python.pydev.jython_4.5.5.201603221110/Lib/xml/dom/pulldom.py | 109 | 11972 | import xml.sax
import xml.sax.handler
import types
# Accept both str and unicode as "string" filename arguments; fall back to
# str alone on builds where types.UnicodeType is missing.
try:
    _StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
    _StringTypes = [types.StringType]
# Event type tokens: DOMEventStream.getEvent() yields (token, node) pairs
# tagged with one of these strings.
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
    """SAX ContentHandler that records SAX callbacks as a queue of
    (event_type, DOM node) pairs for DOMEventStream to pull.

    The queue is a singly linked list of ``[event, next]`` cells:
    ``firstEvent`` is a dummy head cell and ``lastEvent`` the current tail.
    """
    _locator = None
    document = None
    def __init__(self, documentFactory=None):
        from xml.dom import XML_NAMESPACE
        self.documentFactory = documentFactory
        # Dummy head cell of the event linked list.
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        self.elementStack = []
        self.push = self.elementStack.append
        try:
            self.pop = self.elementStack.pop
        except AttributeError:
            # use class' pop instead
            pass
        self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # Comments/PIs seen before the document node exists are buffered
        # here and replayed by buildDocument().
        self.pending_events = []
    def pop(self):
        # Class-level fallback, used only if binding self.pop to the
        # bound list method failed in __init__.
        result = self.elementStack[-1]
        del self.elementStack[-1]
        return result
    def setDocumentLocator(self, locator):
        self._locator = locator
    def startPrefixMapping(self, prefix, uri):
        # Remember xmlns declarations so they can be re-created as
        # attributes on the next element (see startElementNS).
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or None
    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts.pop()
    def startElementNS(self, name, tagName , attrs):
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            for aname, value in xmlns_attrs:
                if aname == 'xmlns':
                    qname = aname
                else:
                    qname = 'xmlns:' + aname
                attr = self.document.createAttributeNS(xmlns_uri, qname)
                attr.value = value
                node.setAttributeNodeNS(attr)
            self._xmlns_attrs = []
        for aname,value in attrs.items():
            a_uri, a_localname = aname
            if a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElementNS(self, name, tagName):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def startElement(self, name, attrs):
        # Non-namespace variant of startElementNS.
        if self.document:
            node = self.document.createElement(name)
        else:
            node = self.buildDocument(None, name)
        for aname,value in attrs.items():
            attr = self.document.createAttribute(aname)
            attr.value = value
            node.setAttributeNode(attr)
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElement(self, name):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def comment(self, s):
        if self.document:
            node = self.document.createComment(s)
            self.lastEvent[1] = [(COMMENT, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document node yet: buffer until buildDocument() runs.
            event = [(COMMENT, s), None]
            self.pending_events.append(event)
    def processingInstruction(self, target, data):
        if self.document:
            node = self.document.createProcessingInstruction(target, data)
            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document node yet: buffer until buildDocument() runs.
            event = [(PROCESSING_INSTRUCTION, target, data), None]
            self.pending_events.append(event)
    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]
    def characters(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(CHARACTERS, node), None]
        self.lastEvent = self.lastEvent[1]
    def startDocument(self):
        if self.documentFactory is None:
            import xml.dom.minidom
            self.documentFactory = xml.dom.minidom.Document.implementation
    def buildDocument(self, uri, tagname):
        # Can't do that in startDocument, since we need the tagname
        # XXX: obtain DocumentType
        node = self.documentFactory.createDocument(uri, tagname, None)
        self.document = node
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
        # Put everything we have seen so far into the document
        for e in self.pending_events:
            if e[0][0] == PROCESSING_INSTRUCTION:
                _,target,data = e[0]
                n = self.document.createProcessingInstruction(target, data)
                e[0] = (PROCESSING_INSTRUCTION, n)
            elif e[0][0] == COMMENT:
                n = self.document.createComment(e[0][1])
                e[0] = (COMMENT, n)
            else:
                raise AssertionError("Unknown pending event ",e[0][0])
            self.lastEvent[1] = e
            self.lastEvent = e
        self.pending_events = None
        return node.firstChild
    def endDocument(self):
        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
        self.pop()
    def clear(self):
        "clear(): Explicitly release parsing structures"
        self.document = None
class ErrorHandler:
    """Default SAX error handler: print warnings, raise everything else."""
    def warning(self, exception):
        # Non-fatal: report and let parsing continue.
        print exception
    def error(self, exception):
        raise exception
    def fatalError(self, exception):
        raise exception
class DOMEventStream:
    """Pull-style iterator yielding (event, node) pairs produced by PullDOM.

    Feeds the underlying SAX parser in bufsize-sized chunks on demand; if
    the parser lacks the IncrementalParser interface, the whole stream is
    parsed up front instead (see _slurp).
    """
    def __init__(self, stream, parser, bufsize):
        self.stream = stream
        self.parser = parser
        self.bufsize = bufsize
        if not hasattr(self.parser, 'feed'):
            # Non-incremental parser: replace getEvent with the slurping
            # fallback.
            self.getEvent = self._slurp
        self.reset()
    def reset(self):
        self.pulldom = PullDOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)
    def __getitem__(self, pos):
        # Old-style iteration protocol; pos is ignored.
        rc = self.getEvent()
        if rc:
            return rc
        raise IndexError
    def next(self):
        rc = self.getEvent()
        if rc:
            return rc
        raise StopIteration
    def __iter__(self):
        return self
    def expandNode(self, node):
        """Consume events until *node* is closed, attaching all children
        so the node's subtree is fully built."""
        event = self.getEvent()
        parents = [node]
        while event:
            token, cur_node = event
            if cur_node is node:
                # Reached node's own END_ELEMENT: subtree complete.
                return
            if token != END_ELEMENT:
                parents[-1].appendChild(cur_node)
            if token == START_ELEMENT:
                parents.append(cur_node)
            elif token == END_ELEMENT:
                del parents[-1]
            event = self.getEvent()
    def getEvent(self):
        # use IncrementalParser interface, so we get the desired
        # pull effect
        if not self.pulldom.firstEvent[1]:
            self.pulldom.lastEvent = self.pulldom.firstEvent
        while not self.pulldom.firstEvent[1]:
            buf = self.stream.read(self.bufsize)
            if not buf:
                self.parser.close()
                return None
            self.parser.feed(buf)
        # Pop the head event off the linked list.
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def _slurp(self):
        """ Fallback replacement for getEvent() using the
        standard SAX2 interface, which means we slurp the
        SAX events into memory (no performance gain, but
        we are compatible to all SAX parsers).
        """
        self.parser.parse(self.stream)
        self.getEvent = self._emit
        return self._emit()
    def _emit(self):
        """ Fallback replacement for getEvent() that emits
        the events that _slurp() read previously.
        """
        if self.pulldom.firstEvent[1] is None:
            return None
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def clear(self):
        """clear(): Explicitly release parsing objects"""
        self.pulldom.clear()
        del self.pulldom
        self.parser = None
        self.stream = None
class SAX2DOM(PullDOM):
    """PullDOM variant that also appends each new node to its parent,
    building a complete DOM tree while the events are generated."""
    def startElementNS(self, name, tagName , attrs):
        PullDOM.startElementNS(self, name, tagName, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)
    def startElement(self, name, attrs):
        PullDOM.startElement(self, name, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)
    def processingInstruction(self, target, data):
        PullDOM.processingInstruction(self, target, data)
        # The node just created is in the last queued event.
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
    def ignorableWhitespace(self, chars):
        PullDOM.ignorableWhitespace(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
    def characters(self, chars):
        PullDOM.characters(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
    """Return a DOMEventStream over *stream_or_string*.

    A string argument is treated as a filename and opened; anything else
    is assumed to be an already-open, readable stream.
    """
    effective_bufsize = default_bufsize if bufsize is None else bufsize
    is_filename = type(stream_or_string) in _StringTypes
    stream = open(stream_or_string) if is_filename else stream_or_string
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(stream, parser, effective_bufsize)
def parseString(string, parser=None):
    """Return a DOMEventStream over an in-memory XML *string*."""
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    if not parser:
        parser = xml.sax.make_parser()
    # The whole document fits in one buffer, so bufsize is its length.
    return DOMEventStream(StringIO(string), parser, len(string))
| mit |
mariopro/youtube-dl | youtube_dl/extractor/washingtonpost.py | 79 | 5626 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
strip_jsonp,
)
class WashingtonPostIE(InfoExtractor):
    """Extractor for washingtonpost.com article pages.

    An article may embed several PostTV/Kaltura videos; each is returned
    as one entry of a playlist keyed by the article slug.
    """
    _VALID_URL = r'https?://(?:www\.)?washingtonpost\.com/.*?/(?P<id>[^/]+)/(?:$|[?#])'
    _TESTS = [{
        'url': 'http://www.washingtonpost.com/sf/national/2014/03/22/sinkhole-of-bureaucracy/',
        'info_dict': {
            'id': 'sinkhole-of-bureaucracy',
            'title': 'Sinkhole of bureaucracy',
        },
        'playlist': [{
            'md5': '79132cc09ec5309fa590ae46e4cc31bc',
            'info_dict': {
                'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f',
                'ext': 'mp4',
                'title': 'Breaking Points: The Paper Mine',
                'duration': 1287,
                'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.',
                'uploader': 'The Washington Post',
                'timestamp': 1395527908,
                'upload_date': '20140322',
            },
        }, {
            'md5': 'e1d5734c06865cc504ad99dc2de0d443',
            'info_dict': {
                'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f',
                'ext': 'mp4',
                'title': 'The town bureaucracy sustains',
                'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. We set out to find out what it\'s like to do paperwork 230 feet underground.',
                'duration': 2217,
                'timestamp': 1395528005,
                'upload_date': '20140322',
                'uploader': 'The Washington Post',
            },
        }],
    }, {
        'url': 'http://www.washingtonpost.com/blogs/wonkblog/wp/2014/12/31/one-airline-figured-out-how-to-make-sure-its-airplanes-never-disappear/',
        'info_dict': {
            'id': 'one-airline-figured-out-how-to-make-sure-its-airplanes-never-disappear',
            'title': 'One airline figured out how to make sure its airplanes never disappear',
        },
        'playlist': [{
            'md5': 'a7c1b5634ba5e57a6a82cdffa5b1e0d0',
            'info_dict': {
                'id': '0e4bb54c-9065-11e4-a66f-0ca5037a597d',
                'ext': 'mp4',
                'description': 'Washington Post transportation reporter Ashley Halsey III explains why a plane\'s black box needs to be recovered from a crash site instead of having its information streamed in real time throughout the flight.',
                'upload_date': '20141230',
                'uploader': 'The Washington Post',
                'timestamp': 1419974765,
                'title': 'Why black boxes don’t transmit data in real time',
            }
        }]
    }]
    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)
        title = self._og_search_title(webpage)
        # Collect every embedded video UUID; both embed markups are used
        # across the site.
        uuids = re.findall(r'''(?x)
            (?:
                <div\s+class="posttv-video-embed[^>]*?data-uuid=|
                data-video-uuid=
            )"([^"]+)"''', webpage)
        entries = []
        for i, uuid in enumerate(uuids, start=1):
            # The videojson endpoint returns JSONP; strip the padding.
            vinfo_all = self._download_json(
                'http://www.washingtonpost.com/posttv/c/videojson/%s?resType=jsonp' % uuid,
                page_id,
                transform_source=strip_jsonp,
                note='Downloading information of video %d/%d' % (i, len(uuids))
            )
            vinfo = vinfo_all[0]['contentConfig']
            uploader = vinfo.get('credits', {}).get('source')
            # dateFirstPublished is in milliseconds; scale to seconds.
            timestamp = int_or_none(
                vinfo.get('dateConfig', {}).get('dateFirstPublished'), 1000)
            # Streams with width == 0 are audio-only.
            formats = [{
                'format_id': (
                    '%s-%s-%s' % (s.get('type'), s.get('width'), s.get('bitrate'))
                    if s.get('width')
                    else s.get('type')),
                'vbr': s.get('bitrate') if s.get('width') != 0 else None,
                'width': s.get('width'),
                'height': s.get('height'),
                'acodec': s.get('audioCodec'),
                'vcodec': s.get('videoCodec') if s.get('width') != 0 else 'none',
                'filesize': s.get('fileSize'),
                'url': s.get('url'),
                'ext': 'mp4',
                'preference': -100 if s.get('type') == 'smil' else None,
                'protocol': {
                    'MP4': 'http',
                    'F4F': 'f4m',
                }.get(s.get('type')),
            } for s in vinfo.get('streams', [])]
            source_media_url = vinfo.get('sourceMediaURL')
            if source_media_url:
                formats.append({
                    'format_id': 'source_media',
                    'url': source_media_url,
                })
            self._sort_formats(formats)
            entries.append({
                'id': uuid,
                'title': vinfo['title'],
                'description': vinfo.get('blurb'),
                'uploader': uploader,
                'formats': formats,
                # videoDuration is in centiseconds; scale to seconds.
                'duration': int_or_none(vinfo.get('videoDuration'), 100),
                'timestamp': timestamp,
            })
        return {
            '_type': 'playlist',
            'entries': entries,
            'id': page_id,
            'title': title,
        }
| unlicense |
vegarang/devilry-django | devilry/apps/core/models/subject.py | 1 | 2912 | from datetime import datetime
from django.utils.translation import ugettext as _
from django.db.models import Q
from django.contrib.auth.models import User
from django.db import models
from abstract_is_examiner import AbstractIsExaminer
from abstract_is_candidate import AbstractIsCandidate
from custom_db_fields import ShortNameField, LongNameField
from basenode import BaseNode
from node import Node
from model_utils import Etag, EtagMismatchException
class Subject(models.Model, BaseNode, AbstractIsExaminer, AbstractIsCandidate, Etag):
    """
    .. attribute:: parentnode
        A django.db.models.ForeignKey_ that points to the parent node,
        which is always a `Node`_.
    .. attribute:: admins
        A django.db.models.ManyToManyField_ that holds all the admins of the
        `Node`_.
    .. attribute:: short_name
        A django.db.models.SlugField_ with max 20 characters. Only numbers,
        letters, '_' and '-'. Unlike all other children of
        :class:`BaseNode`, Subject.short_name is **unique**. This is mainly
        to avoid the overhead of having to recurse all the way to the top of
        the node hierarchy for every unique path.
    .. attribute:: periods
        A set of :class:`periods <devilry.apps.core.models.Period>` for this subject.
    .. attribute:: etag
        A DateTimeField containing the etag for this object.
    """
    class Meta:
        app_label = 'core'
        verbose_name = _('Subject')
        verbose_name_plural = _('Subjects')
        ordering = ['short_name']
    short_name = ShortNameField(unique=True)
    long_name = LongNameField()
    parentnode = models.ForeignKey(Node, related_name='subjects',
                                   verbose_name=_('Node'))
    admins = models.ManyToManyField(User, blank=True)
    etag = models.DateTimeField(auto_now_add=True)
    @classmethod
    def q_is_admin(cls, user_obj):
        """Query for subjects administered by ``user_obj``, either directly
        or through admin rights on an ancestor node."""
        return Q(admins__pk=user_obj.pk) \
            | Q(parentnode__pk__in=Node._get_nodepks_where_isadmin(user_obj))
    def get_path(self):
        """ Only returns :attr:`short_name` for subject since it is
        guaranteed to be unique. """
        return self.short_name
    @classmethod
    def q_published(cls, old=True, active=True):
        """Query for subjects with at least one published assignment.

        ``old=False`` filters out periods that have already ended, and
        ``active=False`` filters out periods that have not yet ended.
        """
        now = datetime.now()
        q = Q(periods__assignments__publishing_time__lt=now)
        if not active:
            q &= ~Q(periods__end_time__gte=now)
        if not old:
            q &= ~Q(periods__end_time__lt=now)
        return q
    @classmethod
    def q_is_examiner(cls, user_obj):
        """Query for subjects where ``user_obj`` examines some assignment group."""
        return Q(periods__assignments__assignmentgroups__examiners__user=user_obj)
    @classmethod
    def q_is_candidate(cls, user_obj):
        """Query for subjects where ``user_obj`` is a candidate in some assignment group."""
        return Q(periods__assignments__assignmentgroups__candidates__student=user_obj)
| bsd-3-clause |
willprice/weboob | modules/googletranslate/browser.py | 7 | 1772 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Lucien Loiseau
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
from weboob.deprecated.browser import Browser
from .pages import TranslatePage
__all__ = ['GoogleTranslateBrowser']
class GoogleTranslateBrowser(Browser):
    """Scraping browser for the translate.google.com web interface."""
    DOMAIN = 'translate.google.com'
    ENCODING = 'UTF-8'
    USER_AGENT = Browser.USER_AGENTS['desktop_firefox']
    PAGES = {
        'https?://translate\.google\.com': TranslatePage
    }
    def __init__(self, *args, **kwargs):
        Browser.__init__(self, *args, **kwargs)
    def translate(self, source, to, text):
        """
        translate 'text' from 'source' language to 'to' language
        """
        # POST the same form fields the web UI submits.
        query = urllib.urlencode({
            'sl': source.encode('utf-8'),
            'tl': to.encode('utf-8'),
            'js': 'n',
            'prev': '_t',
            'hl': 'en',
            'ie': 'UTF-8',
            'layout': '2',
            'eotf': '1',
            'text': text.encode('utf-8'),
        })
        self.location('https://' + self.DOMAIN, query)
        return self.page.get_translation()
| agpl-3.0 |
Triv90/Nova | nova/tests/api/openstack/common.py | 25 | 1734 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.openstack.common import jsonutils
def webob_factory(url):
    """Factory for removing duplicate webob code from tests."""
    base_url = url
    def web_request(url, method=None, body=None):
        request = webob.Request.blank(base_url + url)
        if method:
            request.content_type = "application/json"
            request.method = method
        if body:
            request.body = jsonutils.dumps(body)
        return request
    return web_request
def compare_links(actual, expected):
    """Compare xml atom links."""
    # Atom links are identified by their rel/href/type attributes.
    return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type'))
def compare_media_types(actual, expected):
    """Compare xml media types."""
    # Media types are identified by their base/type attributes.
    return compare_tree_to_dict(actual, expected, ('base', 'type'))
def compare_tree_to_dict(actual, expected, keys):
    """Compare parts of lxml.etree objects to dicts.

    Elements and dicts are paired positionally (extra items on the longer
    side are ignored, as with zip); each listed key must match via .get().
    """
    return all(element.get(key) == data.get(key)
               for element, data in zip(actual, expected)
               for key in keys)
| apache-2.0 |
UBERMALLOW/kernel_htc_flounder | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Index of the current value; also decides when to emit a separator space.
i = 0
while True:
    # The input is a packed list of little-endian 32-bit values.
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: done.
        break
    elif len(buf) != 4:
        # Truncated trailing value: terminate the output line and fail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        sys.stdout.write(" ")
    # Emit "<hex index>=<decimal value>" as expected by the sysfs attribute.
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
| gpl-2.0 |
pedro2d10/SickRage-FR | lib/hachoir_metadata/program.py | 94 | 3646 | from hachoir_metadata.metadata import RootMetadata, registerExtractor
from hachoir_parser.program import ExeFile
from hachoir_metadata.safe import fault_tolerant, getValue
class ExeMetadata(RootMetadata):
    """Extract metadata (title, author, version, ...) from Windows EXE
    files, both NE (16-bit) and PE (32/64-bit) formats, mainly from the
    StringFileInfo version resource."""
    # Version-info string keys mapped to metadata attribute names.
    KEY_TO_ATTR = {
        u"ProductName": "title",
        u"LegalCopyright": "copyright",
        u"LegalTrademarks": "copyright",
        u"LegalTrademarks1": "copyright",
        u"LegalTrademarks2": "copyright",
        u"CompanyName": "author",
        u"BuildDate": "creation_date",
        u"FileDescription": "title",
        u"ProductVersion": "version",
    }
    # Version-info keys deliberately ignored (not useful as metadata).
    SKIP_KEY = set((u"InternalName", u"OriginalFilename", u"FileVersion", u"BuildVersion"))
    def extract(self, exe):
        if exe.isPE():
            self.extractPE(exe)
        elif exe.isNE():
            self.extractNE(exe)
    def extractNE(self, exe):
        if "ne_header" in exe:
            self.useNE_Header(exe["ne_header"])
        if "info" in exe:
            self.useNEInfo(exe["info"])
    @fault_tolerant
    def useNEInfo(self, info):
        for node in info.array("node"):
            if node["name"].value == "StringFileInfo":
                self.readVersionInfo(node["node[0]"])
    def extractPE(self, exe):
        # Read information from headers
        if "pe_header" in exe:
            self.usePE_Header(exe["pe_header"])
        if "pe_opt_header" in exe:
            self.usePE_OptHeader(exe["pe_opt_header"])
        # Use PE resource
        resource = exe.getResource()
        if resource and "version_info/node[0]" in resource:
            for node in resource.array("version_info/node[0]/node"):
                if getValue(node, "name") == "StringFileInfo" \
                and "node[0]" in node:
                    self.readVersionInfo(node["node[0]"])
    @fault_tolerant
    def useNE_Header(self, hdr):
        if hdr["is_dll"].value:
            self.format_version = u"New-style executable: Dynamic-link library (DLL)"
        elif hdr["is_win_app"].value:
            self.format_version = u"New-style executable: Windows 3.x application"
        else:
            self.format_version = u"New-style executable for Windows 3.x"
    @fault_tolerant
    def usePE_Header(self, hdr):
        self.creation_date = hdr["creation_date"].value
        self.comment = "CPU: %s" % hdr["cpu"].display
        if hdr["is_dll"].value:
            self.format_version = u"Portable Executable: Dynamic-link library (DLL)"
        else:
            self.format_version = u"Portable Executable: Windows application"
    @fault_tolerant
    def usePE_OptHeader(self, hdr):
        self.comment = "Subsystem: %s" % hdr["subsystem"].display
    def readVersionInfo(self, info):
        """Collect StringFileInfo key/value pairs into metadata attributes."""
        values = {}
        for node in info.array("node"):
            if "value" not in node or "name" not in node:
                continue
            value = node["value"].value.strip(" \0")
            if not value:
                continue
            key = node["name"].value
            values[key] = value
        if "ProductName" in values and "FileDescription" in values:
            # Make sure that FileDescription is set before ProductName
            # as title value
            self.title = values["FileDescription"]
            self.title = values["ProductName"]
            del values["FileDescription"]
            del values["ProductName"]
        for key, value in values.iteritems():
            if key in self.KEY_TO_ATTR:
                setattr(self, self.KEY_TO_ATTR[key], value)
            elif key not in self.SKIP_KEY:
                # Unknown keys are preserved as free-form comments.
                self.comment = "%s=%s" % (key, value)
registerExtractor(ExeFile, ExeMetadata)
| gpl-3.0 |
kthordarson/youtube-dl-ruv | youtube_dl/extractor/musicvault.py | 28 | 3071 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
)
class MusicVaultIE(InfoExtractor):
    """Extractor for musicvault.com concert video pages.

    Video metadata is scraped from the page markup; the media itself is
    served through the Kaltura playManifest API.
    """
    _VALID_URL = r'https?://www\.musicvault\.com/(?P<uploader_id>[^/?#]*)/video/(?P<display_id>[^/?#]*)_(?P<id>[0-9]+)\.html'
    _TEST = {
        'url': 'http://www.musicvault.com/the-allman-brothers-band/video/straight-from-the-heart_1010863.html',
        'md5': '2cdbb3ae75f7fb3519821507d2fb3c15',
        'info_dict': {
            'id': '1010863',
            'ext': 'mp4',
            'uploader_id': 'the-allman-brothers-band',
            'title': 'Straight from the Heart',
            'duration': 244,
            'uploader': 'The Allman Brothers Band',
            'thumbnail': 're:^https?://.*/thumbnail/.*',
            'upload_date': '19811216',
            'location': 'Capitol Theatre (Passaic, NJ)',
            'description': 'Listen to The Allman Brothers Band perform Straight from the Heart at Capitol Theatre (Passaic, NJ) on Dec 16, 1981',
        }
    }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')
        webpage = self._download_webpage(url, display_id)
        thumbnail = self._search_regex(
            r'<meta itemprop="thumbnail" content="([^"]+)"',
            webpage, 'thumbnail', fatal=False)
        # The "data" div groups uploader/title/date/location as h1..h4.
        data_div = self._search_regex(
            r'(?s)<div class="data">(.*?)</div>', webpage, 'data fields')
        uploader = self._html_search_regex(
            r'<h1.*?>(.*?)</h1>', data_div, 'uploader', fatal=False)
        title = self._html_search_regex(
            r'<h2.*?>(.*?)</h2>', data_div, 'title')
        upload_date = unified_strdate(self._html_search_regex(
            r'<h3.*?>(.*?)</h3>', data_div, 'uploader', fatal=False))
        location = self._html_search_regex(
            r'<h4.*?>(.*?)</h4>', data_div, 'location', fatal=False)
        duration = parse_duration(self._html_search_meta('duration', webpage))
        # Build the Kaltura manifest URL from ids embedded in the page.
        VIDEO_URL_TEMPLATE = 'http://cdnapi.kaltura.com/p/%(uid)s/sp/%(wid)s/playManifest/entryId/%(entry_id)s/format/url/protocol/http'
        kaltura_id = self._search_regex(
            r'<div id="video-detail-player" data-kaltura-id="([^"]+)"',
            webpage, 'kaltura ID')
        video_url = VIDEO_URL_TEMPLATE % {
            'entry_id': kaltura_id,
            'wid': self._search_regex(r'/wid/_([0-9]+)/', webpage, 'wid'),
            'uid': self._search_regex(r'uiconf_id/([0-9]+)/', webpage, 'uid'),
        }
        return {
            'id': mobj.group('id'),
            'url': video_url,
            'ext': 'mp4',
            'display_id': display_id,
            'uploader_id': mobj.group('uploader_id'),
            'thumbnail': thumbnail,
            'description': self._html_search_meta('description', webpage),
            'upload_date': upload_date,
            'location': location,
            'title': title,
            'uploader': uploader,
            'duration': duration,
        }
| unlicense |
jehutting/kivy | examples/canvas/multitexture.py | 60 | 3061 | '''
Multitexture Example
====================
This example blends two textures: the image mtexture1.png of the letter K
and the image mtexture2.png of an orange circle. You should see an orange
K clipped to a circle. It uses a custom shader, written in glsl
(OpenGL Shading Language), stored in a local string.
Note the image mtexture1.png is a white 'K' on a transparent background, which
makes it hard to see.
'''
from kivy.clock import Clock
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.graphics import RenderContext, Color, Rectangle, BindTexture
fs_multitexture = '''
$HEADER$
// New uniform that will receive texture at index 1
uniform sampler2D texture1;
void main(void) {
// multiple current color with both texture (0 and 1).
// currently, both will use exactly the same texture coordinates.
gl_FragColor = frag_color * \
texture2D(texture0, tex_coord0) * \
texture2D(texture1, tex_coord0);
}
'''
kv = """
<MultitextureLayout>:
Image:
source: "mtexture1.png"
size_hint: .3,.3
id: 1
pos: 0,200
Image:
source: "mtexture2.png"
size_hint: .3,.3
id: 2
pos: 200,200
MultitextureWidget:
"""
Builder.load_string(kv)
class MultitextureWidget(Widget):
    """Widget whose canvas renders a rectangle through the custom
    two-texture fragment shader defined in fs_multitexture."""
    def __init__(self, **kwargs):
        self.canvas = RenderContext()
        # setting shader.fs to new source code automatically compiles it.
        self.canvas.shader.fs = fs_multitexture
        with self.canvas:
            Color(1, 1, 1)
            # here, we are binding a custom texture at index 1
            # this will be used as texture1 in shader.
            # The filenames are misleading: they do not correspond to the
            # index here or in the shader.
            BindTexture(source='mtexture2.png', index=1)
            # create a rectangle with texture (will be at index 0)
            Rectangle(size=(150, 150), source='mtexture1.png', pos=(500, 200))
        # set the texture1 to use texture index 1
        self.canvas['texture1'] = 1
        # call the constructor of parent
        # if they are any graphics objects, they will be added on our new
        # canvas
        super(MultitextureWidget, self).__init__(**kwargs)
        # We'll update our glsl variables in a clock
        Clock.schedule_interval(self.update_glsl, 0)
    def update_glsl(self, *largs):
        """Copy the window's projection/modelview matrices into our shader
        every frame (required by the default vertex shader)."""
        # This is needed for the default vertex shader.
        self.canvas['projection_mat'] = Window.render_context['projection_mat']
        self.canvas['modelview_mat'] = Window.render_context['modelview_mat']
self.canvas['modelview_mat'] = Window.render_context['modelview_mat']
class MultitextureLayout(FloatLayout):
    """Root layout; the kv rule above populates it with the two source
    images and the blended MultitextureWidget."""
    def __init__(self, **kwargs):
        # NOTE(review): a 'size' kwarg is required here (KeyError otherwise)
        # — confirm all callers pass it.
        self.size = kwargs['size']
        super(MultitextureLayout, self).__init__(**kwargs)
class MultitextureApp(App):
    """Kivy application whose root widget is a 600x600 MultitextureLayout."""
    def build(self):
        return MultitextureLayout(size=(600, 600))
# Run the demo when executed as a script.
if __name__ == '__main__':
    MultitextureApp().run()
| mit |
iglpdc/nipype | nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py | 12 | 1948 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..gtract import gtractFastMarchingTracking
def test_gtractFastMarchingTracking_inputs():
input_map = dict(args=dict(argstr='%s',
),
costStepSize=dict(argstr='--costStepSize %f',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s',
),
inputCostVolume=dict(argstr='--inputCostVolume %s',
),
inputStartingSeedsLabelMapVolume=dict(argstr='--inputStartingSeedsLabelMapVolume %s',
),
inputTensorVolume=dict(argstr='--inputTensorVolume %s',
),
maximumStepSize=dict(argstr='--maximumStepSize %f',
),
minimumStepSize=dict(argstr='--minimumStepSize %f',
),
numberOfIterations=dict(argstr='--numberOfIterations %d',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
outputTract=dict(argstr='--outputTract %s',
hash_files=False,
),
seedThreshold=dict(argstr='--seedThreshold %f',
),
startingSeedsLabel=dict(argstr='--startingSeedsLabel %d',
),
terminal_output=dict(nohash=True,
),
trackingThreshold=dict(argstr='--trackingThreshold %f',
),
writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ',
),
)
inputs = gtractFastMarchingTracking.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_gtractFastMarchingTracking_outputs():
output_map = dict(outputTract=dict(),
)
outputs = gtractFastMarchingTracking.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
Communities-Communications/cc-odoo | addons/l10n_pa/__openerp__.py | 260 | 1737 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Panama Localization Chart Account",
"version": "1.0",
"description": """
Panamenian accounting chart and tax localization.
Plan contable panameño e impuestos de acuerdo a disposiciones vigentes
Con la Colaboración de
- AHMNET CORP http://www.ahmnet.com
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Localization/Account Charts",
"depends": [
"account_chart",
],
"data":[
"account_tax_code.xml",
"l10n_pa_chart.xml",
"account_tax.xml",
"l10n_pa_wizard.xml",
],
"demo_xml": [
],
"active": False,
"installable": True,
"certificate" : "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hacpai/show-me-the-code | Python/0066/Card.py | 19 | 3189 | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import random
class Card(object):
"""Represents a standard playing card.
Attributes:
suit: integer 0-3
rank: integer 1-13
"""
suit_names = ["Clubs", "Diamonds", "Hearts", "Spades"]
rank_names = [None, "Ace", "2", "3", "4", "5", "6", "7",
"8", "9", "10", "Jack", "Queen", "King"]
def __init__(self, suit=0, rank=2):
self.suit = suit
self.rank = rank
def __str__(self):
"""Returns a human-readable string representation."""
return '%s of %s' % (Card.rank_names[self.rank],
Card.suit_names[self.suit])
def __cmp__(self, other):
"""Compares this card to other, first by suit, then rank.
Returns a positive number if this > other; negative if other > this;
and 0 if they are equivalent.
"""
t1 = self.suit, self.rank
t2 = other.suit, other.rank
return cmp(t1, t2)
class Deck(object):
"""Represents a deck of cards.
Attributes:
cards: list of Card objects.
"""
def __init__(self):
self.cards = []
for suit in range(4):
for rank in range(1, 14):
card = Card(suit, rank)
self.cards.append(card)
def __str__(self):
res = []
for card in self.cards:
res.append(str(card))
return '\n'.join(res)
def add_card(self, card):
"""Adds a card to the deck."""
self.cards.append(card)
def remove_card(self, card):
"""Removes a card from the deck."""
self.cards.remove(card)
def pop_card(self, i=-1):
"""Removes and returns a card from the deck.
i: index of the card to pop; by default, pops the last card.
"""
return self.cards.pop(i)
def shuffle(self):
"""Shuffles the cards in this deck."""
random.shuffle(self.cards)
def sort(self):
"""Sorts the cards in ascending order."""
self.cards.sort()
def move_cards(self, hand, num):
"""Moves the given number of cards from the deck into the Hand.
hand: destination Hand object
num: integer number of cards to move
"""
for i in range(num):
hand.add_card(self.pop_card())
class Hand(Deck):
"""Represents a hand of playing cards."""
def __init__(self, label=''):
self.cards = []
self.label = label
def find_defining_class(obj, method_name):
"""Finds and returns the class object that will provide
the definition of method_name (as a string) if it is
invoked on obj.
obj: any python object
method_name: string method name
"""
for ty in type(obj).mro():
if method_name in ty.__dict__:
return ty
return None
if __name__ == '__main__':
deck = Deck()
deck.shuffle()
hand = Hand()
print find_defining_class(hand, 'shuffle')
deck.move_cards(hand, 5)
hand.sort()
print hand
| gpl-2.0 |
at1as/Media-Database | tests/test_scraper_imdb.py | 2 | 2927 | #-*- encoding:utf8 -*-
from __future__ import unicode_literals
import codecs
import lxml
from src.scraper import Scraper
import unittest
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class TestScraper(unittest.TestCase):
def setUp(self):
self.scraper = Scraper("IMDB")
with codecs.open("./tests/fixtures/imdb_fight_club_movie_page_2018_09_10_minified.html") as f:
self.movie_page_str = f.read().replace('\n', '')
self.movie_page_xml = lxml.html.document_fromstring(self.movie_page_str)
def tearDown(self):
pass
def test_construct_search_url(self):
search_url = self.scraper.construct_search_url("Fight Club (1999)")
self.assertEquals("http://www.imdb.com/find?q=fight+club+(1999)&s=all", search_url)
def test_construct_search_url_unicode(self):
search_url = self.scraper.construct_search_url(u"Amélie (2001)")
self.assertEquals("http://www.imdb.com/find?q=am\xe9lie+(2001)&s=all", search_url)
def test_get_title(self):
self.assertEquals("Fight Club", self.scraper.get_title(self.movie_page_xml))
def test_get_title(self):
self.assertEquals([], self.scraper.get_alternative_title(self.movie_page_xml))
def test_get_description(self):
self.assertEquals(
'An insomniac office worker and a devil-may-care soapmaker form an underground fight club that evolves into something much, much more.',
self.scraper.get_description(self.movie_page_xml)
)
def test_get_director(self):
self.assertEquals(['David Fincher'], self.scraper.get_director(self.movie_page_xml))
def test_rating(self):
self.assertEquals("8.8", self.scraper.get_rating(self.movie_page_xml))
def test_get_genres(self):
self.assertEquals(['Drama'], self.scraper.get_genres(self.movie_page_xml))
def test_get_votes(self):
self.assertEquals("1,595,752", self.scraper.get_votes(self.movie_page_xml))
def test_get_running_time(self):
self.assertEquals("2h 19min", self.scraper.get_running_time(self.movie_page_xml))
def test_get_content_rating(self):
self.assertEquals('R', self.scraper.get_content_rating(self.movie_page_xml))
def test_get_stars(self):
self.assertEquals(
['Brad Pitt', 'Edward Norton', 'Meat Loaf'],
self.scraper.get_stars(self.movie_page_xml)
)
def test_get_languages(self):
self.assertEquals(['English'], self.scraper.get_languages(self.movie_page_xml))
def test_get_image_url(self):
self.assertEquals(
'https://m.media-amazon.com/images/M/MV5BMjJmYTNkNmItYjYyZC00MGUxLWJhNWMtZDY4Nzc1MDAwMzU5XkEyXkFqcGdeQXVyNzkwMjQ5NzM@._V1_UX182_CR0,0,182,268_AL_.jpg',
self.scraper.get_image_url(self.movie_page_xml)
)
def test_get_movie_year(self):
self.assertEquals('1999', self.scraper.get_movie_year(self.movie_page_xml))
def test_get_awards(self):
self.assertEquals('Nominated for 1 Oscar.', self.scraper.get_awards(self.movie_page_xml))
| mit |
maryklayne/Funcao | sympy/polys/tests/test_densebasic.py | 21 | 21527 | """Tests for dense recursive polynomials' basic tools. """
from sympy.polys.densebasic import (
dup_LC, dmp_LC,
dup_TC, dmp_TC,
dmp_ground_LC, dmp_ground_TC,
dmp_true_LT,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dup_strip, dmp_strip,
dmp_validate,
dup_reverse,
dup_copy, dmp_copy,
dup_normal, dmp_normal,
dup_convert, dmp_convert,
dup_from_sympy, dmp_from_sympy,
dup_nth, dmp_nth, dmp_ground_nth,
dmp_zero_p, dmp_zero,
dmp_one_p, dmp_one,
dmp_ground_p, dmp_ground,
dmp_negative_p, dmp_positive_p,
dmp_zeros, dmp_grounds,
dup_from_dict, dup_from_raw_dict,
dup_to_dict, dup_to_raw_dict,
dmp_from_dict, dmp_to_dict,
dmp_swap, dmp_permute,
dmp_nest, dmp_raise,
dup_deflate, dmp_deflate,
dup_multi_deflate, dmp_multi_deflate,
dup_inflate, dmp_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd,
dmp_list_terms, dmp_apply_pairs,
dup_slice, dmp_slice, dmp_slice_in,
dup_random,
)
from sympy.polys.specialpolys import f_polys
from sympy.polys.polyclasses import DMP
from sympy.polys.domains import ZZ, QQ
from sympy.polys.rings import ring
from sympy.core.singleton import S
from sympy.utilities.pytest import raises
from sympy import oo
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = [ f.to_dense() for f in f_polys() ]
def test_dup_LC():
assert dup_LC([], ZZ) == 0
assert dup_LC([2, 3, 4, 5], ZZ) == 2
def test_dup_TC():
assert dup_TC([], ZZ) == 0
assert dup_TC([2, 3, 4, 5], ZZ) == 5
def test_dmp_LC():
assert dmp_LC([[]], ZZ) == []
assert dmp_LC([[2, 3, 4], [5]], ZZ) == [2, 3, 4]
assert dmp_LC([[[]]], ZZ) == [[]]
assert dmp_LC([[[2], [3, 4]], [[5]]], ZZ) == [[2], [3, 4]]
def test_dmp_TC():
assert dmp_TC([[]], ZZ) == []
assert dmp_TC([[2, 3, 4], [5]], ZZ) == [5]
assert dmp_TC([[[]]], ZZ) == [[]]
assert dmp_TC([[[2], [3, 4]], [[5]]], ZZ) == [[5]]
def test_dmp_ground_LC():
assert dmp_ground_LC([[]], 1, ZZ) == 0
assert dmp_ground_LC([[2, 3, 4], [5]], 1, ZZ) == 2
assert dmp_ground_LC([[[]]], 2, ZZ) == 0
assert dmp_ground_LC([[[2], [3, 4]], [[5]]], 2, ZZ) == 2
def test_dmp_ground_TC():
assert dmp_ground_TC([[]], 1, ZZ) == 0
assert dmp_ground_TC([[2, 3, 4], [5]], 1, ZZ) == 5
assert dmp_ground_TC([[[]]], 2, ZZ) == 0
assert dmp_ground_TC([[[2], [3, 4]], [[5]]], 2, ZZ) == 5
def test_dmp_true_LT():
assert dmp_true_LT([[]], 1, ZZ) == ((0, 0), 0)
assert dmp_true_LT([[7]], 1, ZZ) == ((0, 0), 7)
assert dmp_true_LT([[1, 0]], 1, ZZ) == ((0, 1), 1)
assert dmp_true_LT([[1], []], 1, ZZ) == ((1, 0), 1)
assert dmp_true_LT([[1, 0], []], 1, ZZ) == ((1, 1), 1)
def test_dup_degree():
assert dup_degree([]) == -oo
assert dup_degree([1]) == 0
assert dup_degree([1, 0]) == 1
assert dup_degree([1, 0, 0, 0, 1]) == 4
def test_dmp_degree():
assert dmp_degree([[]], 1) == -oo
assert dmp_degree([[[]]], 2) == -oo
assert dmp_degree([[1]], 1) == 0
assert dmp_degree([[2], [1]], 1) == 1
def test_dmp_degree_in():
assert dmp_degree_in([[[]]], 0, 2) == -oo
assert dmp_degree_in([[[]]], 1, 2) == -oo
assert dmp_degree_in([[[]]], 2, 2) == -oo
assert dmp_degree_in([[[1]]], 0, 2) == 0
assert dmp_degree_in([[[1]]], 1, 2) == 0
assert dmp_degree_in([[[1]]], 2, 2) == 0
assert dmp_degree_in(f_4, 0, 2) == 9
assert dmp_degree_in(f_4, 1, 2) == 12
assert dmp_degree_in(f_4, 2, 2) == 8
assert dmp_degree_in(f_6, 0, 2) == 4
assert dmp_degree_in(f_6, 1, 2) == 4
assert dmp_degree_in(f_6, 2, 2) == 6
assert dmp_degree_in(f_6, 3, 3) == 3
raises(IndexError, lambda: dmp_degree_in([[1]], -5, 1))
def test_dmp_degree_list():
assert dmp_degree_list([[[[ ]]]], 3) == (-oo, -oo, -oo, -oo)
assert dmp_degree_list([[[[1]]]], 3) == ( 0, 0, 0, 0)
assert dmp_degree_list(f_0, 2) == (2, 2, 2)
assert dmp_degree_list(f_1, 2) == (3, 3, 3)
assert dmp_degree_list(f_2, 2) == (5, 3, 3)
assert dmp_degree_list(f_3, 2) == (5, 4, 7)
assert dmp_degree_list(f_4, 2) == (9, 12, 8)
assert dmp_degree_list(f_5, 2) == (3, 3, 3)
assert dmp_degree_list(f_6, 3) == (4, 4, 6, 3)
def test_dup_strip():
assert dup_strip([]) == []
assert dup_strip([0]) == []
assert dup_strip([0, 0, 0]) == []
assert dup_strip([1]) == [1]
assert dup_strip([0, 1]) == [1]
assert dup_strip([0, 0, 0, 1]) == [1]
assert dup_strip([1, 2, 0]) == [1, 2, 0]
assert dup_strip([0, 1, 2, 0]) == [1, 2, 0]
assert dup_strip([0, 0, 0, 1, 2, 0]) == [1, 2, 0]
def test_dmp_strip():
assert dmp_strip([0, 1, 0], 0) == [1, 0]
assert dmp_strip([[]], 1) == [[]]
assert dmp_strip([[], []], 1) == [[]]
assert dmp_strip([[], [], []], 1) == [[]]
assert dmp_strip([[[]]], 2) == [[[]]]
assert dmp_strip([[[]], [[]]], 2) == [[[]]]
assert dmp_strip([[[]], [[]], [[]]], 2) == [[[]]]
assert dmp_strip([[[1]]], 2) == [[[1]]]
assert dmp_strip([[[]], [[1]]], 2) == [[[1]]]
assert dmp_strip([[[]], [[1]], [[]]], 2) == [[[1]], [[]]]
def test_dmp_validate():
assert dmp_validate([]) == ([], 0)
assert dmp_validate([0, 0, 0, 1, 0]) == ([1, 0], 0)
assert dmp_validate([[[]]]) == ([[[]]], 2)
assert dmp_validate([[0], [], [0], [1], [0]]) == ([[1], []], 1)
raises(ValueError, lambda: dmp_validate([[0], 0, [0], [1], [0]]))
def test_dup_reverse():
assert dup_reverse([1, 2, 0, 3]) == [3, 0, 2, 1]
assert dup_reverse([1, 2, 3, 0]) == [3, 2, 1]
def test_dup_copy():
f = [ZZ(1), ZZ(0), ZZ(2)]
g = dup_copy(f)
g[0], g[2] = ZZ(7), ZZ(0)
assert f != g
def test_dmp_copy():
f = [[ZZ(1)], [ZZ(2), ZZ(0)]]
g = dmp_copy(f, 1)
g[0][0], g[1][1] = ZZ(7), ZZ(1)
assert f != g
def test_dup_normal():
assert dup_normal([0, 0, 2, 1, 0, 11, 0], ZZ) == \
[ZZ(2), ZZ(1), ZZ(0), ZZ(11), ZZ(0)]
def test_dmp_normal():
assert dmp_normal([[0], [], [0, 2, 1], [0], [11], []], 1, ZZ) == \
[[ZZ(2), ZZ(1)], [], [ZZ(11)], []]
def test_dup_convert():
K0, K1 = ZZ['x'], ZZ
f = [K0(1), K0(2), K0(0), K0(3)]
assert dup_convert(f, K0, K1) == \
[ZZ(1), ZZ(2), ZZ(0), ZZ(3)]
def test_dmp_convert():
K0, K1 = ZZ['x'], ZZ
f = [[K0(1)], [K0(2)], [], [K0(3)]]
assert dmp_convert(f, 1, K0, K1) == \
[[ZZ(1)], [ZZ(2)], [], [ZZ(3)]]
def test_dup_from_sympy():
assert dup_from_sympy([S(1), S(2)], ZZ) == \
[ZZ(1), ZZ(2)]
assert dup_from_sympy([S(1)/2, S(3)], QQ) == \
[QQ(1, 2), QQ(3, 1)]
def test_dmp_from_sympy():
assert dmp_from_sympy([[S(1), S(2)], [S(0)]], 1, ZZ) == \
[[ZZ(1), ZZ(2)], []]
assert dmp_from_sympy([[S(1)/2, S(2)]], 1, QQ) == \
[[QQ(1, 2), QQ(2, 1)]]
def test_dup_nth():
assert dup_nth([1, 2, 3], 0, ZZ) == 3
assert dup_nth([1, 2, 3], 1, ZZ) == 2
assert dup_nth([1, 2, 3], 2, ZZ) == 1
assert dup_nth([1, 2, 3], 9, ZZ) == 0
raises(IndexError, lambda: dup_nth([3, 4, 5], -1, ZZ))
def test_dmp_nth():
assert dmp_nth([[1], [2], [3]], 0, 1, ZZ) == [3]
assert dmp_nth([[1], [2], [3]], 1, 1, ZZ) == [2]
assert dmp_nth([[1], [2], [3]], 2, 1, ZZ) == [1]
assert dmp_nth([[1], [2], [3]], 9, 1, ZZ) == []
raises(IndexError, lambda: dmp_nth([[3], [4], [5]], -1, 1, ZZ))
def test_dmp_ground_nth():
assert dmp_ground_nth([[]], (0, 0), 1, ZZ) == 0
assert dmp_ground_nth([[1], [2], [3]], (0, 0), 1, ZZ) == 3
assert dmp_ground_nth([[1], [2], [3]], (1, 0), 1, ZZ) == 2
assert dmp_ground_nth([[1], [2], [3]], (2, 0), 1, ZZ) == 1
assert dmp_ground_nth([[1], [2], [3]], (2, 1), 1, ZZ) == 0
assert dmp_ground_nth([[1], [2], [3]], (3, 0), 1, ZZ) == 0
raises(IndexError, lambda: dmp_ground_nth([[3], [4], [5]], (2, -1), 1, ZZ))
def test_dmp_zero_p():
assert dmp_zero_p([], 0) is True
assert dmp_zero_p([[]], 1) is True
assert dmp_zero_p([[[]]], 2) is True
assert dmp_zero_p([[[1]]], 2) is False
def test_dmp_zero():
assert dmp_zero(0) == []
assert dmp_zero(2) == [[[]]]
def test_dmp_one_p():
assert dmp_one_p([1], 0, ZZ) is True
assert dmp_one_p([[1]], 1, ZZ) is True
assert dmp_one_p([[[1]]], 2, ZZ) is True
assert dmp_one_p([[[12]]], 2, ZZ) is False
def test_dmp_one():
assert dmp_one(0, ZZ) == [ZZ(1)]
assert dmp_one(2, ZZ) == [[[ZZ(1)]]]
def test_dmp_ground_p():
assert dmp_ground_p([], 0, 0) is True
assert dmp_ground_p([[]], 0, 1) is True
assert dmp_ground_p([[]], 1, 1) is False
assert dmp_ground_p([[ZZ(1)]], 1, 1) is True
assert dmp_ground_p([[[ZZ(2)]]], 2, 2) is True
assert dmp_ground_p([[[ZZ(2)]]], 3, 2) is False
assert dmp_ground_p([[[ZZ(3)], []]], 3, 2) is False
assert dmp_ground_p([], None, 0) is True
assert dmp_ground_p([[]], None, 1) is True
assert dmp_ground_p([ZZ(1)], None, 0) is True
assert dmp_ground_p([[[ZZ(1)]]], None, 2) is True
assert dmp_ground_p([[[ZZ(3)], []]], None, 2) is False
def test_dmp_ground():
assert dmp_ground(ZZ(0), 2) == [[[]]]
assert dmp_ground(ZZ(7), -1) == ZZ(7)
assert dmp_ground(ZZ(7), 0) == [ZZ(7)]
assert dmp_ground(ZZ(7), 2) == [[[ZZ(7)]]]
def test_dmp_zeros():
assert dmp_zeros(4, 0, ZZ) == [[], [], [], []]
assert dmp_zeros(0, 2, ZZ) == []
assert dmp_zeros(1, 2, ZZ) == [[[[]]]]
assert dmp_zeros(2, 2, ZZ) == [[[[]]], [[[]]]]
assert dmp_zeros(3, 2, ZZ) == [[[[]]], [[[]]], [[[]]]]
assert dmp_zeros(3, -1, ZZ) == [0, 0, 0]
def test_dmp_grounds():
assert dmp_grounds(ZZ(7), 0, 2) == []
assert dmp_grounds(ZZ(7), 1, 2) == [[[[7]]]]
assert dmp_grounds(ZZ(7), 2, 2) == [[[[7]]], [[[7]]]]
assert dmp_grounds(ZZ(7), 3, 2) == [[[[7]]], [[[7]]], [[[7]]]]
assert dmp_grounds(ZZ(7), 3, -1) == [7, 7, 7]
def test_dmp_negative_p():
assert dmp_negative_p([[[]]], 2, ZZ) is False
assert dmp_negative_p([[[1], [2]]], 2, ZZ) is False
assert dmp_negative_p([[[-1], [2]]], 2, ZZ) is True
def test_dmp_positive_p():
assert dmp_positive_p([[[]]], 2, ZZ) is False
assert dmp_positive_p([[[1], [2]]], 2, ZZ) is True
assert dmp_positive_p([[[-1], [2]]], 2, ZZ) is False
def test_dup_from_to_dict():
assert dup_from_raw_dict({}, ZZ) == []
assert dup_from_dict({}, ZZ) == []
assert dup_to_raw_dict([]) == {}
assert dup_to_dict([]) == {}
assert dup_to_raw_dict([], ZZ, zero=True) == {0: ZZ(0)}
assert dup_to_dict([], ZZ, zero=True) == {(0,): ZZ(0)}
f = [3, 0, 0, 2, 0, 0, 0, 0, 8]
g = {8: 3, 5: 2, 0: 8}
h = {(8,): 3, (5,): 2, (0,): 8}
assert dup_from_raw_dict(g, ZZ) == f
assert dup_from_dict(h, ZZ) == f
assert dup_to_raw_dict(f) == g
assert dup_to_dict(f) == h
R, x,y = ring("x,y", ZZ)
K = R.to_domain()
f = [R(3), R(0), R(2), R(0), R(0), R(8)]
g = {5: R(3), 3: R(2), 0: R(8)}
h = {(5,): R(3), (3,): R(2), (0,): R(8)}
assert dup_from_raw_dict(g, K) == f
assert dup_from_dict(h, K) == f
assert dup_to_raw_dict(f) == g
assert dup_to_dict(f) == h
def test_dmp_from_to_dict():
assert dmp_from_dict({}, 1, ZZ) == [[]]
assert dmp_to_dict([[]], 1) == {}
assert dmp_to_dict([], 0, ZZ, zero=True) == {(0,): ZZ(0)}
assert dmp_to_dict([[]], 1, ZZ, zero=True) == {(0, 0): ZZ(0)}
f = [[3], [], [], [2], [], [], [], [], [8]]
g = {(8, 0): 3, (5, 0): 2, (0, 0): 8}
assert dmp_from_dict(g, 1, ZZ) == f
assert dmp_to_dict(f, 1) == g
def test_dmp_swap():
f = dmp_normal([[1, 0, 0], [], [1, 0], [], [1]], 1, ZZ)
g = dmp_normal([[1, 0, 0, 0, 0], [1, 0, 0], [1]], 1, ZZ)
assert dmp_swap(f, 1, 1, 1, ZZ) == f
assert dmp_swap(f, 0, 1, 1, ZZ) == g
assert dmp_swap(g, 0, 1, 1, ZZ) == f
raises(IndexError, lambda: dmp_swap(f, -1, -7, 1, ZZ))
def test_dmp_permute():
f = dmp_normal([[1, 0, 0], [], [1, 0], [], [1]], 1, ZZ)
g = dmp_normal([[1, 0, 0, 0, 0], [1, 0, 0], [1]], 1, ZZ)
assert dmp_permute(f, [0, 1], 1, ZZ) == f
assert dmp_permute(g, [0, 1], 1, ZZ) == g
assert dmp_permute(f, [1, 0], 1, ZZ) == g
assert dmp_permute(g, [1, 0], 1, ZZ) == f
def test_dmp_nest():
assert dmp_nest(ZZ(1), 2, ZZ) == [[[1]]]
assert dmp_nest([[1]], 0, ZZ) == [[1]]
assert dmp_nest([[1]], 1, ZZ) == [[[1]]]
assert dmp_nest([[1]], 2, ZZ) == [[[[1]]]]
def test_dmp_raise():
assert dmp_raise([], 2, 0, ZZ) == [[[]]]
assert dmp_raise([[1]], 0, 1, ZZ) == [[1]]
assert dmp_raise([[1, 2, 3], [], [2, 3]], 2, 1, ZZ) == \
[[[[1]], [[2]], [[3]]], [[[]]], [[[2]], [[3]]]]
def test_dup_deflate():
assert dup_deflate([], ZZ) == (1, [])
assert dup_deflate([2], ZZ) == (1, [2])
assert dup_deflate([1, 2, 3], ZZ) == (1, [1, 2, 3])
assert dup_deflate([1, 0, 2, 0, 3], ZZ) == (2, [1, 2, 3])
assert dup_deflate(dup_from_raw_dict({7: 1, 1: 1}, ZZ), ZZ) == \
(1, [1, 0, 0, 0, 0, 0, 1, 0])
assert dup_deflate(dup_from_raw_dict({7: 1, 0: 1}, ZZ), ZZ) == \
(7, [1, 1])
assert dup_deflate(dup_from_raw_dict({7: 1, 3: 1}, ZZ), ZZ) == \
(1, [1, 0, 0, 0, 1, 0, 0, 0])
assert dup_deflate(dup_from_raw_dict({7: 1, 4: 1}, ZZ), ZZ) == \
(1, [1, 0, 0, 1, 0, 0, 0, 0])
assert dup_deflate(dup_from_raw_dict({8: 1, 4: 1}, ZZ), ZZ) == \
(4, [1, 1, 0])
assert dup_deflate(dup_from_raw_dict({8: 1}, ZZ), ZZ) == \
(8, [1, 0])
assert dup_deflate(dup_from_raw_dict({7: 1}, ZZ), ZZ) == \
(7, [1, 0])
assert dup_deflate(dup_from_raw_dict({1: 1}, ZZ), ZZ) == \
(1, [1, 0])
def test_dmp_deflate():
assert dmp_deflate([[]], 1, ZZ) == ((1, 1), [[]])
assert dmp_deflate([[2]], 1, ZZ) == ((1, 1), [[2]])
f = [[1, 0, 0], [], [1, 0], [], [1]]
assert dmp_deflate(f, 1, ZZ) == ((2, 1), [[1, 0, 0], [1, 0], [1]])
def test_dup_multi_deflate():
assert dup_multi_deflate(([2],), ZZ) == (1, ([2],))
assert dup_multi_deflate(([], []), ZZ) == (1, ([], []))
assert dup_multi_deflate(([1, 2, 3],), ZZ) == (1, ([1, 2, 3],))
assert dup_multi_deflate(([1, 0, 2, 0, 3],), ZZ) == (2, ([1, 2, 3],))
assert dup_multi_deflate(([1, 0, 2, 0, 3], [2, 0, 0]), ZZ) == \
(2, ([1, 2, 3], [2, 0]))
assert dup_multi_deflate(([1, 0, 2, 0, 3], [2, 1, 0]), ZZ) == \
(1, ([1, 0, 2, 0, 3], [2, 1, 0]))
def test_dmp_multi_deflate():
assert dmp_multi_deflate(([[]],), 1, ZZ) == \
((1, 1), ([[]],))
assert dmp_multi_deflate(([[]], [[]]), 1, ZZ) == \
((1, 1), ([[]], [[]]))
assert dmp_multi_deflate(([[1]], [[]]), 1, ZZ) == \
((1, 1), ([[1]], [[]]))
assert dmp_multi_deflate(([[1]], [[2]]), 1, ZZ) == \
((1, 1), ([[1]], [[2]]))
assert dmp_multi_deflate(([[1]], [[2, 0]]), 1, ZZ) == \
((1, 1), ([[1]], [[2, 0]]))
assert dmp_multi_deflate(([[2, 0]], [[2, 0]]), 1, ZZ) == \
((1, 1), ([[2, 0]], [[2, 0]]))
assert dmp_multi_deflate(
([[2]], [[2, 0, 0]]), 1, ZZ) == ((1, 2), ([[2]], [[2, 0]]))
assert dmp_multi_deflate(
([[2, 0, 0]], [[2, 0, 0]]), 1, ZZ) == ((1, 2), ([[2, 0]], [[2, 0]]))
assert dmp_multi_deflate(([2, 0, 0], [1, 0, 4, 0, 1]), 0, ZZ) == \
((2,), ([2, 0], [1, 4, 1]))
f = [[1, 0, 0], [], [1, 0], [], [1]]
g = [[1, 0, 1, 0], [], [1]]
assert dmp_multi_deflate((f,), 1, ZZ) == \
((2, 1), ([[1, 0, 0], [1, 0], [1]],))
assert dmp_multi_deflate((f, g), 1, ZZ) == \
((2, 1), ([[1, 0, 0], [1, 0], [1]],
[[1, 0, 1, 0], [1]]))
def test_dup_inflate():
assert dup_inflate([], 17, ZZ) == []
assert dup_inflate([1, 2, 3], 1, ZZ) == [1, 2, 3]
assert dup_inflate([1, 2, 3], 2, ZZ) == [1, 0, 2, 0, 3]
assert dup_inflate([1, 2, 3], 3, ZZ) == [1, 0, 0, 2, 0, 0, 3]
assert dup_inflate([1, 2, 3], 4, ZZ) == [1, 0, 0, 0, 2, 0, 0, 0, 3]
raises(IndexError, lambda: dup_inflate([1, 2, 3], 0, ZZ))
def test_dmp_inflate():
assert dmp_inflate([1], (3,), 0, ZZ) == [1]
assert dmp_inflate([[]], (3, 7), 1, ZZ) == [[]]
assert dmp_inflate([[2]], (1, 2), 1, ZZ) == [[2]]
assert dmp_inflate([[2, 0]], (1, 1), 1, ZZ) == [[2, 0]]
assert dmp_inflate([[2, 0]], (1, 2), 1, ZZ) == [[2, 0, 0]]
assert dmp_inflate([[2, 0]], (1, 3), 1, ZZ) == [[2, 0, 0, 0]]
assert dmp_inflate([[1, 0, 0], [1], [1, 0]], (2, 1), 1, ZZ) == \
[[1, 0, 0], [], [1], [], [1, 0]]
raises(IndexError, lambda: dmp_inflate([[]], (-3, 7), 1, ZZ))
def test_dmp_exclude():
assert dmp_exclude([[[]]], 2, ZZ) == ([], [[[]]], 2)
assert dmp_exclude([[[7]]], 2, ZZ) == ([], [[[7]]], 2)
assert dmp_exclude([1, 2, 3], 0, ZZ) == ([], [1, 2, 3], 0)
assert dmp_exclude([[1], [2, 3]], 1, ZZ) == ([], [[1], [2, 3]], 1)
assert dmp_exclude([[1, 2, 3]], 1, ZZ) == ([0], [1, 2, 3], 0)
assert dmp_exclude([[1], [2], [3]], 1, ZZ) == ([1], [1, 2, 3], 0)
assert dmp_exclude([[[1, 2, 3]]], 2, ZZ) == ([0, 1], [1, 2, 3], 0)
assert dmp_exclude([[[1]], [[2]], [[3]]], 2, ZZ) == ([1, 2], [1, 2, 3], 0)
def test_dmp_include():
assert dmp_include([1, 2, 3], [], 0, ZZ) == [1, 2, 3]
assert dmp_include([1, 2, 3], [0], 0, ZZ) == [[1, 2, 3]]
assert dmp_include([1, 2, 3], [1], 0, ZZ) == [[1], [2], [3]]
assert dmp_include([1, 2, 3], [0, 1], 0, ZZ) == [[[1, 2, 3]]]
assert dmp_include([1, 2, 3], [1, 2], 0, ZZ) == [[[1]], [[2]], [[3]]]
def test_dmp_inject():
R, x,y = ring("x,y", ZZ)
K = R.to_domain()
assert dmp_inject([], 0, K) == ([[[]]], 2)
assert dmp_inject([[]], 1, K) == ([[[[]]]], 3)
assert dmp_inject([R(1)], 0, K) == ([[[1]]], 2)
assert dmp_inject([[R(1)]], 1, K) == ([[[[1]]]], 3)
assert dmp_inject([R(1), 2*x + 3*y + 4], 0, K) == ([[[1]], [[2], [3, 4]]], 2)
f = [3*x**2 + 7*x*y + 5*y**2, 2*x, R(0), x*y**2 + 11]
g = [[[3], [7, 0], [5, 0, 0]], [[2], []], [[]], [[1, 0, 0], [11]]]
assert dmp_inject(f, 0, K) == (g, 2)
def test_dmp_eject():
R, x,y = ring("x,y", ZZ)
K = R.to_domain()
assert dmp_eject([[[]]], 2, K) == []
assert dmp_eject([[[[]]]], 3, K) == [[]]
assert dmp_eject([[[1]]], 2, K) == [R(1)]
assert dmp_eject([[[[1]]]], 3, K) == [[R(1)]]
assert dmp_eject([[[1]], [[2], [3, 4]]], 2, K) == [R(1), 2*x + 3*y + 4]
f = [3*x**2 + 7*x*y + 5*y**2, 2*x, R(0), x*y**2 + 11]
g = [[[3], [7, 0], [5, 0, 0]], [[2], []], [[]], [[1, 0, 0], [11]]]
assert dmp_eject(g, 2, K) == f
def test_dup_terms_gcd():
assert dup_terms_gcd([], ZZ) == (0, [])
assert dup_terms_gcd([1, 0, 1], ZZ) == (0, [1, 0, 1])
assert dup_terms_gcd([1, 0, 1, 0], ZZ) == (1, [1, 0, 1])
def test_dmp_terms_gcd():
assert dmp_terms_gcd([[]], 1, ZZ) == ((0, 0), [[]])
assert dmp_terms_gcd([1, 0, 1, 0], 0, ZZ) == ((1,), [1, 0, 1])
assert dmp_terms_gcd([[1], [], [1], []], 1, ZZ) == ((1, 0), [[1], [], [1]])
assert dmp_terms_gcd(
[[1, 0], [], [1]], 1, ZZ) == ((0, 0), [[1, 0], [], [1]])
assert dmp_terms_gcd(
[[1, 0], [1, 0, 0], [], []], 1, ZZ) == ((2, 1), [[1], [1, 0]])
def test_dmp_list_terms():
assert dmp_list_terms([[[]]], 2, ZZ) == [((0, 0, 0), 0)]
assert dmp_list_terms([[[1]]], 2, ZZ) == [((0, 0, 0), 1)]
assert dmp_list_terms([1, 2, 4, 3, 5], 0, ZZ) == \
[((4,), 1), ((3,), 2), ((2,), 4), ((1,), 3), ((0,), 5)]
assert dmp_list_terms([[1], [2, 4], [3, 5, 0]], 1, ZZ) == \
[((2, 0), 1), ((1, 1), 2), ((1, 0), 4), ((0, 2), 3), ((0, 1), 5)]
f = [[2, 0, 0, 0], [1, 0, 0], []]
assert dmp_list_terms(f, 1, ZZ, order='lex') == [((2, 3), 2), ((1, 2), 1)]
assert dmp_list_terms(
f, 1, ZZ, order='grlex') == [((2, 3), 2), ((1, 2), 1)]
f = [[2, 0, 0, 0], [1, 0, 0, 0, 0, 0], []]
assert dmp_list_terms(f, 1, ZZ, order='lex') == [((2, 3), 2), ((1, 5), 1)]
assert dmp_list_terms(
f, 1, ZZ, order='grlex') == [((1, 5), 1), ((2, 3), 2)]
def test_dmp_apply_pairs():
h = lambda a, b: a*b
assert dmp_apply_pairs([1, 2, 3], [4, 5, 6], h, [], 0, ZZ) == [4, 10, 18]
assert dmp_apply_pairs([2, 3], [4, 5, 6], h, [], 0, ZZ) == [10, 18]
assert dmp_apply_pairs([1, 2, 3], [5, 6], h, [], 0, ZZ) == [10, 18]
assert dmp_apply_pairs(
[[1, 2], [3]], [[4, 5], [6]], h, [], 1, ZZ) == [[4, 10], [18]]
assert dmp_apply_pairs(
[[1, 2], [3]], [[4], [5, 6]], h, [], 1, ZZ) == [[8], [18]]
assert dmp_apply_pairs(
[[1], [2, 3]], [[4, 5], [6]], h, [], 1, ZZ) == [[5], [18]]
def test_dup_slice():
f = [1, 2, 3, 4]
assert dup_slice(f, 0, 0, ZZ) == []
assert dup_slice(f, 0, 1, ZZ) == [4]
assert dup_slice(f, 0, 2, ZZ) == [3, 4]
assert dup_slice(f, 0, 3, ZZ) == [2, 3, 4]
assert dup_slice(f, 0, 4, ZZ) == [1, 2, 3, 4]
assert dup_slice(f, 0, 4, ZZ) == f
assert dup_slice(f, 0, 9, ZZ) == f
assert dup_slice(f, 1, 0, ZZ) == []
assert dup_slice(f, 1, 1, ZZ) == []
assert dup_slice(f, 1, 2, ZZ) == [3, 0]
assert dup_slice(f, 1, 3, ZZ) == [2, 3, 0]
assert dup_slice(f, 1, 4, ZZ) == [1, 2, 3, 0]
assert dup_slice([1, 2], 0, 3, ZZ) == [1, 2]
def test_dup_random():
f = dup_random(0, -10, 10, ZZ)
assert dup_degree(f) == 0
assert all(-10 <= c <= 10 for c in f)
f = dup_random(1, -20, 20, ZZ)
assert dup_degree(f) == 1
assert all(-20 <= c <= 20 for c in f)
f = dup_random(2, -30, 30, ZZ)
assert dup_degree(f) == 2
assert all(-30 <= c <= 30 for c in f)
f = dup_random(3, -40, 40, ZZ)
assert dup_degree(f) == 3
assert all(-40 <= c <= 40 for c in f)
| bsd-3-clause |
Aloomaio/googleads-python-lib | examples/ad_manager/v201808/proposal_line_item_service/update_proposal_line_items.py | 1 | 2579 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a proposal line item's notes.
To determine which proposal line items exist,
run get_all_proposal_line_items.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set id of the proposal line item to update.
PROPOSAL_LINE_ITEM_ID = 'INSERT_PROPOSAL_LINE_ITEM_ID_HERE'
def main(client, proposal_line_item_id):
# Initialize appropriate service.
proposal_line_item_service = client.GetService(
'ProposalLineItemService', version='v201808')
# Create statement to select a proposal line item.
statement = (ad_manager.StatementBuilder(version='v201808')
.Where('id = :id')
.WithBindVariable('id', long(proposal_line_item_id))
.Limit(1))
# Get proposal line items by statement.
response = proposal_line_item_service.getProposalLineItemsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
# Update each the proposal line item's notes field.
proposal_line_item = response['results'][0]
proposal_line_item['internalNotes'] = 'Proposal line is ready to submit.'
# Update proposal line items remotely.
proposal_line_items = proposal_line_item_service.updateProposalLineItems(
[proposal_line_item])
# Display results.
if proposal_line_items:
for proposal_line_item in proposal_line_items:
print ('Line item with id "%s", belonging to proposal id "%s" and,'
' named "%s" was updated.' % (
proposal_line_item['id'], proposal_line_item['proposalId'],
proposal_line_item['name']))
else:
print 'No proposal line items were updated.'
else:
print 'No proposal line items found to update.'
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, PROPOSAL_LINE_ITEM_ID)
| apache-2.0 |
azverkan/scons | src/engine/SCons/Tool/dvips.py | 2 | 3355 | """SCons.Tool.dvips
Tool-specific initialization for dvips.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Action
import SCons.Builder
import SCons.Tool.dvipdf
import SCons.Util
def DviPsFunction(target=None, source=None, env=None):
    """Action function for the dvips builder.

    Delegates to dvipdf's shared DVI-conversion helper, passing the
    dvips command action (PSAction).
    """
    return SCons.Tool.dvipdf.DviPdfPsFunction(PSAction, target, source, env)
def DviPsStrFunction(target=None, source=None, env=None):
    """strfunction for the dvips action.

    Under -n/--no-exec the substituted $PSCOM command line is returned so
    SCons can display it; during a real build the command echoes itself,
    so an empty string is returned instead.
    """
    if not env.GetOption("no_exec"):
        return ''
    return env.subst('$PSCOM', 0, target, source)
# Module-level singletons shared by every Environment this tool is applied
# to; generate() creates each one lazily on first use so repeated tool
# initialization reuses the same Action/Builder objects.
PSAction = None
DVIPSAction = None
PSBuilder = None
def generate(env):
    """Add Builders and construction variables for dvips to an Environment."""
    global PSAction, DVIPSAction, PSBuilder

    # Lazily create the shared Action/Builder singletons on first use.
    if PSAction is None:
        PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')
    if DVIPSAction is None:
        DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction=DviPsStrFunction)
    if PSBuilder is None:
        PSBuilder = SCons.Builder.Builder(action=PSAction,
                                          prefix='$PSPREFIX',
                                          suffix='$PSSUFFIX',
                                          src_suffix='.dvi',
                                          src_builder='DVI',
                                          single_source=True)

    env['BUILDERS']['PostScript'] = PSBuilder

    env['DVIPS'] = 'dvips'
    env['DVIPSFLAGS'] = SCons.Util.CLVar('')
    # I'm not quite sure I got the directories and filenames right for variant_dir
    # We need to be in the correct directory for the sake of latex \includegraphics eps included files.
    env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
    env['PSPREFIX'] = ''
    env['PSSUFFIX'] = '.ps'
def exists(env):
    """Report whether the Environment can locate a dvips executable."""
    return env.Detect('dvips')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
fitermay/intellij-community | python/lib/Lib/string.py | 92 | 16675 | """A collection of string operations (most are no longer used).
Warning: most of the code you see here isn't normally used nowadays.
Beginning with Python 1.6, many of these functions are implemented as
methods on the standard string object. They used to be implemented by
a built-in module called strop, but strop is now obsolete itself.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
punctuation -- a string containing all characters considered punctuation
printable -- a string containing all characters considered printable
"""
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
ascii_lowercase = lowercase
ascii_uppercase = uppercase
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace

# Case conversion helpers
# Use str to convert Unicode literal in case of -U
# Build the 256-character identity map used by maketrans()/translate()
# below.  Python 2 semantics: map() returns a list and xrange is lazy.
l = map(chr, xrange(256))
_idmap = str('').join(l)
del l
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
    """capwords(s, [sep]) -> string

    Break *s* into words on *sep* (any whitespace run when sep is None),
    capitalize each word, and glue the words back together with *sep*
    (or a single space).  Runs of whitespace therefore collapse.
    """
    words = s.split(sep)
    joiner = sep if sep else ' '
    return joiner.join(word.capitalize() for word in words)
# Construct a translation string
_idmapL = None   # lazily-built list of all 256 chars, cached for maketrans()
def maketrans(fromstr, tostr):
    """maketrans(frm, to) -> string

    Return a translation table (a string of 256 bytes long)
    suitable for use in string.translate.  The strings frm and to
    must be of the same length.
    """
    if len(fromstr) != len(tostr):
        # Legacy Python 2 raise syntax, kept as-is.
        raise ValueError, "maketrans arguments must have same length"
    global _idmapL
    if not _idmapL:
        # Python 2 idiom: map(None, s) is the identity map -> list of chars.
        _idmapL = map(None, _idmap)
    L = _idmapL[:]
    fromstr = map(ord, fromstr)
    # Overwrite the identity entries for the characters being remapped.
    for i in range(len(fromstr)):
        L[fromstr[i]] = tostr[i]
    return ''.join(L)
####################################################################
import re as _re
class _multimap:
"""Helper class for combining multiple mappings.
Used by .{safe_,}substitute() to combine the mapping and keyword
arguments.
"""
def __init__(self, primary, secondary):
self._primary = primary
self._secondary = secondary
def __getitem__(self, key):
try:
return self._primary[key]
except KeyError:
return self._secondary[key]
class _TemplateMetaclass(type):
    # Verbose-mode regex template; %(delim)s and %(id)s are interpolated
    # from the created class's delimiter/idpattern attributes.
    pattern = r"""
    %(delim)s(?:
      (?P<escaped>%(delim)s) | # Escape sequence of two delimiters
      (?P<named>%(id)s)      | # delimiter and a Python identifier
      {(?P<braced>%(id)s)}   | # delimiter and a braced identifier
      (?P<invalid>)            # Other ill-formed delimiter exprs
    )
    """

    def __init__(cls, name, bases, dct):
        super(_TemplateMetaclass, cls).__init__(name, bases, dct)
        if 'pattern' in dct:
            # The subclass supplied a complete regex of its own: use it verbatim.
            pattern = cls.pattern
        else:
            pattern = _TemplateMetaclass.pattern % {
                'delim' : _re.escape(cls.delimiter),
                'id' : cls.idpattern,
                }
        # Compiled once per class; Template instances share cls.pattern.
        cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
    """A string class for supporting $-substitutions."""
    __metaclass__ = _TemplateMetaclass  # Python 2: compiles cls.pattern

    delimiter = '$'
    idpattern = r'[_a-z][_a-z0-9]*'

    def __init__(self, template):
        self.template = template

    # Search for $$, $identifier, ${identifier}, and any bare $'s

    def _invalid(self, mo):
        # Translate the position of the 'invalid' group into a line/column
        # pair for a readable error message.
        i = mo.start('invalid')
        lines = self.template[:i].splitlines(True)
        if not lines:
            colno = 1
            lineno = 1
        else:
            colno = i - len(''.join(lines[:-1]))
            lineno = len(lines)
        raise ValueError('Invalid placeholder in string: line %d, col %d' %
                         (lineno, colno))

    def substitute(self, *args, **kws):
        """Substitute every placeholder; raises KeyError for a missing
        name and ValueError for a malformed placeholder."""
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            # Keyword arguments take precedence over the positional mapping.
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            # Check the most common path first.
            named = mo.group('named') or mo.group('braced')
            if named is not None:
                val = mapping[named]
                # We use this idiom instead of str() because the latter will
                # fail if val is a Unicode containing non-ASCII characters.
                return '%s' % (val,)
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                self._invalid(mo)
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)

    def safe_substitute(self, *args, **kws):
        """Like substitute(), but leaves missing or malformed placeholders
        in the output instead of raising."""
        if len(args) > 1:
            raise TypeError('Too many positional arguments')
        if not args:
            mapping = kws
        elif kws:
            mapping = _multimap(kws, args[0])
        else:
            mapping = args[0]
        # Helper function for .sub()
        def convert(mo):
            named = mo.group('named')
            if named is not None:
                try:
                    # We use this idiom instead of str() because the latter
                    # will fail if val is a Unicode containing non-ASCII
                    return '%s' % (mapping[named],)
                except KeyError:
                    return self.delimiter + named
            braced = mo.group('braced')
            if braced is not None:
                try:
                    return '%s' % (mapping[braced],)
                except KeyError:
                    return self.delimiter + '{' + braced + '}'
            if mo.group('escaped') is not None:
                return self.delimiter
            if mo.group('invalid') is not None:
                return self.delimiter
            raise ValueError('Unrecognized named group in pattern',
                             self.pattern)
        return self.pattern.sub(convert, self.template)
####################################################################
# NOTE: Everything below here is deprecated. Use string methods instead.
# This stuff will go away in Python 3.0.
# Backward compatible names for exceptions
# Backward compatible names for exceptions
# (ancient string modules raised distinct exception objects; they are all
# plain ValueError aliases now)
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# convert UPPER CASE letters to lower case
def lower(s):
    """lower(s) -> string

    Return a copy of *s* with all cased characters converted to
    lower case.
    """
    return s.lower()
# Convert lower case letters to UPPER CASE
def upper(s):
    """upper(s) -> string

    Return a copy of *s* with all cased characters converted to
    upper case.
    """
    return s.upper()
# Swap lower case letters and UPPER CASE
def swapcase(s):
    """swapcase(s) -> string

    Return a copy of *s* with upper-case characters lowered and
    lower-case characters raised.
    """
    return s.swapcase()
# Strip leading and trailing tabs and spaces
def strip(s, chars=None):
    """strip(s [,chars]) -> string

    Return *s* with leading and trailing whitespace removed.  When
    *chars* is given and not None, strip characters from that set
    instead; if chars is unicode, s is converted to unicode first.
    """
    return s.strip(chars)
# Strip leading tabs and spaces
def lstrip(s, chars=None):
    """lstrip(s [,chars]) -> string

    Return *s* with leading whitespace removed, or with leading
    characters from *chars* removed when chars is given and not None.
    """
    return s.lstrip(chars)
# Strip trailing tabs and spaces
def rstrip(s, chars=None):
    """rstrip(s [,chars]) -> string

    Return *s* with trailing whitespace removed, or with trailing
    characters from *chars* removed when chars is given and not None.
    """
    return s.rstrip(chars)
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=-1):
    """split(s [,sep [,maxsplit]]) -> list of strings

    Split *s* on *sep* (any whitespace run when sep is None), performing
    at most *maxsplit* splits when maxsplit is non-negative.
    (split and splitfields are synonymous)
    """
    return s.split(sep, maxsplit)

splitfields = split
# Split a string into a list of space/tab-separated words
def rsplit(s, sep=None, maxsplit=-1):
    """rsplit(s [,sep [,maxsplit]]) -> list of strings

    Like split(), but splits are counted from the end of the string
    toward the front; at most *maxsplit* splits are made when maxsplit
    is non-negative.  A None *sep* splits on whitespace runs.
    """
    return s.rsplit(sep, maxsplit)
# Join fields with optional separator
def join(words, sep=' '):
    """join(list [,sep]) -> string

    Concatenate the strings in *words*, inserting *sep* (a single space
    by default) between consecutive items.
    (joinfields and join are synonymous)
    """
    return sep.join(words)

joinfields = join
# Find substring, raise exception if not found
def index(s, *args):
    """index(s, sub [,start [,end]]) -> int

    Like find(), but raises ValueError when the substring is not found.
    """
    return s.index(*args)
# Find last substring, raise exception if not found
def rindex(s, *args):
    """rindex(s, sub [,start [,end]]) -> int

    Like rfind(), but raises ValueError when the substring is not found.
    """
    return s.rindex(*args)
# Count non-overlapping occurrences of substring
def count(s, *args):
    """count(s, sub[, start[,end]]) -> int

    Return how many non-overlapping occurrences of *sub* appear in
    s[start:end]; start and end follow slice semantics.
    """
    return s.count(*args)
# Find substring, return -1 if not found
def find(s, *args):
    """find(s, sub [,start [,end]]) -> int

    Return the lowest index in s where substring sub is found,
    such that sub is contained within s[start,end].  Optional
    arguments start and end are interpreted as in slice notation.

    Return -1 on failure.
    """
    # Fixed docstring typo: the return annotation read "-> in".
    return s.find(*args)
# Find last substring, return -1 if not found
def rfind(s, *args):
    """rfind(s, sub [,start [,end]]) -> int

    Return the highest index in s where *sub* occurs within
    s[start:end] (slice semantics), or -1 when it does not occur.
    """
    return s.rfind(*args)
# for a bit of speed
# for a bit of speed
# Cache the conversion builtins under module-private names so atof/atoi/
# atol below do a fast lookup (and keep working if a caller shadows the
# public builtin names).  `long` is the Python 2 arbitrary-precision int.
_float = float
_int = int
_long = long
# Convert string to float
def atof(s):
    """atof(s) -> float

    Return the floating point number represented by the string s.
    """
    # Delegates to the cached float builtin (_float above).
    return _float(s)
# Convert string to integer
def atoi(s , base=10):
    """atoi(s [,base]) -> int

    Return the integer represented by the string s in the given
    base, which defaults to 10.  The string s must consist of one
    or more digits, possibly preceded by a sign.  If base is 0, it
    is chosen from the leading characters of s, 0 for octal, 0x or
    0X for hexadecimal.  If base is 16, a preceding 0x or 0X is
    accepted.
    """
    # Delegates to the cached int builtin (_int above).
    return _int(s, base)
# Convert string to long integer
def atol(s, base=10):
    """atol(s [,base]) -> long

    Return the long integer represented by the string s in the
    given base, which defaults to 10.  The string s must consist
    of one or more digits, possibly preceded by a sign.  If base
    is 0, it is chosen from the leading characters of s, 0 for
    octal, 0x or 0X for hexadecimal.  If base is 16, a preceding
    0x or 0X is accepted.  A trailing L or l is not accepted,
    unless base is 0.
    """
    # Delegates to the cached Python 2 long builtin (_long above).
    return _long(s, base)
# Left-justify a string
def ljust(s, width, *args):
    """ljust(s, width[, fillchar]) -> string

    Left-justify *s* in a field of *width* characters, padding on the
    right with spaces (or *fillchar* when given).  Never truncates.
    """
    return s.ljust(width, *args)
# Right-justify a string
def rjust(s, width, *args):
    """rjust(s, width[, fillchar]) -> string

    Right-justify *s* in a field of *width* characters, padding on the
    left with spaces (or *fillchar* when given).  Never truncates.
    """
    return s.rjust(width, *args)
# Center a string
def center(s, width, *args):
    """center(s, width[, fillchar]) -> string

    Center *s* in a field of *width* characters, padding with spaces
    (or *fillchar* when given) as needed.  Never truncates.
    """
    return s.center(width, *args)
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
    """zfill(x, width) -> string

    Pad a numeric string x with zeros on the left, to fill a field
    of the specified width.  The string x is never truncated.
    """
    # Decadent backward-compat feature: non-strings are converted with
    # repr() first (basestring is the Python 2 str/unicode base class).
    if not isinstance(x, basestring):
        x = repr(x)
    return x.zfill(width)
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
    """expandtabs(s [,tabsize]) -> string

    Return *s* with each tab replaced by enough spaces to reach the
    next multiple of *tabsize* columns (default 8); a newline resets
    the column count.
    """
    return s.expandtabs(tabsize)
# Character translation through look-up table.
def translate(s, table, deletions=""):
    """translate(s,table [,deletions]) -> string

    Map each character of *s* through *table* (a 256-character string),
    after dropping any characters listed in *deletions*.  The deletions
    argument is not allowed for Unicode strings.
    """
    if deletions:
        return s.translate(table, deletions)
    # Appending s[:0] promotes an 8-bit table to Unicode whenever s is
    # Unicode.  Consequently table *cannot* be a dictionary here -- use
    # u.translate() directly for that feature.
    return s.translate(table + s[:0])
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
    """capitalize(s) -> string

    Return *s* with its first character upper-cased and the remainder
    lower-cased.
    """
    return s.capitalize()
# Substring replacement (global)
def replace(s, old, new, maxsplit=-1):
    """replace (str, old, new[, maxsplit]) -> string

    Return a copy of *s* with occurrences of *old* replaced by *new*.
    When *maxsplit* is non-negative, only the first maxsplit
    occurrences are replaced.
    """
    return s.replace(old, new, maxsplit)
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
try:
    from strop import maketrans, lowercase, uppercase, whitespace
    letters = lowercase + uppercase
except ImportError:
    pass                                          # Use the original versions
| apache-2.0 |
Lw-Cui/RedBlackBST | lib/gtest/test/gtest_throw_on_failure_test.py | 363 | 5767 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.

# The command line flag for enabling/disabling the throw-on-failure mode.
# The same name, upper-cased by SetEnvVar(), doubles as the environment
# variable GTEST_THROW_ON_FAILURE.
THROW_ON_FAILURE = 'gtest_throw_on_failure'

# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
    """Sets an environment variable to a given value; unsets it when the
    given value is None.

    The name is upper-cased first, matching Google Test's convention of
    reading flags from upper-case environment variables.
    """
    name = env_var.upper()
    if value is None:
        # Removing an absent variable is a no-op.
        os.environ.pop(name, None)
    else:
        os.environ[name] = value
def Run(command):
    """Runs a command; returns True/False if its exit code is/isn't 0."""
    cmdline = ' '.join(command)
    print('Running "%s". . .' % cmdline)
    proc = gtest_test_utils.Subprocess(command)
    return proc.exited and proc.exit_code == 0
# The tests.  TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""

  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.

    Args:
      env_var_value:    value of the GTEST_THROW_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_throw_on_failure flag;
                        None if the flag should not be present.
      should_fail:      True iff the program is expected to fail.
    """

    SetEnvVar(THROW_ON_FAILURE, env_var_value)

    # Build a human-readable description of the env-var state for the
    # failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    failed = not Run(command)

    # Always restore the environment, even before asserting.
    SetEnvVar(THROW_ON_FAILURE, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)

  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)

  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)

  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
if __name__ == '__main__':
  gtest_test_utils.Main()  # discovers and runs the TestCase defined above
| mit |
zhangh43/incubator-hawq | src/backend/catalog/core/catheader.py | 9 | 6232 | #!/usr/bin/env python
import re
import os
class PgCatalogHeader(object):
    """This class is a base class for catalog header parser class, and
    provides basic methods to parse header files by regular expressions.
    The result will be in self.tuplist.  To extend this class, set these
    three class values.
      - header    (file name of the catalog header)
      - hasoid    (True when DATA lines carry "OID = n")
      - prefix    (catalog name used in Anum_<prefix>_<attr> defines)
    and call self.initialize() in __init__().
    """
    # Relative location of the catalog headers; resolved against this
    # script's directory by fullpath().
    catalogdir = '../../../include/catalog'

    def initialize(self):
        # Parse the header once and cache the resulting rows.
        path = self.fullpath(self.header)
        self.tuplist = self.readheader(path, self.hasoid, self.prefix)
        if not self.tuplist:
            raise Exception("no content")

    def fullpath(self, filename):
        """Returns the full path name of the catalog file."""
        # Anchor at this script's directory so parsing works from any CWD.
        thisdir = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(thisdir, self.catalogdir)
        return os.path.join(path, filename)

    def readheader(self, header, hasoid, tableprefix):
        """Returns a list of dictionaries as the result of parse.

        It finds lines starting with "#define Anum_" and collects
        attribute names, then parse DATA() macros.  All data is
        parsed as string regardless of the column type.
        """
        anum = re.compile(r'#define Anum_' + tableprefix + r'_(\w+)')
        rebuf = list()
        attlist = list()
        # First pass: derive one named capture group per attribute from
        # the Anum_<prefix>_<attr> defines, in column order.
        for line in open(header):
            m = anum.match(line)
            if m:
                # Build up regular expression.
                # We capture the group by name to look up later
                # (a value is either a non-space token or a quoted string).
                rebuf.append(r'(?P<' + m.group(1) + r'>\S+|"[^"]+")')
                attlist.append(m.group(1))
        oidpattern = ''
        if hasoid:
            oidpattern = r'OID\s*=\s*(?P<oid>\w+)\s*'
            attlist.append('oid')
        insert = re.compile(r'DATA\(insert\s+' +
                            oidpattern + r'\(\s*' +
                            '\s+'.join(rebuf) +
                            r'\s*\)\);')
        # Collect all the DATA() lines and put them into a list
        # (second pass re-reads the file with the assembled pattern).
        tuplist = list()
        for line in open(header):
            m = insert.match(line)
            if m:
                tup = dict()
                for att in attlist:
                    tup[att] = m.group(att)
                tuplist.append(tup)
        return tuplist
class PgAmop(PgCatalogHeader):
    """Parser for pg_amop.h (access-method operator entries)."""
    header = 'pg_amop.h'
    hasoid = False
    prefix = 'pg_amop'

    def __init__(self):
        self.initialize()

    def find_amopopr(self, amopclaid, amopstrategy):
        """Returns the operator oid that matches opclass and strategy."""
        wanted = (str(amopclaid), str(amopstrategy))
        for tup in self.tuplist:
            if (tup['amopclaid'], tup['amopstrategy']) == wanted:
                return tup['amopopr']
class PgOpclass(PgCatalogHeader):
    """Parser for pg_opclass.h."""
    header = 'pg_opclass.h'
    hasoid = True
    prefix = 'pg_opclass'

    def __init__(self):
        self.initialize()

    def find_btree_oid_by_opcintype(self, opcintype):
        """Returns the oid of the default btree opclass whose input type
        is *opcintype*, or None when there is no such opclass.
        """
        wanted = str(opcintype)
        for tup in self.tuplist:
            # 403 is the btree access method id
            if (tup['opcintype'] == wanted and
                    tup['opcamid'] == '403' and
                    tup['opcdefault'] == 't'):
                return tup['oid']
class PgOperator(PgCatalogHeader):
    """Parser for pg_operator.h."""
    header = 'pg_operator.h'
    hasoid = True
    prefix = 'pg_operator'

    def __init__(self):
        self.initialize()

    def find_oprcode(self, oid):
        """Returns the procedure oid implementing the operator *oid*."""
        key = str(oid)
        for tup in self.tuplist:
            if tup['oid'] == key:
                return tup['oprcode']
class PgType(PgCatalogHeader):
    """Parser for pg_type.h, plus an oid -> #define-macro map read from
    pg_type.sql."""
    header = 'pg_type.h'
    hasoid = True
    prefix = 'pg_type'

    # SQL-ish spellings normalized to catalog type names.
    _TYPNAME_ALIASES = {
        'quoted_char': 'char',
        'boolean': 'bool',
        'smallint': 'int2',
        'integer': 'int4',
        'bigint': 'int8',
        'real': 'float4',
        'timestamp_with_time_zone': 'timestamptz',
    }

    def __init__(self):
        self.initialize()
        self.oid_defs = self._read_oid_defs()

    def findtup_by_typname(self, typname):
        """Returns a tuple that matches typname.

        The input typname is normalized if it's any of quoted_char,
        boolean, smallint, integer, bigint, real, or
        timestamp_with_time_zone.  A trailing '[]' marks an array type
        and is normalized to the catalog's underscore-prefixed array
        type name.
        """
        basename = typname.rstrip('[]')
        isarray = basename != typname
        typname = self._TYPNAME_ALIASES.get(basename, basename)
        if isarray:
            typname = '_' + typname
        for tup in self.tuplist:
            if tup['typname'] == str(typname):
                return tup

    def findtup_by_typid(self, typid):
        """Returns the row whose oid equals *typid*, or None."""
        for tup in self.tuplist:
            if tup['oid'] == str(typid):
                return tup

    def oid_to_def(self, oid):
        """Returns the #define macro name for *oid*, falling back to the
        numeric string when pg_type.sql defines no macro for it."""
        return self.oid_defs.get(int(oid), str(oid))

    def _read_oid_defs(self):
        """Reads #define lines in pg_type.sql and builds up a map from
        oid(int) to macro string.
        """
        # Bug fix: resolve pg_type.sql relative to this script (as
        # fullpath() does for the headers) instead of the caller's CWD,
        # which made the lookup fail when run from another directory.
        filename = self.fullpath('pg_type.sql')
        pat = re.compile(r'^.*#define\s+\S*OID\s+\d+')
        oidmap = dict()
        for line in open(filename):
            if pat.match(line):
                tokens = line.split()
                # The macro name and its value are the last two
                # whitespace-separated tokens of the #define line.
                oidmap[int(tokens[-1])] = tokens[-2]
        return oidmap
class PgProc(PgCatalogHeader):
    """Parser for pg_proc.h."""
    header = 'pg_proc.h'
    hasoid = True
    prefix = 'pg_proc'

    def __init__(self):
        self.initialize()

    def find_prosrc_by_proname(self, proname):
        """Returns the prosrc column of the first procedure named *proname*."""
        wanted = str(proname)
        for tup in self.tuplist:
            if tup['proname'] == wanted:
                return tup['prosrc']
| apache-2.0 |
xinst/NoahGameFrame | Dependencies/protobuf/gtest/test/gtest_list_tests_unittest.py | 1068 | 5415 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
Abc.
Xyz
Def
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# Utilities.
def Run(args):
    """Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
    proc = gtest_test_utils.Subprocess([EXE_PATH] + args, capture_stderr=False)
    return proc.output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
  """Tests using the --gtest_list_tests flag to list all tests."""

  def RunAndVerify(self, flag_value, expected_output, other_flag):
    """Runs gtest_list_tests_unittest_ and verifies that it prints
    the correct tests.

    Args:
      flag_value:       value of the --gtest_list_tests flag;
                        None if the flag should not be present.
      expected_output:  the expected output after running command;
                        None means the output must merely differ from the
                        unfiltered listing.
      other_flag:       a different flag to be passed to command
                        along with gtest_list_tests;
                        None if the flag should not be present.
    """
    if flag_value is None:
      flag = ''
      flag_expression = 'not set'
    elif flag_value == '0':
      flag = '--%s=0' % LIST_TESTS_FLAG
      flag_expression = '0'
    else:
      flag = '--%s' % LIST_TESTS_FLAG
      flag_expression = '1'

    args = [flag]
    if other_flag is not None:
      args += [other_flag]

    output = Run(args)

    msg = ('when %s is %s, the output of "%s" is "%s".' %
           (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))

    if expected_output is not None:
      self.assert_(output == expected_output, msg)
    else:
      # Default mode: listing is off, so the program runs its tests and
      # the output differs from the plain listing.
      self.assert_(output != EXPECTED_OUTPUT_NO_FILTER, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(flag_value=None,
                      expected_output=None,
                      other_flag=None)

  def testFlag(self):
    """Tests using the --gtest_list_tests flag."""
    self.RunAndVerify(flag_value='0',
                      expected_output=None,
                      other_flag=None)
    self.RunAndVerify(flag_value='1',
                      expected_output=EXPECTED_OUTPUT_NO_FILTER,
                      other_flag=None)

  def testOverrideNonFilterFlags(self):
    """Tests that --gtest_list_tests overrides the non-filter flags."""
    self.RunAndVerify(flag_value='1',
                      expected_output=EXPECTED_OUTPUT_NO_FILTER,
                      other_flag='--gtest_break_on_failure')

  def testWithFilterFlags(self):
    """Tests that --gtest_list_tests takes into account the
    --gtest_filter flag."""
    self.RunAndVerify(flag_value='1',
                      expected_output=EXPECTED_OUTPUT_FILTER_FOO,
                      other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
  gtest_test_utils.Main()  # discovers and runs the TestCase defined above
| apache-2.0 |
javatlacati/codecombat | scripts/devSetup/systemConfiguration.py | 77 | 1375 | from __future__ import division
__author__ = u'schmatz'
import sys
import os
from errors import NotSupportedError
class SystemConfiguration(object):
    """Detects host facts (operating system name, pointer width) needed
    by the dev-setup scripts.

    Raises NotSupportedError for platforms or architectures the setup
    does not support.
    """

    def __init__(self):
        # get_virtual_memory_address_width() may consult
        # self.operating_system, so the OS lookup must run first.
        self.operating_system = self.get_operating_system()
        self.virtual_memory_address_width = self.get_virtual_memory_address_width()

    def get_operating_system(self):
        """Map sys.platform onto u"linux", u"mac" or u"windows"."""
        platform = sys.platform
        if platform.startswith(u'linux'):
            return u"linux"
        elif platform.startswith(u'darwin'):
            return u"mac"
        elif platform.startswith(u'win'):
            return u"windows"
        else:
            raise NotSupportedError(u"Your platform," + sys.platform + u",isn't supported.")

    def get_current_working_directory(self):
        """Return the current working directory (unicode on Python 2)."""
        if sys.version_info.major < 3:
            return os.getcwdu()
        else:
            return os.getcwd()

    def get_virtual_memory_address_width(self):
        """Return 64 for a 64-bit interpreter; raise NotSupportedError
        for unsupported 32-bit builds.
        """
        is64Bit = sys.maxsize/3 > 2**32
        if is64Bit:
            return 64
        # A 32-bit Python on a 64-bit OS X kernel still reports x86_64
        # from uname, so treat that combination as 64-bit capable.
        if self.operating_system == u"mac":
            if os.uname()[4] == u"x86_64":
                return 64
        # Bug fix: a dead "return 32" used to follow this raise; it was
        # unreachable and has been removed.  32-bit hosts are rejected.
        raise NotSupportedError(u"Your processor is determined to have a maxSize of" + str(sys.maxsize) +
                                u",\n which doesn't correspond with a 64-bit architecture.")
| mit |
bakhtout/odoo-educ | addons/purchase/edi/purchase_order.py | 439 | 9703 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.addons.edi import EDIMixin
# Field whitelists used by EDIMixin when (de)serializing purchase orders:
# True means "export this field as-is"; a nested dict describes the
# sub-structure used for the corresponding one2many field.
PURCHASE_ORDER_LINE_EDI_STRUCT = {
    'name': True,
    'date_planned': True,
    'product_id': True,
    'product_uom': True,
    'price_unit': True,
    'product_qty': True,
    # fields used for web preview only - discarded on import
    'price_subtotal': True,
}
PURCHASE_ORDER_EDI_STRUCT = {
    'company_id': True, # -> to be changed into partner
    'name': True,
    'partner_ref': True,
    'origin': True,
    'date_order': True,
    'partner_id': True,
    #custom: 'partner_address',
    'notes': True,
    'order_line': PURCHASE_ORDER_LINE_EDI_STRUCT,
    #custom: currency_id
    # fields used for web preview only - discarded on import
    'amount_total': True,
    'amount_untaxed': True,
    'amount_tax': True,
    'state':True,
}
class purchase_order(osv.osv, EDIMixin):
    # EDI layer for purchase orders: a document we export is meant to be
    # re-imported by the trading partner as a sale.order, and vice versa
    # (see the '__import_model' keys below).
    _inherit = 'purchase.order'
    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Exports a purchase order"""
        edi_struct = dict(edi_struct or PURCHASE_ORDER_EDI_STRUCT)
        res_company = self.pool.get('res.company')
        res_partner_obj = self.pool.get('res.partner')
        edi_doc_list = []
        for order in records:
            # generate the main report
            self._edi_generate_report_attachment(cr, uid, order, context=context)
            # Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
            edi_doc = super(purchase_order,self).edi_export(cr, uid, [order], edi_struct, context)[0]
            edi_doc.update({
                # force trans-typing to purchase.order upon import
                '__import_model': 'sale.order',
                '__import_module': 'sale',
                'company_address': res_company.edi_export_address(cr, uid, order.company_id, context=context),
                'partner_address': res_partner_obj.edi_export(cr, uid, [order.partner_id], context=context)[0],
                'currency': self.pool.get('res.currency').edi_export(cr, uid, [order.pricelist_id.currency_id],
                                                                     context=context)[0],
            })
            if edi_doc.get('order_line'):
                for line in edi_doc['order_line']:
                    # PO lines become sale.order lines on the partner's side
                    line['__import_model'] = 'sale.order.line'
            edi_doc_list.append(edi_doc)
        return edi_doc_list
    def edi_import_company(self, cr, uid, edi_document, context=None):
        """Import the document's company info as a supplier partner, rewire
        edi_document['partner_id'] to it, and return the new partner id."""
        # TODO: for multi-company setups, we currently import the document in the
        # user's current company, but we should perhaps foresee a way to select
        # the desired company among the user's allowed companies
        self._edi_requires_attributes(('company_id','company_address'), edi_document)
        res_partner = self.pool.get('res.partner')
        xid, company_name = edi_document.pop('company_id')
        # Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
        company_address_edi = edi_document.pop('company_address')
        company_address_edi['name'] = company_name
        company_address_edi['is_company'] = True
        company_address_edi['__import_model'] = 'res.partner'
        company_address_edi['__id'] = xid  # override address ID, as of v7 they should be the same anyway
        if company_address_edi.get('logo'):
            company_address_edi['image'] = company_address_edi.pop('logo')
        # the exporting company is, from our point of view, a supplier
        company_address_edi['supplier'] = True
        partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)
        # modify edi_document to refer to new partner
        partner = res_partner.browse(cr, uid, partner_id, context=context)
        partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
        edi_document['partner_id'] = partner_edi_m2o
        edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!
        return partner_id
    def _edi_get_pricelist(self, cr, uid, partner_id, currency, context=None):
        """Return an EDI m2o reference to a purchase pricelist in *currency*,
        creating a new pricelist (and version) when none matches."""
        # TODO: refactor into common place for purchase/sale, e.g. into product module
        partner_model = self.pool.get('res.partner')
        partner = partner_model.browse(cr, uid, partner_id, context=context)
        pricelist = partner.property_product_pricelist_purchase
        if not pricelist:
            # fall back to the module's default purchase pricelist
            pricelist = self.pool.get('ir.model.data').get_object(cr, uid, 'purchase', 'list0', context=context)
        if not pricelist.currency_id == currency:
            # look for a pricelist with the right type and currency, or make a new one
            pricelist_type = 'purchase'
            product_pricelist = self.pool.get('product.pricelist')
            match_pricelist_ids = product_pricelist.search(cr, uid,[('type','=',pricelist_type),
                                                                    ('currency_id','=',currency.id)])
            if match_pricelist_ids:
                pricelist_id = match_pricelist_ids[0]
            else:
                pricelist_name = _('EDI Pricelist (%s)') % (currency.name,)
                pricelist_id = product_pricelist.create(cr, uid, {'name': pricelist_name,
                                                                  'type': pricelist_type,
                                                                  'currency_id': currency.id,
                                                                  })
                self.pool.get('product.pricelist.version').create(cr, uid, {'name': pricelist_name,
                                                                            'pricelist_id': pricelist_id})
            pricelist = product_pricelist.browse(cr, uid, pricelist_id)
        return self.edi_m2o(cr, uid, pricelist, context=context)
    def _edi_get_location(self, cr, uid, partner_id, context=None):
        """Return an EDI m2o reference to the partner's customer stock
        location, falling back to the default stock location."""
        partner_model = self.pool.get('res.partner')
        partner = partner_model.browse(cr, uid, partner_id, context=context)
        location = partner.property_stock_customer
        if not location:
            location = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'stock_location_stock', context=context)
        return self.edi_m2o(cr, uid, location, context=context)
    def edi_import(self, cr, uid, edi_document, context=None):
        """Import a (sale.order-shaped) EDI document as a purchase order."""
        self._edi_requires_attributes(('company_id','company_address','order_line','date_order','currency'), edi_document)
        #import company as a new partner
        partner_id = self.edi_import_company(cr, uid, edi_document, context=context)
        # currency for rounding the discount calculations and for the pricelist
        res_currency = self.pool.get('res.currency')
        currency_info = edi_document.pop('currency')
        currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
        order_currency = res_currency.browse(cr, uid, currency_id)
        # swap references: the partner's order number becomes our
        # partner_ref, and their partner_ref (if any) becomes our name
        partner_ref = edi_document.pop('partner_ref', False)
        edi_document['partner_ref'] = edi_document['name']
        edi_document['name'] = partner_ref or edi_document['name']
        edi_document['pricelist_id'] = self._edi_get_pricelist(cr, uid, partner_id, order_currency, context=context)
        edi_document['location_id'] = self._edi_get_location(cr, uid, partner_id, context=context)
        # discard web preview fields, if present
        edi_document.pop('amount_total', None)
        edi_document.pop('amount_tax', None)
        edi_document.pop('amount_untaxed', None)
        edi_document.pop('payment_term', None)
        edi_document.pop('order_policy', None)
        edi_document.pop('user_id', None)
        for order_line in edi_document['order_line']:
            self._edi_requires_attributes(('date_planned', 'product_id', 'product_uom', 'product_qty', 'price_unit'), order_line)
            # original sale order contains unit price and discount, but not final line price
            discount = order_line.pop('discount', 0.0)
            if discount:
                # fold the discount into the unit price, rounded in the order currency
                order_line['price_unit'] = res_currency.round(cr, uid, order_currency,
                                                              (order_line['price_unit'] * (1 - (discount or 0.0) / 100.0)))
            # sale order lines have sequence numbers, not purchase order lines
            order_line.pop('sequence', None)
            # discard web preview fields, if present
            order_line.pop('price_subtotal', None)
        return super(purchase_order,self).edi_import(cr, uid, edi_document, context=context)
class purchase_order_line(osv.osv, EDIMixin):
    # Mixes EDIMixin into purchase order lines so they can be serialized
    # as part of a purchase.order EDI document (see the structs above).
    _inherit='purchase.order.line'
| agpl-3.0 |
atuljain/odoo | addons/account_analytic_plans/account_analytic_plans.py | 30 | 23439 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
class one2many_mod2(fields.one2many):
    # one2many variant used for the 'accountN_ids' fields of
    # account.analytic.plan.instance: when a journal is in context, only
    # the instance lines whose analytic account belongs to the N-th plan
    # line's root account are returned.
    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        """Read the field for *ids*, optionally restricted by the plan of
        context['journal_id']; returns {record_id: [line_ids]}."""
        if context is None:
            context = {}
        res = {}
        for id in ids:
            res[id] = []
        ids2 = None
        if 'journal_id' in context:
            journal = obj.pool.get('account.journal').browse(cr, user, context['journal_id'], context=context)
            # field names are 'account1_ids'..'account6_ids': character at
            # index 7 is the 1-based plan-line number
            pnum = int(name[7]) -1
            plan = journal.plan_id
            if plan and len(plan.plan_ids) > pnum:
                acc_id = plan.plan_ids[pnum].root_analytic_id.id
                # restrict to lines under that plan line's root account
                ids2 = obj.pool[self._obj].search(cr, user, [(self._fields_id,'in',ids),('analytic_account_id','child_of',[acc_id])], limit=self._limit)
        if ids2 is None:
            # no usable plan in context: behave like a plain one2many
            ids2 = obj.pool[self._obj].search(cr, user, [(self._fields_id,'in',ids)], limit=self._limit)
        for r in obj.pool[self._obj]._read_flat(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
            res[r[self._fields_id]].append( r['id'] )
        return res
class account_analytic_line(osv.osv):
    _inherit = 'account.analytic.line'
    _description = 'Analytic Line'
    def _get_amount(self, cr, uid, ids, name, args, context=None):
        """Function field getter: this line's share (its percentage) of the
        related move line's amount_currency; 0.0 when there is no move."""
        res = {}
        for id in ids:
            res.setdefault(id, 0.0)
        for line in self.browse(cr, uid, ids, context=context):
            amount = line.move_id and line.move_id.amount_currency * (line.percentage / 100) or 0.0
            res[line.id] = amount
        return res
    _columns = {
        # stored computed field; recomputed through _get_amount above
        'amount_currency': fields.function(_get_amount, string="Amount Currency", type="float", store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
        # share of the distribution carried by this analytic line (0-100)
        'percentage': fields.float('Percentage')
    }
class account_analytic_plan(osv.osv):
    # A plan is a set of analytic dimensions (plan lines), each rooted at
    # an analytic account; instances distribute amounts across them.
    _name = "account.analytic.plan"
    _description = "Analytic Plan"
    _columns = {
        'name': fields.char('Analytic Plan', size=64, required=True, select=True),
        # one line per analytic dimension/axis of the plan
        'plan_ids': fields.one2many('account.analytic.plan.line', 'plan_id', 'Analytic Plans'),
    }
class account_analytic_plan_line(osv.osv):
    # One dimension of an analytic plan: a root analytic account plus the
    # allowed total percentage range, enforced in
    # account_analytic_plan_instance.create().
    _name = "account.analytic.plan.line"
    _description = "Analytic Plan Line"
    _order = "sequence, id"
    _columns = {
        'plan_id': fields.many2one('account.analytic.plan','Analytic Plan',required=True),
        'name': fields.char('Plan Name', size=64, required=True, select=True),
        'sequence': fields.integer('Sequence'),
        'root_analytic_id': fields.many2one('account.analytic.account', 'Root Account', help="Root account of this plan.", required=False),
        # bounds (in percent) for the summed rates over this dimension
        'min_required': fields.float('Minimum Allowed (%)'),
        'max_required': fields.float('Maximum Allowed (%)'),
    }
    _defaults = {
        'min_required': 100.0,
        'max_required': 100.0,
    }
class account_analytic_plan_instance(osv.osv):
    # One concrete analytic distribution (rates over analytic accounts).
    # When 'plan_id' is set the record is a reusable "model"; writing to a
    # model transparently creates a private copy (see write() below).
    _name = "account.analytic.plan.instance"
    _description = "Analytic Plan Instance"
    _columns = {
        'name': fields.char('Analytic Distribution', size=64),
        'code': fields.char('Distribution Code', size=16),
        'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal' ),
        'account_ids': fields.one2many('account.analytic.plan.instance.line', 'plan_id', 'Account Id'),
        # per-dimension views of account_ids, filtered by one2many_mod2
        'account1_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account1 Id'),
        'account2_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account2 Id'),
        'account3_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account3 Id'),
        'account4_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account4 Id'),
        'account5_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account5 Id'),
        'account6_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account6 Id'),
        'plan_id': fields.many2one('account.analytic.plan', "Model's Plan"),
    }
    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """When a journal is in context, also match instances tied to that
        journal's analytic journal (or to no journal at all)."""
        if context is None:
            context = {}
        journal_obj = self.pool.get('account.journal')
        if context.get('journal_id', False):
            journal = journal_obj.browse(cr, user, [context['journal_id']], context=context)[0]
            analytic_journal = journal.analytic_journal_id and journal.analytic_journal_id.id or False
            args.append('|')
            args.append(('journal_id', '=', analytic_journal))
            args.append(('journal_id', '=', False))
        res = super(account_analytic_plan_instance, self).search(cr, user, args, offset=offset, limit=limit, order=order,
                                                                 context=context, count=count)
        return res
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate without the per-dimension helper fields; the real
        lines are carried by 'account_ids'."""
        if not default:
            default = {}
        default.update({'account1_ids':False, 'account2_ids':False, 'account3_ids':False,
                        'account4_ids':False, 'account5_ids':False, 'account6_ids':False})
        return super(account_analytic_plan_instance, self).copy(cr, uid, id, default, context=context)
    def _default_journal(self, cr, uid, context=None):
        """Default journal_id: the analytic journal of the journal found
        in context, if any."""
        if context is None:
            context = {}
        journal_obj = self.pool.get('account.journal')
        if context.has_key('journal_id') and context['journal_id']:
            journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
            if journal.analytic_journal_id:
                return journal.analytic_journal_id.id
        return False
    _defaults = {
        'plan_id': False,
        'journal_id': _default_journal,
    }
    def name_get(self, cr, uid, ids, context=None):
        """Display name: "name (code)" when a code is set."""
        res = []
        for inst in self.browse(cr, uid, ids, context=context):
            name = inst.name or '/'
            if name and inst.code:
                name=name+' ('+inst.code+')'
            res.append((inst.id, name))
        return res
    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Search by exact code first, then fall back to the name field."""
        args = args or []
        if name:
            ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context or {})
            if not ids:
                ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context or {})
        else:
            ids = self.search(cr, uid, args, limit=limit, context=context or {})
        return self.name_get(cr, uid, ids, context or {})
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Build the form view dynamically: one editable tree per plan
        dimension, each domain-restricted to that dimension's root account."""
        if context is None:
            context = {}
        wiz_id = self.pool.get('ir.actions.act_window').search(cr, uid, [("name","=","analytic.plan.create.model.action")], context=context)
        res = super(account_analytic_plan_instance,self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
        journal_obj = self.pool.get('account.journal')
        analytic_plan_obj = self.pool.get('account.analytic.plan')
        if (res['type']=='form'):
            plan_id = False
            if context.get('journal_id', False):
                plan_id = journal_obj.browse(cr, uid, int(context['journal_id']), context=context).plan_id
            elif context.get('plan_id', False):
                plan_id = analytic_plan_obj.browse(cr, uid, int(context['plan_id']), context=context)
            if plan_id:
                i=1
                res['arch'] = """<form string="%s">
    <field name="name"/>
    <field name="code"/>
    <field name="journal_id"/>
    <button name="%d" string="Save This Distribution as a Model" type="action" colspan="2"/>
    """% (tools.to_xml(plan_id.name), wiz_id[0])
                for line in plan_id.plan_ids:
                    res['arch']+="""
    <field name="account%d_ids" string="%s" nolabel="1" colspan="4">
    <tree string="%s" editable="bottom">
        <field name="rate"/>
        <field name="analytic_account_id" domain="[('parent_id','child_of',[%d])]" groups="analytic.group_analytic_accounting"/>
    </tree>
    </field>
    <newline/>"""%(i,tools.to_xml(line.name),tools.to_xml(line.name),line.root_analytic_id and line.root_analytic_id.id or 0)
                    i+=1
                res['arch'] += "</form>"
                doc = etree.fromstring(res['arch'].encode('utf8'))
                xarch, xfields = self._view_look_dom_arch(cr, uid, doc, view_id, context=context)
                res['arch'] = xarch
                res['fields'] = xfields
            return res
        else:
            return res
    def create(self, cr, uid, vals, context=None):
        """Validate that, per plan dimension, the summed rates fall inside
        [min_required, max_required]; also reject duplicate model names."""
        journal_obj = self.pool.get('account.journal')
        ana_plan_instance_obj = self.pool.get('account.analytic.plan.instance')
        acct_anal_acct = self.pool.get('account.analytic.account')
        acct_anal_plan_line_obj = self.pool.get('account.analytic.plan.line')
        if context and 'journal_id' in context:
            journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
            pids = ana_plan_instance_obj.search(cr, uid, [('name','=',vals['name']), ('code','=',vals['code']), ('plan_id','<>',False)], context=context)
            if pids:
                raise osv.except_osv(_('Error!'), _('A model with this name and code already exists.'))
            res = acct_anal_plan_line_obj.search(cr, uid, [('plan_id','=',journal.plan_id.id)], context=context)
            for i in res:
                total_per_plan = 0
                item = acct_anal_plan_line_obj.browse(cr, uid, i, context=context)
                temp_list = ['account1_ids','account2_ids','account3_ids','account4_ids','account5_ids','account6_ids']
                for l in temp_list:
                    if vals.has_key(l):
                        # 'tempo' is an o2m command triple; tempo[2] holds the values
                        for tempo in vals[l]:
                            if acct_anal_acct.search(cr, uid, [('parent_id', 'child_of', [item.root_analytic_id.id]), ('id', '=', tempo[2]['analytic_account_id'])], context=context):
                                total_per_plan += tempo[2]['rate']
                if total_per_plan < item.min_required or total_per_plan > item.max_required:
                    raise osv.except_osv(_('Error!'),_('The total should be between %s and %s.') % (str(item.min_required), str(item.max_required)))
        return super(account_analytic_plan_instance, self).create(cr, uid, vals, context=context)
    def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
        """Writing to a model (plan_id set) copies it first so the model
        itself stays untouched; the edited record stops being a model."""
        if context is None:
            context = {}
        this = self.browse(cr, uid, ids[0], context=context)
        invoice_line_obj = self.pool.get('account.invoice.line')
        if this.plan_id and not vals.has_key('plan_id'):
            #this instance is a model, so we have to create a new plan instance instead of modifying it
            #copy the existing model
            temp_id = self.copy(cr, uid, this.id, None, context=context)
            #get the list of the invoice line that were linked to the model
            lists = invoice_line_obj.search(cr, uid, [('analytics_id','=',this.id)], context=context)
            #make them link to the copy
            invoice_line_obj.write(cr, uid, lists, {'analytics_id':temp_id}, context=context)
            #and finally modify the old model to be not a model anymore
            vals['plan_id'] = False
            if not vals.has_key('name'):
                vals['name'] = this.name and (str(this.name)+'*') or "*"
            if not vals.has_key('code'):
                vals['code'] = this.code and (str(this.code)+'*') or "*"
        return super(account_analytic_plan_instance, self).write(cr, uid, ids, vals, context=context)
class account_analytic_plan_instance_line(osv.osv):
    # One (analytic account, rate%) pair of a distribution instance.
    _name = "account.analytic.plan.instance.line"
    _description = "Analytic Instance Line"
    _rec_name = "analytic_account_id"
    _columns = {
        'plan_id': fields.many2one('account.analytic.plan.instance', 'Plan Id'),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, domain=[('type','<>','view')]),
        'rate': fields.float('Rate (%)', required=True),
    }
    _defaults = {
        'rate': 100.0
    }
    def name_get(self, cr, uid, ids, context=None):
        """Use the linked analytic account as the display name.
        NOTE(review): read() returns a many2one as an (id, name) tuple, so
        this yields (id, (acc_id, acc_name)) pairs rather than the
        (id, string) pairs the name_get API documents — confirm whether
        callers rely on the tuple before changing it."""
        if not ids:
            return []
        reads = self.read(cr, uid, ids, ['analytic_account_id'], context=context)
        res = []
        for record in reads:
            res.append((record['id'], record['analytic_account_id']))
        return res
class account_journal(osv.osv):
    # Attach an analytic plan to journals; it drives the dynamic form of
    # account.analytic.plan.instance (see fields_view_get above).
    _inherit = "account.journal"
    _name = "account.journal"
    _columns = {
        'plan_id': fields.many2one('account.analytic.plan', 'Analytic Plans'),
    }
class account_invoice_line(osv.osv):
    # Carry an analytic distribution on invoice lines and propagate it to
    # the generated move lines and analytic defaults.
    _inherit = "account.invoice.line"
    _name = "account.invoice.line"
    _columns = {
        'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
    }
    def create(self, cr, uid, vals, context=None):
        """Accept an (id, name) tuple for analytics_id and keep only the id."""
        if 'analytics_id' in vals and isinstance(vals['analytics_id'], tuple):
            vals['analytics_id'] = vals['analytics_id'][0]
        return super(account_invoice_line, self).create(cr, uid, vals, context=context)
    def move_line_get_item(self, cr, uid, line, context=None):
        """Copy the line's distribution onto the generated move line values."""
        res = super(account_invoice_line, self).move_line_get_item(cr, uid, line, context=context)
        res ['analytics_id'] = line.analytics_id and line.analytics_id.id or False
        return res
    def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
        """On product change, also propose the analytic distribution coming
        from the matching account.analytic.default rule, if any."""
        res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, context=context, company_id=company_id)
        rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), context=context)
        if rec and rec.analytics_id:
            res_prod['value'].update({'analytics_id': rec.analytics_id.id})
        return res_prod
class account_move_line(osv.osv):
    # Expand a move line's analytic distribution into one analytic line
    # per (account, rate) pair of the attached plan instance.
    _inherit = "account.move.line"
    _name = "account.move.line"
    _columns = {
        'analytics_id':fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
    }
    def _default_get_move_form_hook(self, cursor, user, data):
        """Strip analytics_id from the defaults proposed in the move form."""
        data = super(account_move_line, self)._default_get_move_form_hook(cursor, user, data)
        if data.has_key('analytics_id'):
            del(data['analytics_id'])
        return data
    def create_analytic_lines(self, cr, uid, ids, context=None):
        """After the standard analytic lines, (re)create one analytic line
        per instance line, weighted by its rate; requires the journal to
        have an analytic journal configured."""
        if context is None:
            context = {}
        super(account_move_line, self).create_analytic_lines(cr, uid, ids, context=context)
        analytic_line_obj = self.pool.get('account.analytic.line')
        for line in self.browse(cr, uid, ids, context=context):
           if line.analytics_id:
               if not line.journal_id.analytic_journal_id:
                   raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal.") % (line.journal_id.name,))
               # drop lines from a previous run before regenerating them
               toremove = analytic_line_obj.search(cr, uid, [('move_id','=',line.id)], context=context)
               if toremove:
                   analytic_line_obj.unlink(cr, uid, toremove, context=context)
               for line2 in line.analytics_id.account_ids:
                   # signed amount: credit positive, debit negative
                   val = (line.credit or 0.0) - (line.debit or 0.0)
                   amt=val * (line2.rate/100)
                   al_vals={
                       'name': line.name,
                       'date': line.date,
                       'account_id': line2.analytic_account_id.id,
                       'unit_amount': line.quantity,
                       'product_id': line.product_id and line.product_id.id or False,
                       'product_uom_id': line.product_uom_id and line.product_uom_id.id or False,
                       'amount': amt,
                       'general_account_id': line.account_id.id,
                       'move_id': line.id,
                       'journal_id': line.journal_id.analytic_journal_id.id,
                       'ref': line.ref,
                       'percentage': line2.rate
                   }
                   analytic_line_obj.create(cr, uid, al_vals, context=context)
        return True
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        # NOTE(review): this override only delegates to super() and returns
        # the result unchanged — it is currently a no-op hook.
        if context is None:
            context = {}
        result = super(account_move_line, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
        return result
class account_invoice(osv.osv):
    # Propagate the analytic distribution from invoice lines to the move
    # lines and expand it into per-account analytic line commands.
    _name = "account.invoice"
    _inherit = "account.invoice"
    def line_get_convert(self, cr, uid, x, part, date, context=None):
        """Keep analytics_id when converting invoice lines to move lines."""
        res=super(account_invoice,self).line_get_convert(cr, uid, x, part, date, context=context)
        res['analytics_id'] = x.get('analytics_id', False)
        return res
    def _get_analytic_lines(self, cr, uid, id, context=None):
        """Return the move-line dicts for invoice *id*, each carrying
        'analytic_lines' (0,0,vals) commands derived from its distribution,
        amounts converted to company currency and weighted by rate."""
        inv = self.browse(cr, uid, [id])[0]
        cur_obj = self.pool.get('res.currency')
        invoice_line_obj = self.pool.get('account.invoice.line')
        acct_ins_obj = self.pool.get('account.analytic.plan.instance')
        company_currency = inv.company_id.currency_id.id
        # customer invoices / supplier refunds count positive
        if inv.type in ('out_invoice', 'in_refund'):
            sign = 1
        else:
            sign = -1
        iml = invoice_line_obj.move_line_get(cr, uid, inv.id, context=context)
        for il in iml:
            if il.get('analytics_id', False):
                if inv.type in ('in_invoice', 'in_refund'):
                    ref = inv.reference
                else:
                    ref = self._convert_ref(cr, uid, inv.number)
                obj_move_line = acct_ins_obj.browse(cr, uid, il['analytics_id'], context=context)
                # NOTE(review): context.copy() assumes a dict was passed —
                # a None context would raise here; confirm callers.
                ctx = context.copy()
                ctx.update({'date': inv.date_invoice})
                amount_calc = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context=ctx) * sign
                qty = il['quantity']
                il['analytic_lines'] = []
                for line2 in obj_move_line.account_ids:
                    # split amount and quantity proportionally to the rate
                    amt = amount_calc * (line2.rate/100)
                    qtty = qty* (line2.rate/100)
                    al_vals = {
                        'name': il['name'],
                        'date': inv['date_invoice'],
                        'unit_amount': qtty,
                        'product_id': il['product_id'],
                        'account_id': line2.analytic_account_id.id,
                        'amount': amt,
                        'product_uom_id': il['uos_id'],
                        'general_account_id': il['account_id'],
                        'journal_id': self._get_journal_analytic(cr, uid, inv.type),
                        'ref': ref,
                    }
                    il['analytic_lines'].append((0, 0, al_vals))
        return iml
class account_analytic_plan(osv.osv):
    # Second extension of the plan defined above: let a plan point at a
    # default distribution instance.
    _inherit = "account.analytic.plan"
    _columns = {
        'default_instance_id': fields.many2one('account.analytic.plan.instance', 'Default Entries'),
    }
class analytic_default(osv.osv):
    # Analytic-default rules can now propose a whole distribution, not
    # just a single analytic account.
    _inherit = "account.analytic.default"
    _columns = {
        'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
    }
class sale_order_line(osv.osv):
    _inherit = "sale.order.line"
    # Method overridden to set the analytic account by default on criterion match
    def invoice_line_create(self, cr, uid, ids, context=None):
        """After the standard invoice lines are created, copy onto each the
        distribution proposed by the matching analytic-default rule."""
        create_ids = super(sale_order_line,self).invoice_line_create(cr, uid, ids, context=context)
        inv_line_obj = self.pool.get('account.invoice.line')
        acct_anal_def_obj = self.pool.get('account.analytic.default')
        if ids:
            # partner taken from the first sale line's order
            sale_line = self.browse(cr, uid, ids[0], context=context)
            for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
                rec = acct_anal_def_obj.account_get(cr, uid, line.product_id.id, sale_line.order_id.partner_id.id, uid, time.strftime('%Y-%m-%d'), context)
                if rec:
                    inv_line_obj.write(cr, uid, [line.id], {'analytics_id': rec.analytics_id.id}, context=context)
        return create_ids
class account_bank_statement(osv.osv):
    _inherit = "account.bank.statement"
    _name = "account.bank.statement"
    def _prepare_bank_move_line(self, cr, uid, st_line, move_id, amount, company_currency_id, context=None):
        """Carry the statement line's distribution onto the bank move line."""
        result = super(account_bank_statement,self)._prepare_bank_move_line(cr, uid, st_line,
            move_id, amount, company_currency_id, context=context)
        result['analytics_id'] = st_line.analytics_id.id
        return result
    def button_confirm_bank(self, cr, uid, ids, context=None):
        """Confirm statements, additionally checking that every line with a
        distribution has an analytic journal on the statement's journal."""
        super(account_bank_statement,self).button_confirm_bank(cr, uid, ids, context=context)
        for st in self.browse(cr, uid, ids, context=context):
            for st_line in st.line_ids:
                if st_line.analytics_id:
                    if not st.journal_id.analytic_journal_id:
                        raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal.") % (st.journal_id.name,))
                # NOTE(review): zero-amount lines are skipped, but the
                # 'continue' guards no following code — the loop body ends
                # here, so this check is currently a no-op.
                if not st_line.amount:
                    continue
        return True
class account_bank_statement_line(osv.osv):
    # Statement lines can carry an analytic distribution, consumed by
    # account_bank_statement._prepare_bank_move_line above.
    _inherit = "account.bank.statement.line"
    _name = "account.bank.statement.line"
    _columns = {
        'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gautamMalu/rootfs_xen_arndale | usr/lib/python3.4/bz2.py | 83 | 18839 | """Interface to the libbzip2 compression library.
This module provides a file interface, classes for incremental
(de)compression, and functions for one-shot (de)compression.
"""
__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor",
"open", "compress", "decompress"]
__author__ = "Nadeem Vawda <nadeem.vawda@gmail.com>"
import io
import warnings
try:
from threading import RLock
except ImportError:
from dummy_threading import RLock
from _bz2 import BZ2Compressor, BZ2Decompressor
_MODE_CLOSED = 0
_MODE_READ = 1
_MODE_READ_EOF = 2
_MODE_WRITE = 3
_BUFFER_SIZE = 8192
_builtin_open = open
class BZ2File(io.BufferedIOBase):
"""A file object providing transparent bzip2 (de)compression.
A BZ2File can act as a wrapper for an existing file object, or refer
directly to a named file on disk.
Note that BZ2File provides a *binary* file interface - data read is
returned as bytes, and data to be written should be given as bytes.
"""
def __init__(self, filename, mode="r", buffering=None, compresslevel=9):
"""Open a bzip2-compressed file.
If filename is a str or bytes object, it gives the name
of the file to be opened. Otherwise, it should be a file object,
which will be used to read or write the compressed data.
mode can be 'r' for reading (default), 'w' for (over)writing,
'x' for creating exclusively, or 'a' for appending. These can
equivalently be given as 'rb', 'wb', 'xb', and 'ab'.
buffering is ignored. Its use is deprecated.
If mode is 'w', 'x' or 'a', compresslevel can be a number between 1
and 9 specifying the level of compression: 1 produces the least
compression, and 9 (default) produces the most compression.
If mode is 'r', the input file may be the concatenation of
multiple compressed streams.
"""
# This lock must be recursive, so that BufferedIOBase's
# readline(), readlines() and writelines() don't deadlock.
self._lock = RLock()
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
self._pos = 0
self._size = -1
if buffering is not None:
warnings.warn("Use of 'buffering' argument is deprecated",
DeprecationWarning)
if not (1 <= compresslevel <= 9):
raise ValueError("compresslevel must be between 1 and 9")
if mode in ("", "r", "rb"):
mode = "rb"
mode_code = _MODE_READ
self._decompressor = BZ2Decompressor()
self._buffer = b""
self._buffer_offset = 0
elif mode in ("w", "wb"):
mode = "wb"
mode_code = _MODE_WRITE
self._compressor = BZ2Compressor(compresslevel)
elif mode in ("x", "xb"):
mode = "xb"
mode_code = _MODE_WRITE
self._compressor = BZ2Compressor(compresslevel)
elif mode in ("a", "ab"):
mode = "ab"
mode_code = _MODE_WRITE
self._compressor = BZ2Compressor(compresslevel)
else:
raise ValueError("Invalid mode: %r" % (mode,))
if isinstance(filename, (str, bytes)):
self._fp = _builtin_open(filename, mode)
self._closefp = True
self._mode = mode_code
elif hasattr(filename, "read") or hasattr(filename, "write"):
self._fp = filename
self._mode = mode_code
else:
raise TypeError("filename must be a str or bytes object, or a file")
def close(self):
"""Flush and close the file.
May be called more than once without error. Once the file is
closed, any other operation on it will raise a ValueError.
"""
with self._lock:
if self._mode == _MODE_CLOSED:
return
try:
if self._mode in (_MODE_READ, _MODE_READ_EOF):
self._decompressor = None
elif self._mode == _MODE_WRITE:
self._fp.write(self._compressor.flush())
self._compressor = None
finally:
try:
if self._closefp:
self._fp.close()
finally:
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
self._buffer = b""
self._buffer_offset = 0
@property
def closed(self):
"""True if this file is closed."""
return self._mode == _MODE_CLOSED
def fileno(self):
"""Return the file descriptor for the underlying file."""
self._check_not_closed()
return self._fp.fileno()
def seekable(self):
"""Return whether the file supports seeking."""
return self.readable() and self._fp.seekable()
def readable(self):
"""Return whether the file was opened for reading."""
self._check_not_closed()
return self._mode in (_MODE_READ, _MODE_READ_EOF)
def writable(self):
"""Return whether the file was opened for writing."""
self._check_not_closed()
return self._mode == _MODE_WRITE
    # Mode-checking helper functions. These raise ValueError for a closed
    # file and io.UnsupportedOperation for a mode mismatch, matching the
    # behavior of the built-in io classes.
    def _check_not_closed(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
    def _check_can_read(self):
        if self._mode not in (_MODE_READ, _MODE_READ_EOF):
            # Distinguish "closed" (ValueError) from "wrong mode".
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for reading")
    def _check_can_write(self):
        if self._mode != _MODE_WRITE:
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for writing")
    def _check_can_seek(self):
        if self._mode not in (_MODE_READ, _MODE_READ_EOF):
            self._check_not_closed()
            raise io.UnsupportedOperation("Seeking is only supported "
                                          "on files open for reading")
        if not self._fp.seekable():
            raise io.UnsupportedOperation("The underlying file object "
                                          "does not support seeking")
    # Fill the readahead buffer if it is empty. Returns False on EOF.
    def _fill_buffer(self):
        if self._mode == _MODE_READ_EOF:
            return False
        # Depending on the input data, our call to the decompressor may not
        # return any data. In this case, try again after reading another block.
        while self._buffer_offset == len(self._buffer):
            # Prefer leftover bytes the previous decompressor did not consume
            # (they belong to the next concatenated bzip2 stream, if any).
            rawblock = (self._decompressor.unused_data or
                        self._fp.read(_BUFFER_SIZE))
            if not rawblock:
                if self._decompressor.eof:
                    # End-of-stream marker and end of file. We're good.
                    self._mode = _MODE_READ_EOF
                    self._size = self._pos
                    return False
                else:
                    # Problem - we were expecting more compressed data.
                    raise EOFError("Compressed file ended before the "
                                   "end-of-stream marker was reached")
            if self._decompressor.eof:
                # Continue to next stream: bzip2 files may consist of several
                # independent streams concatenated back to back.
                self._decompressor = BZ2Decompressor()
                try:
                    self._buffer = self._decompressor.decompress(rawblock)
                except OSError:
                    # Trailing data isn't a valid bzip2 stream. We're done here.
                    self._mode = _MODE_READ_EOF
                    self._size = self._pos
                    return False
            else:
                self._buffer = self._decompressor.decompress(rawblock)
        self._buffer_offset = 0
        return True
    # Read data until EOF.
    # If return_data is false, consume the data without returning it.
    def _read_all(self, return_data=True):
        # The loop assumes that _buffer_offset is 0. Ensure that this is true.
        self._buffer = self._buffer[self._buffer_offset:]
        self._buffer_offset = 0
        blocks = []
        while self._fill_buffer():
            if return_data:
                blocks.append(self._buffer)
            self._pos += len(self._buffer)
            # Mark the buffer consumed so the next _fill_buffer() call
            # decompresses more input.
            self._buffer = b""
        if return_data:
            return b"".join(blocks)
    # Read a block of up to n bytes.
    # If return_data is false, consume the data without returning it.
    def _read_block(self, n, return_data=True):
        # If we have enough data buffered, return immediately.
        end = self._buffer_offset + n
        if end <= len(self._buffer):
            data = self._buffer[self._buffer_offset : end]
            self._buffer_offset = end
            self._pos += len(data)
            return data if return_data else None
        # The loop assumes that _buffer_offset is 0. Ensure that this is true.
        self._buffer = self._buffer[self._buffer_offset:]
        self._buffer_offset = 0
        blocks = []
        while n > 0 and self._fill_buffer():
            if n < len(self._buffer):
                # Final partial chunk: take only what was requested and keep
                # the remainder buffered for the next read.
                data = self._buffer[:n]
                self._buffer_offset = n
            else:
                # Consume the whole buffer and keep looping.
                data = self._buffer
                self._buffer = b""
            if return_data:
                blocks.append(data)
            self._pos += len(data)
            n -= len(data)
        if return_data:
            return b"".join(blocks)
def peek(self, n=0):
"""Return buffered data without advancing the file position.
Always returns at least one byte of data, unless at EOF.
The exact number of bytes returned is unspecified.
"""
with self._lock:
self._check_can_read()
if not self._fill_buffer():
return b""
return self._buffer[self._buffer_offset:]
def read(self, size=-1):
"""Read up to size uncompressed bytes from the file.
If size is negative or omitted, read until EOF is reached.
Returns b'' if the file is already at EOF.
"""
with self._lock:
self._check_can_read()
if size == 0:
return b""
elif size < 0:
return self._read_all()
else:
return self._read_block(size)
    def read1(self, size=-1):
        """Read up to size uncompressed bytes, while trying to avoid
        making multiple reads from the underlying stream.
        Returns b'' if the file is at EOF.
        """
        # Usually, read1() calls _fp.read() at most once. However, sometimes
        # this does not give enough data for the decompressor to make progress.
        # In this case we make multiple reads, to avoid returning b"".
        with self._lock:
            self._check_can_read()
            if (size == 0 or
                # Only call _fill_buffer() if the buffer is actually empty.
                # This gives a significant speedup if *size* is small.
                (self._buffer_offset == len(self._buffer) and not self._fill_buffer())):
                return b""
            if size > 0:
                # Serve the request directly from the readahead buffer.
                data = self._buffer[self._buffer_offset :
                                    self._buffer_offset + size]
                self._buffer_offset += len(data)
            else:
                # Negative size: hand over everything currently buffered.
                data = self._buffer[self._buffer_offset:]
                self._buffer = b""
                self._buffer_offset = 0
            self._pos += len(data)
            return data
    def readinto(self, b):
        """Read up to len(b) bytes into b.
        Returns the number of bytes read (0 for EOF).
        """
        with self._lock:
            # Delegate to the generic implementation, which calls self.read()
            # and copies the result into b.
            return io.BufferedIOBase.readinto(self, b)
    def readline(self, size=-1):
        """Read a line of uncompressed bytes from the file.
        The terminating newline (if present) is retained. If size is
        non-negative, no more than size bytes will be read (in which
        case the line may be incomplete). Returns b'' if already at EOF.
        """
        # Accept any integer-like object (operator.index semantics).
        if not isinstance(size, int):
            if not hasattr(size, "__index__"):
                raise TypeError("Integer argument expected")
            size = size.__index__()
        with self._lock:
            self._check_can_read()
            # Shortcut for the common case - the whole line is in the buffer.
            if size < 0:
                end = self._buffer.find(b"\n", self._buffer_offset) + 1
                if end > 0:
                    line = self._buffer[self._buffer_offset : end]
                    self._buffer_offset = end
                    self._pos += len(line)
                    return line
            # Slow path: the generic implementation loops over peek()/read().
            return io.BufferedIOBase.readline(self, size)
    def readlines(self, size=-1):
        """Read a list of lines of uncompressed bytes from the file.
        size can be specified to control the number of lines read: no
        further lines will be read once the total size of the lines read
        so far equals or exceeds size.
        """
        # Accept any integer-like object (operator.index semantics).
        if not isinstance(size, int):
            if not hasattr(size, "__index__"):
                raise TypeError("Integer argument expected")
            size = size.__index__()
        with self._lock:
            # The generic implementation repeatedly calls self.readline().
            return io.BufferedIOBase.readlines(self, size)
def write(self, data):
"""Write a byte string to the file.
Returns the number of uncompressed bytes written, which is
always len(data). Note that due to buffering, the file on disk
may not reflect the data written until close() is called.
"""
with self._lock:
self._check_can_write()
compressed = self._compressor.compress(data)
self._fp.write(compressed)
self._pos += len(data)
return len(data)
    def writelines(self, seq):
        """Write a sequence of byte strings to the file.
        Returns the number of uncompressed bytes written.
        seq can be any iterable yielding byte strings.
        Line separators are not added between the written byte strings.
        """
        with self._lock:
            # The generic implementation calls self.write() per item.
            return io.BufferedIOBase.writelines(self, seq)
    # Rewind the file to the beginning of the data stream.
    def _rewind(self):
        # Reposition the raw file at offset 0 and discard all decompression
        # state, so subsequent reads start from the first bzip2 stream.
        self._fp.seek(0, 0)
        self._mode = _MODE_READ
        self._pos = 0
        self._decompressor = BZ2Decompressor()
        self._buffer = b""
        self._buffer_offset = 0
    def seek(self, offset, whence=0):
        """Change the file position.
        The new position is specified by offset, relative to the
        position indicated by whence. Values for whence are:
            0: start of stream (default); offset must not be negative
            1: current stream position
            2: end of stream; offset must not be positive
        Returns the new file position.
        Note that seeking is emulated, so depending on the parameters,
        this operation may be extremely slow.
        """
        with self._lock:
            self._check_can_seek()
            # Recalculate offset as an absolute file position.
            if whence == 0:
                pass
            elif whence == 1:
                offset = self._pos + offset
            elif whence == 2:
                # Seeking relative to EOF - we need to know the file's size.
                # _size is only known once EOF has been reached; decompress
                # (and discard) the remainder of the file to find it.
                if self._size < 0:
                    self._read_all(return_data=False)
                offset = self._size + offset
            else:
                raise ValueError("Invalid value for whence: %s" % (whence,))
            # Make it so that offset is the number of bytes to skip forward.
            # Seeking backwards requires restarting decompression from the
            # beginning of the file.
            if offset < self._pos:
                self._rewind()
            else:
                offset -= self._pos
            # Read and discard data until we reach the desired position.
            self._read_block(offset, return_data=False)
            return self._pos
    def tell(self):
        """Return the current file position."""
        with self._lock:
            self._check_not_closed()
            # _pos tracks the position in the *uncompressed* stream.
            return self._pos
def open(filename, mode="rb", compresslevel=9,
         encoding=None, errors=None, newline=None):
    """Open a bzip2-compressed file in binary or text mode.

    *filename* can be an actual filename (a str or bytes object), or an
    existing file object to read from or write to.

    Binary modes are "r"/"rb" (default), "w"/"wb", "x"/"xb" and "a"/"ab";
    text modes are "rt", "wt", "xt" and "at". The default compresslevel
    is 9.

    In binary mode this is equivalent to the BZ2File constructor, and the
    encoding, errors and newline arguments must not be provided. In text
    mode, the BZ2File is wrapped in an io.TextIOWrapper with the given
    encoding, error handling behavior, and line ending(s).
    """
    text_mode = "t" in mode
    if text_mode and "b" in mode:
        raise ValueError("Invalid mode: %r" % (mode,))
    if not text_mode:
        # The text-layer options are meaningless without a TextIOWrapper.
        if encoding is not None:
            raise ValueError("Argument 'encoding' not supported in binary mode")
        if errors is not None:
            raise ValueError("Argument 'errors' not supported in binary mode")
        if newline is not None:
            raise ValueError("Argument 'newline' not supported in binary mode")
    raw = BZ2File(filename, mode.replace("t", ""), compresslevel=compresslevel)
    if text_mode:
        return io.TextIOWrapper(raw, encoding, errors, newline)
    return raw
def compress(data, compresslevel=9):
    """Compress *data* in one shot and return the complete bzip2 stream.

    compresslevel, if given, must be a number between 1 and 9.
    For incremental compression, use a BZ2Compressor object instead.
    """
    compressor = BZ2Compressor(compresslevel)
    return b"".join([compressor.compress(data), compressor.flush()])
def decompress(data):
    """Decompress *data*, which may contain several concatenated streams.

    For incremental decompression, use a BZ2Decompressor object instead.
    """
    chunks = []
    while data:
        decomp = BZ2Decompressor()
        try:
            chunks.append(decomp.decompress(data))
        except OSError:
            if not chunks:
                raise  # Error on the very first stream; bail out.
            break  # Leftover data is not a valid bzip2 stream; ignore it.
        if not decomp.eof:
            raise ValueError("Compressed data ended before the "
                             "end-of-stream marker was reached")
        data = decomp.unused_data
    return b"".join(chunks)
| gpl-2.0 |
studio666/gnuradio | gr-filter/python/filter/qa_pm_remez.py | 57 | 5876 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, filter
import sys, math
# ----------------------------------------------------------------
# See optfir for an explanation of these.
def stopband_atten_to_dev (atten_db):
    """Convert a stopband attenuation in dB to an absolute value.

    Uses a float divisor: under Python 2, ``-atten_db/20`` truncated for
    integer attenuations not divisible by 20 (e.g. 50 dB gave 10**-3
    instead of 10**-2.5).
    """
    return 10**(-atten_db/20.0)
def passband_ripple_to_dev (ripple_db):
    """Convert passband ripple spec expressed in dB to an absolute value.

    Uses a float divisor: under Python 2, ``ripple_db/20`` truncated for
    integer ripple specs (e.g. 1 dB gave 0 instead of ~0.0575).
    """
    ratio = 10**(ripple_db/20.0)
    return (ratio - 1) / (ratio + 1)
# ----------------------------------------------------------------
def remezord (fcuts, mags, devs, fsamp = 2):
    '''
    FIR order estimator (lowpass, highpass, bandpass, mulitiband).

    Returns (n, ff, aa, wts): the estimated filter order n, the
    normalized band-edge list ff, the per-edge desired amplitudes aa and
    the band weights wts, suitable for a Parks-McClellan (remez) design.

    fcuts: band-edge frequencies, in the same units as fsamp.
    mags:  desired magnitude in each band; len(fcuts) == 2*len(mags) - 2.
    devs:  allowed deviation (ripple) per band; same length as mags.
    fsamp: sampling frequency (default 2, i.e. fcuts already normalized).
    '''
    # get local copies so the caller's sequences are never mutated
    fcuts = fcuts[:]
    mags = mags[:]
    devs = devs[:]
    for i in range (len (fcuts)):
        fcuts[i] = float (fcuts[i]) / fsamp
    nf = len (fcuts)
    nm = len (mags)
    nd = len (devs)
    nbands = nm
    # NOTE: raise with call syntax works on both Python 2 and 3; the old
    # "raise ValueError, msg" statement form is a SyntaxError on Python 3.
    if nm != nd:
        raise ValueError("Length of mags and devs must be equal")
    if nf != 2 * (nbands - 1):
        raise ValueError("Length of f must be 2 * len (mags) - 2")
    for i in range (len (mags)):
        if mags[i] != 0:                        # if not stopband, get relative deviation
            devs[i] = devs[i] / mags[i]
    # separate the passband and stopband edges
    f1 = fcuts[0::2]
    f2 = fcuts[1::2]
    # find the narrowest transition band; it dominates the filter order
    n = 0
    min_delta = 2
    for i in range (len (f1)):
        if f2[i] - f1[i] < min_delta:
            n = i
            min_delta = f2[i] - f1[i]
    if nbands == 2:
        # lowpass or highpass case (use formula)
        l = lporder (f1[n], f2[n], devs[0], devs[1])
    else:
        # bandpass or multipass case
        # try different lowpasses and take the worst one that
        # goes through the BP specs
        l = 0
        for i in range (1, nbands-1):
            l1 = lporder (f1[i-1], f2[i-1], devs[i], devs[i-1])
            l2 = lporder (f1[i], f2[i], devs[i], devs[i+1])
            l = max (l, l1, l2)
    n = int (math.ceil (l)) - 1               # need order, not length for remez
    # cook up remez compatible result: edges normalized to [0, 1]
    ff = [0] + fcuts + [1]
    for i in range (1, len (ff) - 1):
        ff[i] *= 2
    # each band contributes a pair of identical amplitudes (one per edge)
    aa = []
    for a in mags:
        aa = aa + [a, a]
    # weight each band by how tight its deviation spec is
    max_dev = max (devs)
    wts = [1] * len(devs)
    for i in range (len (wts)):
        wts[i] = max_dev / devs[i]
    return (n, ff, aa, wts)
def lporder (freq1, freq2, delta_p, delta_s):
    '''
    FIR lowpass filter length estimator.

    freq1/freq2 are the normalized passband/stopband edges; delta_p and
    delta_s are the passband and stopband deviations. Returns the
    (fractional) estimated filter length.
    '''
    df = abs (freq2 - freq1)
    ddp = math.log10 (delta_p)
    dds = math.log10 (delta_s)
    # empirical fit coefficients (Herrmann/Rabiner/Chan style estimate)
    a1 = 5.309e-3
    a2 = 7.114e-2
    a3 = -4.761e-1
    a4 = -2.66e-3
    a5 = -5.941e-1
    a6 = -4.278e-1
    b1 = 11.01217
    b2 = 0.5124401
    t1 = a1 * ddp * ddp
    t2 = a2 * ddp
    t3 = a4 * ddp * ddp
    t4 = a5 * ddp
    dinf=((t1 + t2 + a3) * dds) + (t3 + t4 + a6)
    ff = b1 + b2 * (ddp - dds)
    n = dinf / df - ff * df + 1
    return n
# ----------------------------------------------------------------
class test_pm_remez(gr_unittest.TestCase):
    """QA for GNU Radio's Parks-McClellan (pm_remez) FIR designer."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_low_pass(self):
        # Design a lowpass filter from ripple/attenuation specs via the
        # remezord estimator above, then compare against golden taps.
        gain = 1
        Fs = 1
        freq1 = 0.1
        freq2 = 0.2
        passband_ripple_db = 0.01
        stopband_atten_db = 60
        passband_dev = passband_ripple_to_dev(passband_ripple_db)
        stopband_dev = stopband_atten_to_dev(stopband_atten_db)
        desired_ampls = (gain, 0)
        (n, fo, ao, w) = remezord([freq1, freq2], desired_ampls,
                                  [passband_dev, stopband_dev], Fs)
        # n + 2 taps gives a little margin over the estimated order.
        new_taps = filter.pm_remez(n + 2, fo, ao, w, "bandpass")
        # Golden reference taps; symmetric (linear-phase) by construction.
        known_taps = (-0.0008370135734511828, -0.0006622211673134374,
                      0.0008501079576365787, 0.003059609130249229,
                      0.003202235537205373, -0.001000899296974219,
                      -0.007589728680590891, -0.009790921118281865,
                      -0.001524210202628562, 0.014373535837200111,
                      0.02392881326993834, 0.011798133085019008,
                      -0.021954446348997188, -0.05293436740264934,
                      -0.04375787096766848, 0.028038890498420392,
                      0.14612655590172896, 0.25738578419108626,
                      0.302967004188747, 0.25738578419108626,
                      0.14612655590172896, 0.028038890498420392,
                      -0.04375787096766848, -0.05293436740264934,
                      -0.021954446348997188, 0.011798133085019008,
                      0.02392881326993834, 0.014373535837200111,
                      -0.001524210202628562, -0.009790921118281865,
                      -0.007589728680590891, -0.001000899296974219,
                      0.003202235537205373, 0.003059609130249229,
                      0.0008501079576365787, -0.0006622211673134374,
                      -0.0008370135734511828)
        # 5 decimal places of agreement is enough for cross-platform runs.
        self.assertFloatTuplesAlmostEqual(known_taps, new_taps, 5)
# Run the QA suite via GNU Radio's unittest wrapper (also writes XML results).
if __name__ == '__main__':
    gr_unittest.run(test_pm_remez, "test_pm_remez.xml")
| gpl-3.0 |
gladsonvm/haystackdemo | lib/python2.7/site-packages/django/core/management/commands/syncdb.py | 76 | 8080 | from optparse import make_option
import sys
import traceback
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
class Command(NoArgsCommand):
    # NOTE: this is Python 2 era Django code (print statements,
    # "except Exception, e" syntax); it will not parse under Python 3.
    option_list = NoArgsCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
                'Defaults to the "default" database.'),
    )
    help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
    def handle_noargs(self, **options):
        """Create missing tables, run custom SQL and indexes, load fixtures."""
        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')
        show_traceback = options.get('traceback')
        # Stealth option -- 'load_initial_data' is used by the testing setup
        # process to disable initial fixture loading.
        load_initial_data = options.get('load_initial_data', True)
        self.style = no_style()
        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError, exc:
                # This is slightly hackish. We want to ignore ImportErrors
                # if the "management" module itself is missing -- but we don't
                # want to ignore the exception if the management module exists
                # but raises an ImportError for some reason. The only way we
                # can do this is to check the text of the exception. Note that
                # we're a bit broad in how we check the text, because different
                # Python implementations may not use the same text.
                # CPython uses the text "No module named management"
                # PyPy uses "No module named myproject.myapp.management"
                msg = exc.args[0]
                if not msg.startswith('No module named') or 'management' not in msg:
                    raise
        db = options.get('database')
        connection = connections[db]
        cursor = connection.cursor()
        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        # Maps a referenced model -> list of (model, field) pairs whose FK
        # constraints must be emitted once the referenced table exists.
        pending_references = {}
        # Build the manifest of apps and models that are to be synchronized
        all_models = [
            (app.__name__.split('.')[-2],
                [m for m in models.get_models(app, include_auto_created=True)
                if router.allow_syncdb(db, m)])
            for app in models.get_apps()
        ]
        def model_installed(model):
            # A model (or its auto-created M2M table) that already has a
            # table in the database does not need to be created again.
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not ((converter(opts.db_table) in tables) or
                (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
        manifest = SortedDict(
            (app_name, filter(model_installed, model_list))
            for app_name, model_list in all_models
        )
        # Phase 1: create the tables for each model.
        if verbosity >= 1:
            print "Creating tables ..."
        for app_name, model_list in manifest.items():
            for model in model_list:
                # Create the model's database table, if it doesn't already exist.
                if verbosity >= 3:
                    print "Processing %s.%s model" % (app_name, model._meta.object_name)
                sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
                seen_models.add(model)
                created_models.add(model)
                for refto, refs in references.items():
                    pending_references.setdefault(refto, []).extend(refs)
                    if refto in seen_models:
                        sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
                sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
                if verbosity >= 1 and sql:
                    print "Creating table %s" % model._meta.db_table
                for statement in sql:
                    cursor.execute(statement)
                tables.append(connection.introspection.table_name_converter(model._meta.db_table))
        transaction.commit_unless_managed(using=db)
        # Send the post_syncdb signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_sync_signal(created_models, verbosity, interactive, db)
        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()
        # Phase 2: install custom SQL for the app (but only if this
        # is a model we've just created)
        if verbosity >= 1:
            print "Installing custom SQL ..."
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, self.style, connection)
                    if custom_sql:
                        if verbosity >= 2:
                            print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in custom_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            # Custom SQL failures are reported but do not
                            # abort the whole syncdb run.
                            sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
                                                (app_name, model._meta.object_name, e))
                            if show_traceback:
                                traceback.print_exc()
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)
                    else:
                        if verbosity >= 3:
                            print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)
        if verbosity >= 1:
            print "Installing indexes ..."
        # Phase 3: install SQL indices for all newly created models.
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                    if index_sql:
                        if verbosity >= 2:
                            print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in index_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
                                                (app_name, model._meta.object_name, e))
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)
        # Phase 4: load initial_data fixtures (unless that has been disabled).
        if load_initial_data:
            from django.core.management import call_command
            call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
| mit |
mkaluza/external_chromium_org | ppapi/native_client/tools/browser_tester/browsertester/server.py | 126 | 10676 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import cgi
import mimetypes
import os
import os.path
import posixpath
import SimpleHTTPServer
import SocketServer
import threading
import time
import urllib
import urlparse
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  """Request handler for the browser tester's HTTP server.

  Serves files from configured mappings/directories, accepts uploads via
  POST, and exposes an RPC mechanism under /TESTER/ that the test page
  uses to talk back to the harness. (Python 2 code: uses iteritems(),
  "except Exception, e" syntax and the Python 2 httplib stack.)
  """
  def NormalizePath(self, path):
    """Strip query/fragment and collapse *path* to a safe POSIX form."""
    path = path.split('?', 1)[0]
    path = path.split('#', 1)[0]
    path = posixpath.normpath(urllib.unquote(path))
    words = path.split('/')
    # Drop '.', '..' and empty components so a request cannot escape the
    # served directories.
    bad = set((os.curdir, os.pardir, ''))
    words = [word for word in words if word not in bad]
    # The path of the request should always use POSIX-style path separators, so
    # that the filename input of --map_file can be a POSIX-style path and still
    # match correctly in translate_path().
    return '/'.join(words)
  def translate_path(self, path):
    """Map a request path to an on-disk filename, honoring explicit mappings."""
    path = self.NormalizePath(path)
    if path in self.server.file_mapping:
      return self.server.file_mapping[path]
    for extra_dir in self.server.serving_dirs:
      # TODO(halyavin): set allowed paths in another parameter?
      full_path = os.path.join(extra_dir, os.path.basename(path))
      if os.path.isfile(full_path):
        return full_path
      # Try the complete relative path, not just a basename. This allows the
      # user to serve everything recursively under extra_dir, not just one
      # level deep.
      #
      # One use case for this is the Native Client SDK examples. The examples
      # expect to be able to access files as relative paths from the root of
      # the example directory.
      # Sometimes two subdirectories contain files with the same name, so
      # including all subdirectories in self.server.serving_dirs will not do
      # the correct thing; (i.e. the wrong file will be chosen, even though the
      # correct path was given).
      full_path = os.path.join(extra_dir, path)
      if os.path.isfile(full_path):
        return full_path
    if not path.endswith('favicon.ico') and not self.server.allow_404:
      self.server.listener.ServerError('Cannot find file \'%s\'' % path)
    return path
  def guess_type(self, path):
    """Return the MIME type for *path* using the server-level mapping."""
    # We store the extension -> MIME type mapping in the server instead of the
    # request handler so we that can add additional mapping entries via the
    # command line.
    base, ext = posixpath.splitext(path)
    if ext in self.server.extensions_mapping:
      return self.server.extensions_mapping[ext]
    ext = ext.lower()
    if ext in self.server.extensions_mapping:
      return self.server.extensions_mapping[ext]
    else:
      # '' is the configured catch-all entry.
      return self.server.extensions_mapping['']
  def SendRPCResponse(self, response):
    """Send *response* as text/plain and close the connection."""
    self.send_response(200)
    self.send_header("Content-type", "text/plain")
    self.send_header("Content-length", str(len(response)))
    self.end_headers()
    self.wfile.write(response)
    # shut down the connection
    self.wfile.flush()
    self.connection.shutdown(1)
  def HandleRPC(self, name, query):
    """Dispatch RPC *name* to the listener, passing query params as kwargs."""
    kargs = {}
    for k, v in query.iteritems():
      # cgi.parse_qs maps each key to a list; only single values are allowed.
      assert len(v) == 1, k
      kargs[k] = v[0]
    l = self.server.listener
    try:
      response = getattr(l, name)(**kargs)
    except Exception, e:
      # Report the failure to the browser, then re-raise for the harness.
      self.SendRPCResponse('%r' % (e,))
      raise
    else:
      self.SendRPCResponse(response)
  # For Last-Modified-based caching, the timestamp needs to be old enough
  # for the browser cache to be used (at least 60 seconds).
  # http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
  # Often we clobber and regenerate files for testing, so this is needed
  # to actually use the browser cache.
  def send_header(self, keyword, value):
    if keyword == 'Last-Modified':
      # Shift the reported mtime 360 seconds into the past (see above).
      last_mod_format = '%a, %d %b %Y %H:%M:%S GMT'
      old_value_as_t = time.strptime(value, last_mod_format)
      old_value_in_secs = time.mktime(old_value_as_t)
      new_value_in_secs = old_value_in_secs - 360
      value = time.strftime(last_mod_format,
                            time.localtime(new_value_in_secs))
    SimpleHTTPServer.SimpleHTTPRequestHandler.send_header(self,
                                                          keyword,
                                                          value)
  def do_POST(self):
    """Handle POST: /echo mirrors the body; other paths are file uploads."""
    # Backwards compatible - treat result as tuple without named fields.
    _, _, path, _, query, _ = urlparse.urlparse(self.path)
    self.server.listener.Log('POST %s (%s)' % (self.path, path))
    if path == '/echo':
      self.send_response(200)
      self.end_headers()
      data = self.rfile.read(int(self.headers.getheader('content-length')))
      self.wfile.write(data)
    elif self.server.output_dir is not None:
      # Try to write the file to disk.
      path = self.NormalizePath(path)
      output_path = os.path.join(self.server.output_dir, path)
      try:
        outfile = open(output_path, 'w')
      except IOError:
        error_message = 'File not found: %r' % output_path
        self.server.listener.ServerError(error_message)
        self.send_error(404, error_message)
        return
      try:
        data = self.rfile.read(int(self.headers.getheader('content-length')))
        outfile.write(data)
      except IOError, e:
        # Clean up the partial file before reporting the failure.
        outfile.close()
        try:
          os.remove(output_path)
        except OSError:
          # Oh, well.
          pass
        error_message = 'Can\'t write file: %r\n' % output_path
        error_message += 'Exception:\n%s' % str(e)
        self.server.listener.ServerError(error_message)
        self.send_error(500, error_message)
        return
      outfile.close()
      # Send a success response.
      self.send_response(200)
      self.end_headers()
    else:
      error_message = 'File not found: %r' % path
      self.server.listener.ServerError(error_message)
      self.send_error(404, error_message)
    self.server.ResetTimeout()
  def do_GET(self):
    """Handle GET: /TESTER/ RPCs, configured redirects, or file serving."""
    # Backwards compatible - treat result as tuple without named fields.
    _, _, path, _, query, _ = urlparse.urlparse(self.path)
    tester = '/TESTER/'
    if path.startswith(tester):
      # If the path starts with '/TESTER/', the GET is an RPC call.
      name = path[len(tester):]
      # Supporting Python 2.5 prevents us from using urlparse.parse_qs
      query = cgi.parse_qs(query, True)
      # Serialize RPC handling; the ThreadingMixIn may run several at once.
      self.server.rpc_lock.acquire()
      try:
        self.HandleRPC(name, query)
      finally:
        self.server.rpc_lock.release()
      # Don't reset the timeout. This is not "part of the test", rather it's
      # used to tell us if the renderer process is still alive.
      if name == 'JavaScriptIsAlive':
        self.server.JavaScriptIsAlive()
        return
    elif path in self.server.redirect_mapping:
      dest = self.server.redirect_mapping[path]
      self.send_response(301, 'Moved')
      self.send_header('Location', dest)
      self.end_headers()
      self.wfile.write(self.error_message_format %
                       {'code': 301,
                        'message': 'Moved',
                        'explain': 'Object moved permanently'})
      self.server.listener.Log('REDIRECT %s (%s -> %s)' %
                               (self.path, path, dest))
    else:
      self.server.listener.Log('GET %s (%s)' % (self.path, path))
      # A normal GET request for transferring files, etc.
      f = self.send_head()
      if f:
        self.copyfile(f, self.wfile)
        f.close()
    self.server.ResetTimeout()
  def copyfile(self, source, outputfile):
    """Copy *source* to *outputfile*, optionally throttled to a target rate."""
    # Bandwidth values <= 0.0 are considered infinite
    if self.server.bandwidth <= 0.0:
      return SimpleHTTPServer.SimpleHTTPRequestHandler.copyfile(
          self, source, outputfile)
    self.server.listener.Log('Simulating %f mbps server BW' %
                             self.server.bandwidth)
    chunk_size = 1500 # What size to use?
    bits_per_sec = self.server.bandwidth * 1000000
    start_time = time.time()
    data_sent = 0
    while True:
      chunk = source.read(chunk_size)
      if len(chunk) == 0:
        break
      # Sleep just long enough to keep the average rate at the target.
      cur_elapsed = time.time() - start_time
      target_elapsed = (data_sent + len(chunk)) * 8 / bits_per_sec
      if (cur_elapsed < target_elapsed):
        time.sleep(target_elapsed - cur_elapsed)
      outputfile.write(chunk)
      data_sent += len(chunk)
    self.server.listener.Log('Streamed %d bytes in %f s' %
                             (data_sent, time.time() - start_time))
  # Disable the built-in logging
  def log_message(self, format, *args):
    pass
# The ThreadingMixIn allows the server to handle multiple requests
# concurently (or at least as concurently as Python allows). This is desirable
# because server sockets only allow a limited "backlog" of pending connections
# and in the worst case the browser could make multiple connections and exceed
# this backlog - causing the server to drop requests. Using ThreadingMixIn
# helps reduce the chance this will happen.
# There were apparently some problems using this Mixin with Python 2.5, but we
# are no longer using anything older than 2.6.
class Server(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
  """HTTP server used by the browser tester.

  ThreadingMixIn lets concurrent browser connections be handled on
  separate threads, so one slow request cannot exhaust the listen
  backlog (see the comment block above).
  """

  def Configure(
      self, file_mapping, redirect_mapping, extensions_mapping, allow_404,
      bandwidth, listener, serving_dirs=None, output_dir=None):
    """Install the mappings and policies used by RequestHandler.

    file_mapping: dict of request path -> file on disk.
    redirect_mapping: dict of request path -> redirect destination.
    extensions_mapping: extra file-extension -> MIME type entries.
    allow_404: if False, missing files are reported to the listener.
    bandwidth: simulated server bandwidth in mbps (<= 0 means unlimited).
    listener: object receiving Log/ServerError/RPC callbacks.
    serving_dirs: directories searched for files not in file_mapping.
    output_dir: directory where POSTed files are written (None disables).
    """
    self.file_mapping = file_mapping
    self.redirect_mapping = redirect_mapping
    self.extensions_mapping.update(extensions_mapping)
    self.allow_404 = allow_404
    self.bandwidth = bandwidth
    self.listener = listener
    self.rpc_lock = threading.Lock()
    # Use a None sentinel rather than a mutable [] default argument: a
    # shared default list would be visible across every call/instance.
    self.serving_dirs = [] if serving_dirs is None else serving_dirs
    self.output_dir = output_dir

  def TestingBegun(self, timeout):
    """Mark the start of a test run and arm the inactivity timeout."""
    self.test_in_progress = True
    # self.timeout does not affect Python 2.5.
    self.timeout = timeout
    self.ResetTimeout()
    self.JavaScriptIsAlive()
    # Have we seen any requests from the browser?
    self.received_request = False

  def ResetTimeout(self):
    """Record browser activity; called on every handled request."""
    self.last_activity = time.time()
    self.received_request = True

  def JavaScriptIsAlive(self):
    """Record a heartbeat from the page's JavaScript."""
    self.last_js_activity = time.time()

  def TimeSinceJSHeartbeat(self):
    """Seconds elapsed since the last JavaScript heartbeat."""
    return time.time() - self.last_js_activity

  def TestingEnded(self):
    """Mark the end of the test run."""
    self.test_in_progress = False

  def TimedOut(self, total_time):
    """True if no activity was seen for total_time seconds.

    A negative total_time disables the timeout check entirely.
    """
    return (total_time >= 0.0 and
            (time.time() - self.last_activity) >= total_time)
def Create(host, port):
  """Construct a Server bound to (host, port) with default MIME mappings."""
  server = Server((host, port), RequestHandler)
  mapping = mimetypes.types_map.copy()
  # Unknown extensions fall back to a generic binary type.
  mapping[''] = 'application/octet-stream'
  server.extensions_mapping = mapping
  return server
| bsd-3-clause |
2014cdag5/2014cdag5 | wsgi/static/Brython2.1.0-20140419-113919/Lib/keyword.py | 761 | 2049 | #! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
# Auto-generated by main() below from Python/graminit.c; keep sorted and do
# not edit the entries between the start/end markers by hand.
kwlist = [
#--start keywords--
    'False',
    'None',
    'True',
    'and',
    'as',
    'assert',
    'break',
    'class',
    'continue',
    'def',
    'del',
    'elif',
    'else',
    'except',
    'finally',
    'for',
    'from',
    'global',
    'if',
    'import',
    'in',
    'is',
    'lambda',
    'nonlocal',
    'not',
    'or',
    'pass',
    'raise',
    'return',
    'try',
    'while',
    'with',
    'yield',
#--end keywords--
    ]
# O(1) membership test bound directly to the frozenset's __contains__.
iskeyword = frozenset(kwlist).__contains__
def main():
    """Regenerate the kwlist block in this file from Python/graminit.c.

    Reads the grammar source (argv[1] or Python/graminit.c), extracts the
    quoted keyword names, and splices them between the start/end markers
    of the target file (argv[2] or Lib/keyword.py). Exits with status 1
    if the target lacks the markers.
    """
    import sys, re
    args = sys.argv[1:]
    iptfile = args[0] if args else "Python/graminit.c"
    optfile = args[1] if len(args) > 1 else "Lib/keyword.py"

    # scan the source file for keywords
    strprog = re.compile('"([^"]+)"')
    lines = []
    with open(iptfile) as fp:
        for line in fp:
            # Keyword rows in graminit.c look like: {1, "while"},
            if '{1, "' in line:
                match = strprog.search(line)
                if match:
                    lines.append("    '" + match.group(1) + "',\n")
    lines.sort()

    # load the output skeleton from the target
    with open(optfile) as fp:
        format = fp.readlines()

    # insert the lines of keywords
    try:
        start = format.index("#--start keywords--\n") + 1
        end = format.index("#--end keywords--\n")
        format[start:end] = lines
    except ValueError:
        sys.stderr.write("target does not contain format markers\n")
        sys.exit(1)

    # Write the output file; a with-statement guarantees the handle is
    # closed (and data flushed) even if the write raises, unlike the
    # previous bare open()/write()/close() sequence.
    with open(optfile, 'w') as fp:
        fp.write(''.join(format))
# Regenerate the keyword list when this module is run as a script.
if __name__ == "__main__":
    main()
| gpl-2.0 |
crazy-canux/django | tests/defer/tests.py | 338 | 11262 | from __future__ import unicode_literals
from django.db.models.query_utils import DeferredAttribute, InvalidQuery
from django.test import TestCase
from .models import (
BigChild, Child, ChildProxy, Primary, RefreshPrimaryProxy, Secondary,
)
class AssertionMixin(object):
    def assert_delayed(self, obj, num):
        """
        Instances with deferred fields look the same as normal instances
        when attribute values are read, so count how many of the model's
        fields are backed by a DeferredAttribute descriptor on the
        instance's class, and assert that count equals ``num``.
        """
        cls_dict = obj.__class__.__dict__
        deferred = sum(
            1 for field in obj._meta.fields
            if isinstance(cls_dict.get(field.attname), DeferredAttribute)
        )
        self.assertEqual(deferred, num)
class DeferTests(AssertionMixin, TestCase):
    """defer()/only() behavior on a simple Primary -> Secondary FK pair."""

    @classmethod
    def setUpTestData(cls):
        # One related pair shared (read-only) by every test method.
        cls.s1 = Secondary.objects.create(first="x1", second="y1")
        cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)

    def test_defer(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name")[0], 1)
        self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
        # Deferring a related field does not defer anything on the local model.
        self.assert_delayed(qs.defer("related__first")[0], 0)
        self.assert_delayed(qs.defer("name").defer("value")[0], 2)

    def test_only(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.only("name")[0], 2)
        self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
        self.assert_delayed(qs.only("name").only("value")[0], 2)
        self.assert_delayed(qs.only("related__first")[0], 2)
        # Using 'pk' with only() should result in 3 deferred fields, namely all
        # of them except the model's primary key see #15494
        self.assert_delayed(qs.only("pk")[0], 3)
        # You can use 'pk' with reverse foreign key lookups.
        self.assert_delayed(self.s1.primary_set.all().only('pk')[0], 3)

    def test_defer_only_chaining(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
        self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
        self.assert_delayed(qs.defer("name").only("value")[0], 2)
        self.assert_delayed(qs.only("name").defer("value")[0], 2)

    def test_defer_on_an_already_deferred_field(self):
        # Deferring the same field twice is idempotent.
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name")[0], 1)
        self.assert_delayed(qs.defer("name").defer("name")[0], 1)

    def test_defer_none_to_clear_deferred_set(self):
        # defer(None) resets any accumulated defer()/only() state.
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name", "value")[0], 2)
        self.assert_delayed(qs.defer(None)[0], 0)
        self.assert_delayed(qs.only("name").defer(None)[0], 0)

    def test_only_none_raises_error(self):
        msg = 'Cannot pass None as an argument to only().'
        with self.assertRaisesMessage(TypeError, msg):
            Primary.objects.only(None)

    def test_defer_extra(self):
        # extra() select columns do not interfere with deferred fields.
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
        self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)

    def test_defer_values_does_not_defer(self):
        # Using values() won't defer anything (you get the full list of
        # dictionaries back), but it still works.
        self.assertEqual(Primary.objects.defer("name").values()[0], {
            "id": self.p1.id,
            "name": "p1",
            "value": "xx",
            "related_id": self.s1.id,
        })

    def test_only_values_does_not_defer(self):
        self.assertEqual(Primary.objects.only("name").values()[0], {
            "id": self.p1.id,
            "name": "p1",
            "value": "xx",
            "related_id": self.s1.id,
        })

    def test_get(self):
        # Using defer() and only() with get() is also valid.
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
        self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)

    def test_defer_with_select_related(self):
        obj = Primary.objects.select_related().defer("related__first", "related__second")[0]
        self.assert_delayed(obj.related, 2)
        self.assert_delayed(obj, 0)

    def test_only_with_select_related(self):
        obj = Primary.objects.select_related().only("related__first")[0]
        self.assert_delayed(obj, 2)
        self.assert_delayed(obj.related, 1)
        self.assertEqual(obj.related_id, self.s1.pk)
        self.assertEqual(obj.name, "p1")

    def test_defer_select_related_raises_invalid_query(self):
        # When we defer a field and also select_related it, the query is
        # invalid and raises an exception.
        with self.assertRaises(InvalidQuery):
            Primary.objects.defer("related").select_related("related")[0]

    def test_only_select_related_raises_invalid_query(self):
        with self.assertRaises(InvalidQuery):
            Primary.objects.only("name").select_related("related")[0]

    def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
        # With a depth-based select_related, all deferred ForeignKeys are
        # deferred instead of traversed.
        with self.assertNumQueries(3):
            obj = Primary.objects.defer("related").select_related()[0]
            self.assert_delayed(obj, 1)
            self.assertEqual(obj.related.id, self.s1.pk)

    def test_saving_object_with_deferred_field(self):
        # Saving models with deferred fields is possible (but inefficient,
        # since every field has to be retrieved first).
        Primary.objects.create(name="p2", value="xy", related=self.s1)
        obj = Primary.objects.defer("value").get(name="p2")
        obj.name = "a new name"
        obj.save()
        self.assertQuerysetEqual(
            Primary.objects.all(), [
                "p1", "a new name",
            ],
            lambda p: p.name,
            ordered=False,
        )

    def test_defer_baseclass_when_subclass_has_no_added_fields(self):
        # Regression for #10572 - A subclass with no extra fields can defer
        # fields from the base class
        Child.objects.create(name="c1", value="foo", related=self.s1)
        # You can defer a field on a baseclass when the subclass has no fields
        obj = Child.objects.defer("value").get(name="c1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "c1")
        self.assertEqual(obj.value, "foo")

    def test_only_baseclass_when_subclass_has_no_added_fields(self):
        # You can retrieve a single column on a base class with no fields
        Child.objects.create(name="c1", value="foo", related=self.s1)
        obj = Child.objects.only("name").get(name="c1")
        # on an inherited model, its PK is also fetched, hence '3' deferred fields.
        self.assert_delayed(obj, 3)
        self.assertEqual(obj.name, "c1")
        self.assertEqual(obj.value, "foo")
class BigChildDeferTests(AssertionMixin, TestCase):
    """defer()/only() behavior on an inherited model with an added field."""

    @classmethod
    def setUpTestData(cls):
        cls.s1 = Secondary.objects.create(first="x1", second="y1")
        BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")

    def test_defer_baseclass_when_subclass_has_added_field(self):
        # You can defer a field on a baseclass
        obj = BigChild.objects.defer("value").get(name="b1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")

    def test_defer_subclass(self):
        # You can defer a field on a subclass
        obj = BigChild.objects.defer("other").get(name="b1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")

    def test_only_baseclass_when_subclass_has_added_field(self):
        # You can retrieve a single field on a baseclass
        obj = BigChild.objects.only("name").get(name="b1")
        # when inherited model, its PK is also fetched, hence '4' deferred fields.
        self.assert_delayed(obj, 4)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")

    def test_only_subclass(self):
        # Renamed from the misspelled 'test_only_sublcass'; the 'test_' prefix
        # is what the runner discovers, so the rename stays discoverable.
        # You can retrieve a single field on a subclass
        obj = BigChild.objects.only("other").get(name="b1")
        self.assert_delayed(obj, 4)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")
class TestDefer2(AssertionMixin, TestCase):
    """Assorted defer() regressions: proxies, inherited PKs, equality, refresh."""

    def test_defer_proxy(self):
        """
        Ensure select_related together with only on a proxy model behaves
        as expected. See #17876.
        """
        related = Secondary.objects.create(first='x1', second='x2')
        ChildProxy.objects.create(name='p1', value='xx', related=related)
        children = ChildProxy.objects.all().select_related().only('id', 'name')
        self.assertEqual(len(children), 1)
        child = children[0]
        self.assert_delayed(child, 2)
        self.assertEqual(child.name, 'p1')
        self.assertEqual(child.value, 'xx')

    def test_defer_inheritance_pk_chaining(self):
        """
        When an inherited model is fetched from the DB, its PK is also fetched.
        When getting the PK of the parent model it is useful to use the already
        fetched parent model PK if it happens to be available. Tests that this
        is done.
        """
        s1 = Secondary.objects.create(first="x1", second="y1")
        bc = BigChild.objects.create(name="b1", value="foo", related=s1,
                                     other="bar")
        bc_deferred = BigChild.objects.only('name').get(pk=bc.pk)
        # Accessing the inherited id must not issue a query.
        with self.assertNumQueries(0):
            bc_deferred.id
        self.assertEqual(bc_deferred.pk, bc_deferred.id)

    def test_eq(self):
        # A deferred instance compares equal to its fully loaded twin,
        # in both directions.
        s1 = Secondary.objects.create(first="x1", second="y1")
        s1_defer = Secondary.objects.only('pk').get(pk=s1.pk)
        self.assertEqual(s1, s1_defer)
        self.assertEqual(s1_defer, s1)

    def test_refresh_not_loading_deferred_fields(self):
        s = Secondary.objects.create()
        rf = Primary.objects.create(name='foo', value='bar', related=s)
        rf2 = Primary.objects.only('related', 'value').get()
        rf.name = 'new foo'
        rf.value = 'new bar'
        rf.save()
        # refresh_from_db() reloads only non-deferred fields (one query);
        # the deferred 'name' loads lazily later (one more query).
        with self.assertNumQueries(1):
            rf2.refresh_from_db()
            self.assertEqual(rf2.value, 'new bar')
        with self.assertNumQueries(1):
            self.assertEqual(rf2.name, 'new foo')

    def test_custom_refresh_on_deferred_loading(self):
        s = Secondary.objects.create()
        rf = RefreshPrimaryProxy.objects.create(name='foo', value='bar', related=s)
        rf2 = RefreshPrimaryProxy.objects.only('related').get()
        rf.name = 'new foo'
        rf.value = 'new bar'
        rf.save()
        with self.assertNumQueries(1):
            # Customized refresh_from_db() reloads all deferred fields on
            # access of any of them.
            self.assertEqual(rf2.name, 'new foo')
            self.assertEqual(rf2.value, 'new bar')
| bsd-3-clause |
gregswift/ansible-modules-extras | network/a10/a10_virtual_server.py | 11 | 11543 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb virtual server objects
(c) 2014, Mischa Peters <mpeters@a10networks.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
description:
- Manage slb virtual server objects on A10 Networks devices via aXAPI
author: "Mischa Peters (@mischapeters)"
notes:
- Requires A10 Networks aXAPI 2.1
requirements: []
options:
host:
description:
- hostname or ip of your A10 Networks device
required: true
default: null
aliases: []
choices: []
username:
description:
- admin account of your A10 Networks device
required: true
default: null
aliases: ['user', 'admin']
choices: []
password:
description:
- admin password of your A10 Networks device
required: true
default: null
aliases: ['pass', 'pwd']
choices: []
virtual_server:
description:
- slb virtual server name
required: true
default: null
aliases: ['vip', 'virtual']
choices: []
virtual_server_ip:
description:
- slb virtual server ip address
required: false
default: null
aliases: ['ip', 'address']
choices: []
virtual_server_status:
description:
- slb virtual server status
required: false
default: enable
aliases: ['status']
choices: ['enabled', 'disabled']
virtual_server_ports:
description:
- A list of ports to create for the virtual server. Each list item should be a
dictionary which specifies the C(port:) and C(type:), but can also optionally
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
required: false
write_config:
description:
- If C(yes), any changes will cause a write of the running configuration
to non-volatile memory. This will save I(all) configuration changes,
including those that may have been made manually or through other modules,
so care should be taken when specifying C(yes).
required: false
default: "no"
choices: ["yes", "no"]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
- port: 80
protocol: TCP
service_group: sg-80-tcp
- port: 443
protocol: HTTPS
service_group: sg-443-https
- port: 8080
protocol: http
status: disabled
'''
# The only keys accepted in each virtual-server port definition dict.
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
def validate_ports(module, ports):
    """Validate and normalize the virtual-server port definitions in place.

    Each entry of ``ports`` must be a dict with at least 'port' and
    'protocol'. On success, entries are normalized for the aXAPI:
    'port' to int, 'protocol' to its integer code, 'status' to the
    enabled/disabled integer (defaulting to enabled), and
    'service_group' to '' when absent. Any validation failure aborts
    the run via module.fail_json().
    """
    for item in ports:
        for key in item:
            if key not in VALID_PORT_FIELDS:
                module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))

        # validate the port number is present and an integer
        if 'port' in item:
            try:
                item['port'] = int(item['port'])
            # int() raises TypeError/ValueError on bad input; the previous
            # bare 'except:' also swallowed KeyboardInterrupt/SystemExit.
            except (TypeError, ValueError):
                module.fail_json(msg="port definitions must be integers")
        else:
            module.fail_json(msg="port definitions must define the port field")

        # validate the port protocol is present, and convert it to
        # the internal API integer value (and validate it)
        if 'protocol' in item:
            protocol = axapi_get_vport_protocol(item['protocol'])
            if not protocol:
                module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
            else:
                item['protocol'] = protocol
        else:
            module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))

        # convert the status to the internal API integer value
        if 'status' in item:
            item['status'] = axapi_enabled_disabled(item['status'])
        else:
            item['status'] = 1

        # ensure the service_group field is at least present
        if 'service_group' not in item:
            item['service_group'] = ''
def main():
    """Entry point: create, update or delete an slb virtual server via aXAPI.

    Reads connection and virtual-server parameters from the Ansible module
    arguments, validates the port list, applies the requested state on the
    device, optionally persists the running config, and exits the module
    with the resulting payload.
    """
    argument_spec = a10_argument_spec()
    argument_spec.update(url_argument_spec())
    argument_spec.update(
        dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
            virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
            virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
            virtual_server_ports=dict(type='list', required=True),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )

    host = module.params['host']
    username = module.params['username']
    password = module.params['password']
    state = module.params['state']
    write_config = module.params['write_config']
    slb_virtual = module.params['virtual_server']
    slb_virtual_ip = module.params['virtual_server_ip']
    slb_virtual_status = module.params['virtual_server_status']
    slb_virtual_ports = module.params['virtual_server_ports']

    if slb_virtual is None:
        module.fail_json(msg='virtual_server is required')

    # normalizes slb_virtual_ports in place, failing the module on bad input
    validate_ports(module, slb_virtual_ports)

    axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
    session_url = axapi_authenticate(module, axapi_base_url, username, password)

    # a failed search means the virtual server does not exist yet
    slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
    slb_virtual_exists = not axapi_failure(slb_virtual_data)

    changed = False
    if state == 'present':
        json_post = {
            'virtual_server': {
                'name': slb_virtual,
                'address': slb_virtual_ip,
                'status': axapi_enabled_disabled(slb_virtual_status),
                'vport_list': slb_virtual_ports,
            }
        }

        # before creating/updating we need to validate that any
        # service groups defined in the ports list exist since
        # since the API will still create port definitions for
        # them while indicating a failure occurred
        checked_service_groups = []
        for port in slb_virtual_ports:
            if 'service_group' in port and port['service_group'] not in checked_service_groups:
                # skip blank service group entries
                if port['service_group'] == '':
                    continue
                result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
                if axapi_failure(result):
                    module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
                checked_service_groups.append(port['service_group'])

        if not slb_virtual_exists:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
            if axapi_failure(result):
                module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
            changed = True
        else:
            def needs_update(src_ports, dst_ports):
                '''
                Checks to determine if the port definitions of the src_ports
                array are in or different from those in dst_ports. If there is
                a difference, this function returns true, otherwise false.
                '''
                for src_port in src_ports:
                    found = False
                    different = False
                    for dst_port in dst_ports:
                        if src_port['port'] == dst_port['port']:
                            found = True
                            for valid_field in VALID_PORT_FIELDS:
                                if src_port[valid_field] != dst_port[valid_field]:
                                    different = True
                                    break
                        if found or different:
                            break
                    if not found or different:
                        return True
                # every port from the src exists in the dst, and none of them were different
                return False

            defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])

            # we check for a needed update both ways, in case ports
            # are missing from either the ones specified by the user
            # or from those on the device
            if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
                result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
                if axapi_failure(result):
                    module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
                changed = True

        # if we changed things, get the full info regarding
        # the service group for the return data below
        if changed:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
        else:
            result = slb_virtual_data
    elif state == 'absent':
        if slb_virtual_exists:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
            changed = True
        else:
            result = dict(msg="the virtual server was not present")

    # if the config has changed, save the config unless otherwise requested
    if changed and write_config:
        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
        if axapi_failure(write_result):
            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])

    # log out of the session nicely and exit
    axapi_call(module, session_url + '&method=session.close')
    module.exit_json(changed=changed, content=result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.a10 import *
# Run only when executed directly (standard Ansible module pattern).
if __name__ == '__main__':
    main()
| gpl-3.0 |
shaon/eutester | testcases/cloud_user/s3/object_tests.py | 5 | 49627 | #!/usr/bin/env python
#
############################################
# #
# objectstorage/S3 Object Test Cases #
# #
############################################
#Author: Zach Hill <zach@eucalyptus.com>
#Author: Vic Iglesias <vic@eucalyptus.com>
import base64
import time
import random
import os
import tempfile
from datetime import timedelta
from datetime import datetime
import hashlib
import json
import hmac
from io import BytesIO
from boto.s3.key import Key
from boto.s3.prefix import Prefix
from boto.exception import S3ResponseError
import boto.s3, boto.s3.connection
import dateutil.parser
from eucaops import Eucaops
from eutester.eutestcase import EutesterTestCase
from eucaops import S3ops
import requests
class ObjectTestSuite(EutesterTestCase):
data_size = 1000
def __init__(self, tester=None, **kwargs):
    """Set up the test case: parse args, build/reuse a tester, and create
    a uniquely named test bucket plus random printable-ASCII object data.

    NOTE(review): this constructor performs network I/O (bucket creation)
    and seeds the module-level RNG; presumably intentional for a test
    harness, but confirm before reusing elsewhere.
    """
    self.setuptestcase()
    self.setup_parser()
    self.parser.add_argument("--endpoint", default=None)
    self.tester = tester
    self.get_args()
    # Allow __init__ to get args from __init__'s kwargs or through command line parser...
    for kw in kwargs:
        print 'Setting kwarg:'+str(kw)+" to "+str(kwargs[kw])
        self.set_arg(kw, kwargs[kw])
    self.show_args()
    # Setup basic eutester object; an explicit endpoint selects the plain
    # S3 ops wrapper, otherwise a full Eucaops session is created.
    if not self.tester:
        if self.args.endpoint:
            self.tester = S3ops(credpath=self.args.credpath, endpoint=self.args.endpoint)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
    # Timestamped prefix keeps bucket names unique across runs.
    self.bucket_prefix = "eutester-" + str(int(time.time())) + "-"
    self.buckets_used = set()
    random.seed(time.time())
    self.test_bucket_name = self.bucket_prefix + str(random.randint(0,100))
    self.test_bucket = self.tester.create_bucket(self.test_bucket_name)
    self.buckets_used.add(self.test_bucket_name)

    # Poll until the new bucket is actually visible to GET requests.
    def ensure_bucket_exists():
        try:
            self.tester.s3.get_bucket(self.test_bucket_name)
            return True
        except Exception:
            return False
    self.tester.wait_for_result(ensure_bucket_exists, True)

    # Create some test data for the objects: data_size random printable
    # ASCII characters (codes 32-126).
    self.test_object_data = ""
    for i in range(0, self.data_size):
        self.test_object_data += chr(random.randint(32,126))
    print "Generated data for objects: " + self.test_object_data
def print_key_info(self, keys=None):
    """Log name, version id, etag and ACL XML for each key in *keys*."""
    for k in keys:
        info_line = ("Key=" + str(k.key) + " -- version= " + str(k.version_id)
                     + " -- eTag= " + str(k.etag) + " -- ACL= " + str(k.get_xml_acl()))
        self.tester.info(info_line)
def put_object(self, bucket=None, object_key=None, object_data=None):
    """Puts an object with the specified name and data in the specified bucket.

    Returns the uploaded key's etag. Logs and re-raises any boto error.
    """
    if bucket is None:  # identity check; was '== None'
        raise Exception("Cannot put object without proper bucket reference")
    try:
        key = Key(bucket=bucket, name=object_key)
        key.set_contents_from_string(object_data)
        return key.etag
    except Exception as e:
        # NOTE: e.message is Python 2 only, consistent with this file.
        self.tester.debug("Exception occured during 'PUT' of object " + object_key + " into bucket " + bucket.name + ": " + e.message)
        # bare 'raise' preserves the original traceback; 'raise e' in
        # Python 2 re-raised from here, losing the real failure site
        raise
def post_object(self, bucket_name=None, object_key=None, object_data=None, policy=None, acl=None):
    """Uploads an object using POST + form upload"""
    # Form fields required by the S3 POST upload protocol, built in the
    # same order as before so credential lookups happen identically.
    fields = {
        'key' : object_key,
        'acl' : acl,
        'AWSAccessKeyId': self.tester.get_access_key(),
        'Policy' : policy,
        'Signature': self.sign_policy(sak=self.tester.get_secret_key(), b64_policy_json=policy)
    }
    self.tester.info('Fields: ' + str(fields))
    url = 'http://%s:%s/%s/%s' % (self.tester.s3.host, self.tester.s3.port,
                                  self.tester.s3.path, bucket_name)
    self.tester.debug('Sending POST request to: ' + url)
    return requests.post(url, data=fields, files={'file': BytesIO(object_data)})
def post_object_sts(self, bucket_name=None, object_key=None, object_data=None, policy=None, acl=None, credentials=None):
    """Uploads an object using POST + form upload using an STS token"""
    self.assertNotEqual(credentials, None, msg='Credentials missing')
    # Same form fields as post_object(), plus the STS session token, and
    # the signature computed with the temporary secret key.
    fields = {
        'key': object_key,
        'acl': acl,
        'AWSAccessKeyId': credentials.access_key,
        'Policy': policy,
        'x-amz-security-token': credentials.session_token,
        'Signature': self.sign_policy(sak=str(credentials.secret_key), b64_policy_json=policy)
    }
    self.tester.info('Fields: ' + str(fields))
    url = 'http://%s:%s/%s/%s' % (self.tester.s3.host, self.tester.s3.port,
                                  self.tester.s3.path, bucket_name)
    self.tester.debug('Sending POST request to: ' + url)
    return requests.post(url, data=fields, files={'file': BytesIO(object_data)})
def generate_default_policy_b64(self, bucket, key, acl, token=None):
    """Build and base64-encode an S3 POST upload policy document.

    The policy expires one hour from now and constrains the upload to the
    given bucket, key and canned ACL; if an STS *token* is supplied it is
    added as its own condition element.
    """
    delta = timedelta(hours=1)
    expire_time = (datetime.utcnow() + delta).replace(microsecond=0)
    policy = {'conditions': [{'acl': acl},
                             {'bucket': bucket},
                             {'key': key},
                             ],
              'expiration': time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                          expire_time.timetuple())}
    if token is not None:
        # Each policy condition must be a separate element of the
        # 'conditions' array. The previous code inserted the token into
        # the {'acl': ...} dict, yielding an invalid two-key condition.
        policy['conditions'].append({'x-amz-security-token': token})
    policy_json = json.dumps(policy)
    # format explicitly; tester.info takes a single message string
    self.tester.info('generated default policy: %s' % policy_json)
    return base64.b64encode(policy_json)
def sign_policy(self, sak=None, b64_policy_json=None):
    """Return the base64 HMAC-SHA1 signature of the policy, keyed by *sak*."""
    digest = hmac.new(sak, b64_policy_json, hashlib.sha1).digest()
    return base64.b64encode(digest)
def enable_versioning(self, bucket):
    """Enable versioning on the bucket, checking that it is not already
    enabled and that the operation succeeds.

    Returns True when versioning transitions to 'Enabled', else False.
    """
    vstatus = bucket.get_versioning_status()
    if vstatus is not None and len(vstatus.keys()) > 0 and vstatus['Versioning'] is not None and vstatus['Versioning'] != 'Disabled':
        self.tester.info("Versioning status should be null/Disabled, found: " + vstatus['Versioning'])
        return False
    self.tester.info("Bucket versioning is Disabled")

    # Enable versioning and verify it took effect.
    bucket.configure_versioning(True)
    if bucket.get_versioning_status()['Versioning'] == 'Enabled':
        self.tester.info("Versioning status correctly set to enabled")
        return True
    self.tester.info("Versioning status not enabled, should be.")
    # (the original had a second, unreachable 'return False' after this)
    return False
def suspend_versioning(self, bucket):
    """Suspend versioning on the bucket, checking that it is previously
    enabled and that the operation succeeds.

    Returns True when versioning transitions to 'Suspended', else False.
    """
    if bucket.get_versioning_status()['Versioning'] == 'Enabled':
        self.tester.info("Versioning status correctly set to enabled")
    else:
        self.tester.info("Versioning status not enabled, should be. Can't suspend if not enabled....")
        return False

    # Suspend versioning (configure_versioning(False) suspends in boto);
    # the old comment here wrongly said "Enable versioning".
    bucket.configure_versioning(False)
    if bucket.get_versioning_status()['Versioning'] == 'Suspended':
        self.tester.info("Versioning status correctly set to suspended")
        return True
    self.tester.info("Versioning status not suspended.")
    # (the original had a second, unreachable 'return False' after this)
    return False
def check_version_listing(self, version_list, total_expected_length):
    """Checks a version listing for both completeness and ordering as well as pagination if required.

    Returns the first mis-ordered key found, or None when ordering is
    consistent. Non-Key entries (e.g. delete markers) are skipped.
    """
    self.tester.info("Checking bucket version listing. Listing is " + str(len(version_list)) + " entries long")
    if total_expected_length >= 1000:
        # NOTE(review): asserts exactly 999 entries when >= 1000 are
        # expected -- presumably the service caps one listing page just
        # below 1000; confirm against the S3 max-keys behavior.
        assert(len(version_list) == 999)
    else:
        assert(len(version_list) == total_expected_length)
    prev_obj = None
    should_fail = None
    for obj in version_list:
        if isinstance(obj,Key):
            self.tester.info("Key: " + obj.name + " -- " + obj.version_id + "--" + obj.last_modified)
            # compare_versions(prev, obj) > 0 marks a mis-ordered pair; the
            # offending key is returned rather than failing immediately.
            if prev_obj != None:
                if self.compare_versions(prev_obj, obj) > 0:
                    should_fail = obj
            prev_obj = obj
        else:
            self.tester.info("Not a key, skipping: " + str(obj))
    return should_fail
def compare_versions(self, key1, key2):
    """
    Compare two keys for version-listing order.

    Actual behavior (matching the inline comments below):
      returns  1 if key1.name < key2.name, or if the names match and key1
               was modified more recently than key2
      returns -1 if key1.name > key2.name, or if the names match and key1
               is older than key2
      returns  0 if name and version_id are both equal
      returns None if names match, version ids differ, and the modified
               timestamps are identical (order cannot be determined)

    NOTE(review): the original docstring claimed -1 for key1 < key2 --
    the opposite of what the code does for names. Confirm which
    convention check_version_listing() actually expects before changing
    either side.
    """
    if key1.name < key2.name:
        #self.debug("Key1: " + key1.name + " is less than " + key2.name)
        return 1
    elif key1.name > key2.name:
        #self.debug("Key1: " + key1.name + " is greater than " + key2.name)
        return -1
    else:
        if key1.version_id == key2.version_id:
            #self.debug("Key1: " + key1.name + " is the same version as " + key2.name)
            return 0
        else:
            if dateutil.parser.parse(key1.last_modified) > dateutil.parser.parse(key2.last_modified):
                #self.debug("Key1: " + key1.last_modified + " last modified is greater than " + key2.last_modified)
                return 1
            elif dateutil.parser.parse(key1.last_modified) < dateutil.parser.parse(key2.last_modified):
                #self.debug("Key1: " + key1.last_modified + " last modified is less than " + key2.last_modified)
                return -1
    return None
def test_object_basic_ops(self):
    """
    Tests basic operations on objects: simple GET,PUT,HEAD,DELETE,
    and a server-side COPY.
    """
    self.tester.info("Basic Object Operations Test (GET/PUT/HEAD)")
    if self.test_bucket is None:
        self.fail("Error: test_bucket not set, cannot run test")

    # Test PUT & GET: round-trip the generated data through the bucket.
    testkey = "testkey1-" + str(int(time.time()))
    self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data)

    ret_key = self.test_bucket.get_key(testkey)
    ret_content = ret_key.get_contents_as_string()

    if ret_content == self.test_object_data:
        self.tester.info("Set content = get content, put passed")
    else:
        if ret_content is not None:
            self.tester.info("Got content: " + ret_content)
        else:
            self.tester.info("No content returned")
        self.tester.info("Expected content: " + self.test_object_data)
        self.fail("Put content not the same as what was returned")

    # Test HEAD: metadata must agree with what GET returned.
    key_meta = self.test_bucket.get_key(testkey)
    if key_meta.key != ret_key.key or key_meta.etag != ret_key.etag or key_meta.size != ret_key.size:
        self.tester.info("Something is wrong, the HEAD operation returned different metadata than the GET operation")
        self.tester.info("Expected key " + ret_key.key + " etag: " + ret_key.etag + " Got: " + key_meta.key + " etag: " + key_meta.etag)
    else:
        self.tester.info("HEAD meta = GET meta, all is good")

    # Test copy operation (GET w/source headers)
    self.tester.info("Testing COPY object")
    new_key = "testkey2"
    self.test_bucket.copy_key(new_key_name=new_key, src_bucket_name=self.test_bucket_name, src_key_name=testkey)

    keylist = self.test_bucket.list()
    counter = 0
    for k in keylist:
        if isinstance(k, Prefix):
            # parenthesized: the old code's un-parenthesized conditional
            # expression bound as ("Prefix: " + "NULL") if ... else k.name,
            # dropping the "Prefix: " label for real prefixes
            self.tester.info("Prefix: " + ("NULL" if k is None else k.name))
        else:
            self.tester.info("Key: " + k.name + " Etag: " + k.etag)
            counter += 1
    if counter != 2:
        # str(counter): the old message concatenated an int (TypeError),
        # and counted via len() on a lazy listing
        self.fail("Expected 2 keys after copy operation, found only: " + str(counter))

    try:
        ret_key = self.test_bucket.get_key(new_key)
    except Exception:  # narrowed from a bare 'except:'
        self.fail("Could not get object copy")
    if ret_key is None:
        self.fail("Could not get object copy")

    if self.test_bucket.get_key(testkey).get_contents_as_string() != ret_key.get_contents_as_string():
        self.fail("Contents of original key and copy don't match")
    else:
        self.tester.info("Copy key contents match original!")

    # Test DELETE: the source key must no longer be retrievable.
    self.test_bucket.delete_key(testkey)
    ret_key = self.test_bucket.get_key(testkey)
    if ret_key:
        self.tester.info("Erroneously got: " + ret_key.name)
        raise S3ResponseError(404, "Should have thrown exception for getting a non-existent object")
    self.tester.info("Finishing basic ops test")
def test_object_byte_offset_read(self):
    """Tests fetching specific byte offsets of the object"""
    self.tester.info("Byte-range Offset GET Test")
    self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
    testkey = "rangetestkey-" + str(int(time.time()))
    source_bytes = bytearray(self.test_object_data)

    # Put the object initially
    self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data)

    # Test range for first 100 bytes of object
    print "Trying start-range object get"
    try:
        data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=0-99"})
    except:
        self.fail("Failed range object get first 100 bytes")
    startrangedata = bytearray(data_str)
    # NOTE(review): 'str' + bytearray raises TypeError on Python 2 --
    # this print presumably needs str(startrangedata); confirm.
    print "Got: " + startrangedata
    print "Expected: " + str(source_bytes[:100])
    start = 0
    for i in range(0,100):
        if startrangedata[i-start] != source_bytes[i]:
            # NOTE(review): bytearray indexing yields an int, so this
            # concatenation would itself fail, and the index looks like it
            # should be i-start -- confirm before trusting this diagnostic.
            print "Byte: " + startrangedata[i] + " differs!"
            self.fail("Start-range Ranged-get failed")

    print "Trying mid-object range"
    try:
        data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=500-599"})
    except:
        self.fail("Failed range object get for middle 100 bytes")
    midrangedata = bytearray(data_str)
    start = 500
    for i in range(start,start+100):
        if midrangedata[i-start] != source_bytes[i]:
            # NOTE(review): same int-concatenation/index concerns as above.
            print "Byte: " + midrangedata[i] + "differs!"
            self.fail("Mid-range Ranged-get failed")

    print "Trying end-range object get"
    # Test range for last 100 bytes of object
    try:
        data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=800-899"})
    except:
        self.fail("Failed range object get for last 100 bytes")
    endrangedata = bytearray(data_str)
    print "Got: " + str(endrangedata)
    start = 800
    try:
        for i in range(start,start+100):
            if endrangedata[i-start] != source_bytes[i]:
                print "Byte: " + endrangedata[i] + "differs!"
                self.fail("End-range Ranged-get failed")
    except Exception as e:
        # NOTE(review): this handler swallows self.fail()'s exception (so a
        # mismatch would not fail the test), and 'print ... + e' would raise
        # on its own -- confirm intended behavior.
        print "Exception! Received: " + e
    print "Range test complete"
def test_object_post(self):
"""Test the POST method for putting objects, requires a pre-signed upload policy and url"""
self.tester.info("Testing POST form upload on bucket" + self.test_bucket_name)
self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
itr = 1
self.tester.info('Doing ' + str(itr) + ' POST upload iterations')
acl = 'ec2-bundle-read'
for k in xrange(0, itr):
key = 'postkey1' + str(k)
data = os.urandom(512)
computed_md5 = '"' + hashlib.md5(data).hexdigest() + '"'
self.tester.info('Data md5: ' + computed_md5 + ' data length: ' + str(len(computed_md5)))
self.tester.info('Uploading object ' + self.test_bucket_name + '/' + key + ' via POST with acl : ' + acl)
response = self.post_object(bucket_name=self.test_bucket_name,
object_key=key,
object_data=data,
acl=acl,
policy=self.generate_default_policy_b64(self.test_bucket_name, key, acl=acl))
self.tester.info('Got response for POST: ' + str(response.status_code) + ': ' + str(response.text))
assert(response.status_code == 204)
fetched_key = self.test_bucket.get_key(key)
fetched_content = fetched_key.get_contents_as_string()
self.tester.info('Got fetched md5: ' + fetched_key.etag)
self.tester.info('Calculated md5: ' + computed_md5 + ' recieved md5 ' + fetched_key.etag)
assert(fetched_key.etag == computed_md5)
assert(fetched_content == data)
self.tester.info("Done with upload test")
def test_object_post_sts(self):
"""Test the POST method for putting objects using STS tokens, requires a pre-signed upload policy and url"""
self.tester.info("Testing POST form upload on bucket with STS token" + self.test_bucket_name)
self.tester.info("Getting STS credential for test")
credentials = self.tester.issue_session_token()
self.assertNotEqual(credentials, None,msg='Could not get credentials')
self.assertNotEqual(credentials.access_key, None, msg='Credentials missing access_key')
self.assertNotEqual(credentials.secret_key, None, msg='Credentials missing secret_key')
self.assertNotEqual(credentials.session_token, None, msg='Credentials missing session_token')
self.assertNotEqual(credentials.expiration, None, msg='Credentials missing expiration')
self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
itr = 1
self.tester.info('Doing ' + str(itr) + ' POST upload iterations')
acl = 'ec2-bundle-read'
for k in xrange(0, itr):
key = 'postkey1' + str(k)
data = os.urandom(512)
computed_md5 = '"' + hashlib.md5(data).hexdigest() + '"'
self.tester.info('Data md5: ' + computed_md5 + ' data length: ' + str(len(computed_md5)))
self.tester.info('Uploading object ' + self.test_bucket_name + '/' + key + ' via POST with acl : ' + acl)
response = self.post_object_sts(bucket_name=self.test_bucket_name,
object_key=key,
object_data=data,
acl=acl,
policy=self.generate_default_policy_b64(self.test_bucket_name, key, acl=acl,token=credentials.session_token), credentials=credentials)
self.tester.info('Got response for POST: ' + str(response.status_code) + ': ' + str(response.text))
assert(response.status_code == 204)
fetched_key = self.test_bucket.get_key(key)
fetched_content = fetched_key.get_contents_as_string()
self.tester.info('Got fetched md5: ' + fetched_key.etag)
self.tester.info('Calculated md5: ' + computed_md5 + ' recieved md5 ' + fetched_key.etag)
assert(fetched_key.etag == computed_md5)
assert(fetched_content == data)
self.tester.info("Done with POST w/sts upload test")
    def test_object_post_large(self):
        """Test the POST method for putting objects, requires a pre-signed upload policy and url.

        Uploads a 10MB random payload via form POST and verifies the stored
        size, ETag (md5), and content round-trip.
        """
        self.tester.info("Testing POST form upload on bucket" + self.test_bucket_name)
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        self.tester.info("Testing POST form upload of 10MB data on bucket" + self.test_bucket_name)
        itr = 1
        large_obj_size_bytes = 10 * 1024 * 1024 #10MB content
        self.tester.info('Doing ' + str(itr) + ' iterations of large object of size ' + str(large_obj_size_bytes) + ' with POST')
        acl = 'ec2-bundle-read'
        for i in xrange(0, itr):
            key = 'postkey_10mb_' + str(i)
            self.tester.info('Generating ' + str(large_obj_size_bytes) + ' bytes for upload')
            #Create some test data
            data = str(os.urandom(large_obj_size_bytes))
            self.tester.info("Data length: " + str(len(data)))
            #Quote the digest: S3 returns ETags wrapped in double quotes
            computed_md5 = '"' + hashlib.md5(data).hexdigest() + '"'
            self.tester.info('Data md5 is: ' + computed_md5)
            self.tester.info('Uploading object via POST using acl: ' + acl)
            response = self.post_object(bucket_name=self.test_bucket.name,
                                        object_key=key,
                                        object_data=data,
                                        policy=self.generate_default_policy_b64(self.test_bucket.name, key, acl=acl),
                                        acl=acl)
            self.tester.info('Got response for POST: ' + str(response.status_code) + ': ' + str(response.text))
            #204 No Content is the success status for a form POST upload
            assert(response.status_code == 204)
            self.tester.info('Fetching the content for verification')
            fetched_key = self.test_bucket.get_key(key_name=key)
            self.tester.info('Got fetched content length : ' + str(fetched_key.size) + ' Expected ' + str(len(data)))
            assert(fetched_key.size == len(data))
            self.tester.info('Got fetched md5: ' + fetched_key.etag)
            self.tester.info('Calculated md5: ' + computed_md5 + ' recieved md5 ' + fetched_key.etag)
            assert(fetched_key.etag == computed_md5)
            fetched_content = fetched_key.get_contents_as_string()
            assert(fetched_content == data)
def test_object_large_objects(self):
"""Test operations on large objects (>1MB), but not so large that we must use the multi-part upload interface"""
self.tester.info("Testing large-ish objects over 1MB in size on bucket" + self.test_bucket_name)
self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
test_data = None
large_obj_size_bytes = 5 * 1024 * 1024 #5MB
self.tester.info("Generating " + str(large_obj_size_bytes) + " bytes of data")
#Create some test data
#for i in range(0, large_obj_size_bytes):
# test_data += chr(random.randint(32,126))
test_data = str(os.urandom(large_obj_size_bytes))
self.tester.info("Uploading object content of size: " + str(large_obj_size_bytes) + " bytes")
keyname = "largeobj-" + str(int(time.time()))
self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=test_data)
self.tester.info("Done uploading object")
ret_key = self.test_bucket.get_key(keyname)
ret_data = ret_key.get_contents_as_string()
if ret_data != test_data:
self.fail("Fetched data and generated data don't match")
else:
self.tester.info("Data matches!")
self.tester.info("Removing large object")
self.test_bucket.delete_key(ret_key)
self.tester.info("Complete large object test")
pass
    def test_object_multipart(self):
        """Tests basic multipart upload functionality.

        Runs both the happy-path multipart upload and the abort flow
        against a freshly rebuilt bucket.
        """
        self.tester.info("Testing Multipart")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        self.test_multipart_upload()
        self.test_abort_multipart_upload()
def test_multipart_upload(self):
'''Basic multipart upload'''
self.tester.info("Testing multipart upload")
self.tester.info("Creating random file representing part...")
temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="multipart")
temp_file.write(os.urandom(5 * 1024 * 1024))
keyname="multi-" + str(int(time.time()))
self.tester.info("Initiating multipart upload...much upload")
reply = self.initiate_multipart_upload(keyname)
self.tester.info("Uploading parts...Such Parts")
for partnum in range(1, 11):
temp_file.seek(0, os.SEEK_SET)
reply.upload_part_from_file(temp_file, partnum)
self.tester.info("Listing parts...")
self.test_bucket.get_all_multipart_uploads()
self.tester.info("Completing upload...So OSG")
reply.complete_upload()
temp_file.close()
self.tester.info("HEAD request...")
returned_key = self.test_bucket.get_key(keyname)
download_temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="mpu-download")
self.tester.info("Downloading object...very mpu");
returned_key.get_contents_to_file(download_temp_file);
self.tester.info("Deleting object...WOW")
self.test_bucket.delete_key(keyname)
    def test_abort_multipart_upload(self):
        '''Initiate a multipart upload, upload ten parts, then cancel (abort) it.

        NOTE(review): shadowed by a later duplicate definition of the same
        name in this class.
        '''
        self.tester.info("Testing abort multipart upload")
        temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="multipart")
        temp_file.write(os.urandom(5 * 1024 * 1024))
        keyname="multi-" + str(int(time.time()))
        reply = self.initiate_multipart_upload(keyname)
        #Re-upload the same 5MB file as parts 1-10, rewinding it each iteration
        for partnum in range(1, 11):
            temp_file.seek(0, os.SEEK_SET)
            reply.upload_part_from_file(temp_file, partnum)
        self.test_bucket.get_all_multipart_uploads()
        self.tester.info("Canceling upload")
        reply.cancel_upload()
        temp_file.close()
    def initiate_multipart_upload(self, keyname):
        """Start a multipart upload for *keyname* on the current test bucket and return the upload handle.

        NOTE(review): shadowed by a later duplicate definition in this class.
        """
        self.tester.info("Initiating multipart upload " + keyname)
        return self.test_bucket.initiate_multipart_upload(keyname)
    def test_object_versioning_enabled(self):
        """Tests object versioning for get/put/delete on a versioned bucket.

        Sequence: put v1, get v1, put v2, put v3, get a specific version,
        delete the latest, restore v1 via copy, put v3 again, delete v2
        explicitly, and confirm v3 is the visible top version.
        """
        self.tester.info("Testing bucket Versioning-Enabled")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        if not self.enable_versioning(self.test_bucket):
            self.fail("Could not properly enable versioning")
        #Create some keys
        keyname = "versionkey-" + str(int(time.time()))
        #Multiple versions of the data
        v1data = self.test_object_data + "--version1"
        v2data = self.test_object_data + "--version2"
        v3data = self.test_object_data + "--version3"
        #Test sequence: put v1, get v1, put v2, put v3, get v3, delete v3, restore with v1 (copy), put v3 again, delete v2 explicitly
        self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=v1data)
        #Get v1
        obj_v1 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v1.etag,data=v1data)
        self.tester.info("Initial bucket state after object uploads with versioning enabled:")
        self.print_key_info(keys=[obj_v1])
        self.tester.info("Adding another version")
        #Put v2 (and get/head to confirm success)
        self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v2data)
        obj_v2 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v2.etag,data=v2data)
        self.print_key_info(keys=[obj_v1, obj_v2])
        self.tester.info("Adding another version")
        #Put v3 (and get/head to confirm success)
        self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v3data)
        obj_v3 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v3.etag,data=v3data)
        self.print_key_info(keys=[obj_v1, obj_v2, obj_v3])
        self.tester.info("Getting specific version")
        #Get a specific version, v1
        v1_return = self.test_bucket.get_key(key_name=keyname,version_id=obj_v1.version_id)
        self.print_key_info(keys=[v1_return])
        #Delete current latest version (v3)
        self.test_bucket.delete_key(keyname)
        del_obj = self.test_bucket.get_key(keyname)
        if del_obj:
            self.tester.info("Erroneously got: " + del_obj.name)
            #NOTE(review): raises rather than self.fail(); also the argument
            #order here differs from the S3ResponseError raise further down
            #(status-first vs message-first) - confirm the intended signature.
            raise S3ResponseError(404, "Should have thrown this exception for getting a non-existent object")
        #Restore v1 using copy
        self.tester.info("Restoring version")
        try:
            self.test_bucket.copy_key(new_key_name=obj_v1.key,src_bucket_name=self.test_bucket_name,src_key_name=keyname,src_version_id=obj_v1.version_id)
        except S3ResponseError as e:
            self.fail("Failed to restore key from previous version using copy got error: " + str(e.status))
        restored_obj = self.test_bucket.get_key(keyname)
        assert(restored_obj != None)
        self.tester.check_md5(eTag=restored_obj.etag,data=v1data)
        self.print_key_info(keys=[restored_obj])
        #Put v3 again
        self.tester.info("Adding another version")
        self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v3data)
        self.tester.check_md5(eTag=obj_v3.etag,data=v3data)
        self.print_key_info([self.test_bucket.get_key(keyname)])
        #Delete v2 explicitly
        self.test_bucket.delete_key(key_name=obj_v2.key,version_id=obj_v2.version_id)
        del_obj = self.test_bucket.get_key(keyname,version_id=obj_v2.version_id)
        if del_obj:
            raise S3ResponseError("Should have gotten 404 not-found error, but got: " + del_obj.key + " instead",404)
        #Show what's on top
        top_obj = self.test_bucket.get_key(keyname)
        self.print_key_info([top_obj])
        self.tester.check_md5(eTag=top_obj.etag,data=v3data)
        self.tester.info("Finished the versioning enabled test. Success!!")
    def clear_and_rebuild_bucket(self, bucket_name):
        """Empty *bucket_name* and return a freshly created bucket of the same name."""
        self.tester.clear_bucket(bucket_name)
        return self.tester.create_bucket(bucket_name)
    def test_object_versionlisting(self):
        """
        Tests object version listing from a bucket.

        Uploads version_max versions of keyrange keys, then verifies the
        version listing length (capped at 999 entries when more than 1000
        versions exist) and that entries are sorted per compare_versions().
        """
        version_max = 3
        keyrange = 20
        self.tester.info("Testing listing versions in a bucket and pagination using " + str(keyrange) + " keys with " + str(version_max) + " versions per key")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        if not self.enable_versioning(self.test_bucket):
            self.fail("Could not enable versioning properly. Failing")
        key = "testkey-" + str(int(time.time()))
        keys = [ key + str(k) for k in range(0,keyrange)]
        contents = [ self.test_object_data + "--v" + str(v) for v in range(0,version_max)]
        try:
            for keyname in keys:
                #Put version_max versions of each key
                for v in range(0,version_max):
                    self.tester.info("Putting: " + keyname + " version " + str(v))
                    self.test_bucket.new_key(keyname).set_contents_from_string(contents[v])
        except S3ResponseError as e:
            self.fail("Failed putting object versions for test: " + str(e.status))
        listing = self.test_bucket.get_all_versions()
        self.tester.info("Bucket version listing is " + str(len(listing)) + " entries long")
        #With >= 1000 versions the service is expected to truncate the listing to 999 entries
        if keyrange * version_max >= 1000:
            if not len(listing) == 999:
                #Disable versioning before raising so cleanup is not blocked
                self.test_bucket.configure_versioning(False)
                self.tester.debug(str(listing))
                raise Exception("Bucket version listing did not limit the response to 999. Instead: " + str(len(listing)))
        else:
            if not len(listing) == keyrange * version_max:
                self.test_bucket.configure_versioning(False)
                self.tester.debug(str(listing))
                raise Exception("Bucket version listing did not equal the number uploaded. Instead: " + str(len(listing)))
        #Verify the listing order; non-Key entries (e.g. delete markers) are skipped
        prev_obj = None
        for obj in listing:
            if isinstance(obj,Key):
                self.tester.info("Key: " + obj.name + " -- " + obj.version_id + "--" + obj.last_modified)
                if prev_obj != None:
                    if self.compare_versions(prev_obj, obj) <= 0:
                        raise Exception("Version listing not sorted correctly, offending key: " + obj.name + " version: " + obj.version_id + " date: " + obj.last_modified)
                prev_obj = obj
            else:
                self.tester.info("Not a key, skipping: " + str(obj))
    def test_object_versioning_suspended(self):
        """Tests object versioning on a suspended bucket, a more complicated test than the Enabled test.

        Uploads null versions while versioning is off, enables versioning and
        adds new versions to a subset of keys, deletes one key, then suspends
        versioning and re-fetches the latest of each key.
        """
        self.tester.info("Testing bucket Versioning-Suspended")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        #Create some keys
        keyname1 = "versionkey1-" + str(int(time.time()))
        keyname2 = "versionkey2-" + str(int(time.time()))
        keyname3 = "versionkey3-" + str(int(time.time()))
        keyname4 = "versionkey4-" + str(int(time.time()))
        keyname5 = "versionkey5-" + str(int(time.time()))
        v1data = self.test_object_data + "--version1"
        v2data = self.test_object_data + "--version2"
        v3data = self.test_object_data + "--version3"
        #A fresh bucket should report no versioning status at all
        vstatus = self.test_bucket.get_versioning_status()
        if vstatus:
            self.fail("Versioning status should be null/Disabled but was: " + str(vstatus))
        else:
            self.tester.info("Bucket versioning is Disabled")
        #Upload the initial (null) version of every key while versioning is off
        self.put_object(bucket=self.test_bucket, object_key=keyname1, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname2, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname3, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname4, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname5, object_data=v1data)
        key1 = self.test_bucket.get_key(keyname1)
        key2 = self.test_bucket.get_key(keyname2)
        key3 = self.test_bucket.get_key(keyname3)
        key4 = self.test_bucket.get_key(keyname4)
        key5 = self.test_bucket.get_key(keyname5)
        self.tester.info("Initial bucket state after object uploads without versioning:")
        self.print_key_info(keys=[key1,key2,key3,key4,key5])
        #Enable versioning
        self.test_bucket.configure_versioning(True)
        if self.test_bucket.get_versioning_status():
            self.tester.info("Versioning status correctly set to enabled")
        else:
            self.tester.info("Versionign status not enabled, should be.")
        #Update a subset of the keys
        #NOTE(review): these etag locals are captured but never used afterwards
        key1_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname1,object_data=v2data)
        key2_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname2,object_data=v2data)
        key3_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname3,object_data=v2data)
        key3_etag3=self.put_object(bucket=self.test_bucket, object_key=keyname3,object_data=v3data)
        #Delete a key
        self.test_bucket.delete_key(keyname5)
        #Suspend versioning
        self.test_bucket.configure_versioning(False)
        #Get latest of each key
        key1=self.test_bucket.get_key(keyname1)
        key2=self.test_bucket.get_key(keyname2)
        key3=self.test_bucket.get_key(keyname3)
        key4=self.test_bucket.get_key(keyname4)
        key5=self.test_bucket.get_key(keyname5)
        #NOTE(review): the remaining steps were planned but never implemented;
        #the test currently makes no assertions after suspending versioning.
        #Delete a key
        #Add a key
        #Add same key again
        #Fetch each key
    def test_object_acl(self):
        """Tests object acl get/set and manipulation.

        Placeholder: deliberately fails until implemented.
        """
        self.fail("Test not implemented")
        #TODO: test custom and canned acls that are both valid an invalid
    def test_object_acl_negative_test(self):
        """Tests error conditions and response for acl issues.

        Placeholder: deliberately fails until implemented.
        """
        self.fail("Test not implemented")
    def test_object_torrent(self):
        """Tests object torrents.

        Placeholder: deliberately fails until the service feature exists.
        """
        self.fail("Feature not implemented yet")
def clean_method(self):
'''This is the teardown method'''
#Delete the testing bucket if it is left-over
self.tester.info('Deleting the buckets used for testing')
for bucket in self.buckets_used:
try:
self.tester.info('Checking bucket ' + bucket + ' for possible cleaning/delete')
if self.tester.s3.head_bucket(bucket) != None:
self.tester.info('Found bucket exists, cleaning it')
self.tester.clear_bucket(bucket)
else:
self.tester.info('Bucket ' + bucket + ' not found, skipping')
except Exception as e:
self.tester.info('Exception checking bucket ' + bucket + ' Exception msg: ' + e.message)
return
def test_multipart_upload(self):
'''Basic multipart upload'''
self.tester.info("Testing multipart upload")
self.tester.info("Creating random file representing part...")
temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="multipart")
temp_file.write(os.urandom(5 * 1024 * 1024))
keyname="multi-" + str(int(time.time()))
self.tester.info("Initiating multipart upload...much upload")
reply = self.initiate_multipart_upload(keyname)
self.tester.info("Uploading parts...Such Parts")
for partnum in range(1, 11):
temp_file.seek(0, os.SEEK_SET)
reply.upload_part_from_file(temp_file, partnum)
self.tester.info("Listing parts...")
self.test_bucket.get_all_multipart_uploads()
self.tester.info("Completing upload...So OSG")
reply.complete_upload()
temp_file.close()
self.tester.info("HEAD request...");
returned_key = self.test_bucket.get_key(keyname)
download_temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="mpu-download")
self.tester.info("Downloading object...very mpu");
returned_key.get_contents_to_file(download_temp_file);
self.tester.info("Deleting object...WOW")
self.test_bucket.delete_key(keyname)
    def test_abort_multipart_upload(self):
        '''Initiate a multipart upload, upload ten parts, then cancel (abort) it.'''
        self.tester.info("Testing abort multipart upload")
        temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="multipart")
        temp_file.write(os.urandom(5 * 1024 * 1024))
        keyname="multi-" + str(int(time.time()))
        reply = self.initiate_multipart_upload(keyname)
        #Re-upload the same 5MB file as parts 1-10, rewinding it each iteration
        for partnum in range(1, 11):
            temp_file.seek(0, os.SEEK_SET)
            reply.upload_part_from_file(temp_file, partnum)
        self.test_bucket.get_all_multipart_uploads()
        self.tester.info("!!!!!!!!!!!!!!!!!!!!!!!!!!DO NOT WANT!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1")
        self.tester.info("Canceling upload")
        reply.cancel_upload()
        temp_file.close()
    def initiate_multipart_upload(self, keyname):
        """Start a multipart upload for *keyname* on the current test bucket and return the upload handle."""
        self.tester.info("Initiating multipart upload " + keyname)
        return self.test_bucket.initiate_multipart_upload(keyname)
def test_presigned_url(self):
"""Tests presigned url operations on the service using regular access/secret keys"""
self.tester.info("Testing presigned url usage with regular access/secret keys")
self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
oneMinute = 1 * 60
objectKey1 = 'presignedurltestobject'
test_headers = {'x-amz-acl': 'public-read', 'x-amz-meta-key1': 'my blah value'}
#Test PUT
httpMethod = 'PUT'
presigned_url = ''
def key_exists():
found_key = self.test_bucket.get_key('presignedurltestobject')
if found_key:
return True
else:
return False
try:
self.tester.info('Port = ' + str(self.tester.s3.port))
presigned_url = self.tester.s3.generate_url(expires_in=oneMinute, method=httpMethod, bucket=self.test_bucket_name, key=objectKey1, query_auth=True, headers=test_headers, response_headers=None, expires_in_absolute=False)
self.tester.info('Using presigned url for PUT: ' + presigned_url)
response = requests.put(url=presigned_url, data='testingcontent123')
self.tester.info('Response: ' + str(response.status_code) + ' - ' + response.text)
if response.status_code != 200:
raise Exception('Error response from server: ' + str(response.status_code))
except Exception as e:
self.fail("Failed on pre-signed put with url: " + presigned_url + ' with: ' + e.message)
self.tester.wait_for_result(key_exists, True, timeout=60, poll_wait=5)
#Test GET
httpMethod = 'GET'
presigned_url = ''
try:
presigned_url = self.tester.s3.generate_url(expires_in=oneMinute, method=httpMethod, bucket=self.test_bucket_name, key=objectKey1, query_auth=True, headers=test_headers, response_headers=None, expires_in_absolute=False)
self.tester.info('Using GET presigned_url: ' + presigned_url)
response = requests.get(url=presigned_url)
self.tester.info('Got response on GET: ' + str(response.status_code) + ' Body: ' + response.text)
if response.status_code != 200:
raise Exception('Error response from server: ' + str(response.status_code))
except Exception as e:
self.fail("Failed on pre-signed put with url: " + presigned_url + ' with exception: ' + e.message)
#Test HEAD
httpMethod = 'HEAD'
presigned_url = ''
try:
presigned_url = self.tester.s3.generate_url(expires_in=oneMinute, method=httpMethod, bucket=self.test_bucket_name, key=objectKey1, query_auth=True, headers=test_headers, response_headers=None, expires_in_absolute=False)
self.tester.info('Using HEAD presigned_url: ' + presigned_url)
response = requests.head(url=presigned_url)
self.tester.info('Got response on HEAD: ' + str(response.status_code) + ' Body: ' + response.text)
if response.status_code != 200:
raise Exception('Error response from server: ' + str(response.status_code))
except Exception as e:
self.fail("Failed on pre-signed put with url: " + presigned_url + ' with exception: ' + e.message)
#Test DELETE
httpMethod = 'DELETE'
presigned_url = ''
try:
presigned_url = self.tester.s3.generate_url(expires_in=oneMinute, method=httpMethod, bucket=self.test_bucket_name, key=objectKey1, query_auth=True, headers=None, response_headers=None, expires_in_absolute=False)
self.tester.info('Using DELETE presigned_url: ' + presigned_url)
response = requests.delete(url=presigned_url)
self.tester.info('Got response on DELETE: ' + str(response.status_code) + ' Body: ' + response.text)
if response.status_code != 204:
raise Exception('Error response from server: ' + str(response.status_code))
except Exception as e:
self.fail("Failed on pre-signed put with url: " + presigned_url + ' with exception: ' + e.message)
def test_presigned_url_sts(self):
"""Tests presigned urls using STS session tokens"""
self.tester.info("Testing presigned url usage with sts session tokens")
self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
oneMinute = 1 * 60
objectKey1 = 'presignedurltestobject'
credentials = self.tester.get_session_token()
calling_format = boto.s3.connection.OrdinaryCallingFormat()
s3connection = boto.connect_s3(
aws_access_key_id=credentials.access_key,
aws_secret_access_key=credentials.secret_key,
security_token=credentials.session_token,
host=self.tester.s3.host,
port=self.tester.s3.port,
path=self.tester.s3.path,
is_secure=self.tester.s3.is_secure,
calling_format=calling_format)
#Test PUT
httpMethod = 'PUT'
presigned_url = ''
try:
self.tester.info('Port = ' + str(self.tester.s3.port))
presigned_url = s3connection.generate_url(expires_in=oneMinute, method=httpMethod, bucket=self.test_bucket_name, key=objectKey1, query_auth=True, headers=None, response_headers=None, expires_in_absolute=False)
self.tester.info('Using presigned url for PUT: ' + presigned_url)
response = requests.put(url=presigned_url, data='testingcontent123')
self.tester.info('Response: ' + str(response.status_code) + ' - ' + response.text)
if response.status_code != 200:
raise Exception('Error response from server: ' + str(response.status_code))
except Exception as e:
self.fail("Failed on pre-signed put with url: " + presigned_url + ' with: ' + e.message)
#Test GET
httpMethod = 'GET'
presigned_url = ''
try:
presigned_url = s3connection.generate_url(expires_in=oneMinute, method=httpMethod, bucket=self.test_bucket_name, key=objectKey1, query_auth=True, headers=None, response_headers=None, expires_in_absolute=False)
self.tester.info('Using GET presigned_url: ' + presigned_url)
response = requests.get(url=presigned_url)
self.tester.info('Got response on GET: ' + str(response.status_code) + ' Body: ' + response.text)
if response.status_code != 200:
raise Exception('Error response from server: ' + str(response.status_code))
except Exception as e:
s3connection.close()
self.fail("Failed on pre-signed put with url: " + presigned_url + ' with exception: ' + e.message)
#Test HEAD
httpMethod = 'HEAD'
presigned_url = ''
try:
presigned_url = s3connection.generate_url(expires_in=oneMinute, method=httpMethod, bucket=self.test_bucket_name, key=objectKey1, query_auth=True, headers=None, response_headers=None, expires_in_absolute=False)
self.tester.info('Using HEAD presigned_url: ' + presigned_url)
response = requests.head(url=presigned_url)
self.tester.info('Got response on HEAD: ' + str(response.status_code) + ' Body: ' + response.text)
if response.status_code != 200:
raise Exception('Error response from server: ' + str(response.status_code))
except Exception as e:
s3connection.close()
self.fail("Failed on pre-signed put with url: " + presigned_url + ' with exception: ' + e.message)
#Test DELETE
httpMethod = 'DELETE'
presigned_url = ''
try:
presigned_url = s3connection.generate_url(expires_in=oneMinute, method=httpMethod, bucket=self.test_bucket_name, key=objectKey1, query_auth=True, headers=None, response_headers=None, expires_in_absolute=False)
self.tester.info('Using DELETE presigned_url: ' + presigned_url)
response = requests.delete(url=presigned_url)
self.tester.info('Got response on DELETE: ' + str(response.status_code) + ' Body: ' + response.text)
if response.status_code != 204:
raise Exception('Error response from server: ' + str(response.status_code))
except Exception as e:
s3connection.close()
self.fail("Failed on pre-signed put with url: " + presigned_url + ' with exception: ' + e.message)
s3connection.close()
if __name__ == "__main__":
    testcase = ObjectTestSuite()
    ### Either use the list of tests passed from config/command line to determine what subset of tests to run
    # Renamed from 'list' so the builtin list() is not shadowed.
    test_names = testcase.args.tests or ['test_object_basic_ops',
                                         ### 'test_object_byte_offset_read',
                                         'test_object_large_objects',
                                         'test_object_versionlisting',
                                         'test_object_versioning_enabled',
                                         'test_object_versioning_suspended',
                                         'test_object_multipart',
                                         'test_object_post',
                                         'test_object_post_large',
                                         'test_object_post_sts',
                                         'test_presigned_url',
                                         'test_presigned_url_sts']
    ### Convert test suite methods to EutesterUnitTest objects
    unit_list = [ ]
    for test in test_names:
        unit_list.append( testcase.create_testunit_by_name(test) )
    ### Run the EutesterUnitTest objects
    result = testcase.run_test_case_list(unit_list,clean_on_exit=True)
    exit(result)
| bsd-2-clause |
kevclarx/ansible | lib/ansible/modules/cloud/ovirt/ovirt_snapshots_facts.py | 45 | 4381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_snapshots_facts
short_description: Retrieve facts about one or more oVirt/RHV virtual machine snapshots
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV virtual machine snapshots."
notes:
- "This module creates a new top-level C(ovirt_snapshots) fact, which
contains a list of snapshots."
options:
vm:
description:
- "Name of the VM with snapshot."
required: true
description:
description:
- "Description of the snapshot, can be used as glob expression."
snapshot_id:
description:
            - "ID of the snapshot we want to retrieve facts about."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all snapshots which description start with C(update) for VM named C(centos7):
- ovirt_snapshots_facts:
vm: centos7
description: update*
- debug:
var: ovirt_snapshots
'''
RETURN = '''
ovirt_snapshots:
    description: "List of dictionaries describing the snapshot. Snapshot attributes are mapped to dictionary keys,
all snapshot attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
search_by_name,
)
def main():
    """Module entry point: gather facts about snapshots of a single oVirt/RHV VM.

    Filters by glob 'description' or exact 'snapshot_id' when provided,
    otherwise returns all snapshots. Results are exposed as the
    C(ovirt_snapshots) Ansible fact; any error ends the module via fail_json.
    """
    argument_spec = ovirt_facts_full_argument_spec(
        vm=dict(required=True),
        description=dict(default=None),
        snapshot_id=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # BUG FIX: initialize these before the try block. The finally clause
    # previously referenced 'connection'/'auth' unconditionally, so a failure
    # in create_connection() raised NameError and masked the real error.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        vms_service = connection.system_service().vms_service()
        vm_name = module.params['vm']
        vm = search_by_name(vms_service, vm_name)
        if vm is None:
            raise Exception("VM '%s' was not found." % vm_name)

        snapshots_service = vms_service.service(vm.id).snapshots_service()
        if module.params['description']:
            # Treat 'description' as a glob expression and keep matching snapshots.
            snapshots = [
                e for e in snapshots_service.list()
                if fnmatch.fnmatch(e.description, module.params['description'])
            ]
        elif module.params['snapshot_id']:
            # Exact lookup by snapshot id.
            snapshots = [
                snapshots_service.snapshot_service(module.params['snapshot_id']).get()
            ]
        else:
            snapshots = snapshots_service.list()
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_snapshots=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in snapshots
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out of sessions we created ourselves (no pre-supplied token).
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
LeetCoinTeam/lc_tactoe | jinja2/exceptions.py | 398 | 4530 | # -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
class TemplateError(Exception):
    """Baseclass for all template errors."""
    def __init__(self, message=None):
        # Normalize to a utf-8 encoded bytestring (Python 2 'str') before
        # handing it to Exception, so self.args always holds bytes or None.
        if message is not None:
            message = unicode(message).encode('utf-8')
        Exception.__init__(self, message)
    @property
    def message(self):
        # Decode the stored utf-8 bytestring back to unicode on access;
        # returns None implicitly when no message was stored.
        if self.args:
            message = self.args[0]
            if message is not None:
                return message.decode('utf-8', 'replace')
class TemplateNotFound(IOError, LookupError, TemplateError):
    """Raised if a template does not exist.

    Also subclasses IOError and LookupError so code catching either of
    those historical exception types keeps working.
    """
    # looks weird, but removes the warning descriptor that just
    # bogusly warns us about message being deprecated
    message = None
    def __init__(self, name, message=None):
        IOError.__init__(self)
        if message is None:
            # Fall back to the template name as the error message.
            message = name
        self.message = message
        self.name = name
        # Kept as a list for symmetry with TemplatesNotFound below.
        self.templates = [name]
    def __str__(self):
        # Python 2: byte-string form of the unicode message.
        return self.message.encode('utf-8')
    # unicode goes after __str__ because we configured 2to3 to rename
    # __unicode__ to __str__. because the 2to3 tree is not designed to
    # remove nodes from it, we leave the above __str__ around and let
    # it override at runtime.
    def __unicode__(self):
        return self.message
class TemplatesNotFound(TemplateNotFound):
    """Like :class:`TemplateNotFound` but raised if multiple templates
    are selected.  This is a subclass of :class:`TemplateNotFound`
    exception, so just catching the base exception will catch both.

    .. versionadded:: 2.2
    """
    def __init__(self, names=(), message=None):
        if message is None:
            # Fixed typo in the default message: "non of" -> "none of".
            message = u'none of the templates given were found: ' + \
                      u', '.join(map(unicode, names))
        # Report the last name tried (or None for an empty list) as the
        # primary name, but keep the full list in ``templates``.
        TemplateNotFound.__init__(self, names and names[-1] or None, message)
        self.templates = list(names)
class TemplateSyntaxError(TemplateError):
    """Raised to tell the user that there is a problem with the template.

    Carries the location (``lineno``, ``name``, ``filename``) and, once the
    debug machinery fills it in, the template ``source`` for context.
    """
    def __init__(self, message, lineno, name=None, filename=None):
        TemplateError.__init__(self, message)
        self.lineno = lineno
        self.name = name
        self.filename = filename
        # Filled in later by the debug support; used to show the offending line.
        self.source = None
        # this is set to True if the debug.translate_syntax_error
        # function translated the syntax error into a new traceback
        self.translated = False
    def __str__(self):
        # Python 2: delegate to __unicode__ and encode.
        return unicode(self).encode('utf-8')
    # unicode goes after __str__ because we configured 2to3 to rename
    # __unicode__ to __str__. because the 2to3 tree is not designed to
    # remove nodes from it, we leave the above __str__ around and let
    # it override at runtime.
    def __unicode__(self):
        # for translated errors we only return the message
        if self.translated:
            return self.message
        # otherwise attach some stuff
        location = 'line %d' % self.lineno
        name = self.filename or self.name
        if name:
            location = 'File "%s", %s' % (name, location)
        lines = [self.message, '  ' + location]
        # if the source is set, add the line to the output
        if self.source is not None:
            try:
                line = self.source.splitlines()[self.lineno - 1]
            except IndexError:
                line = None
            if line:
                lines.append('    ' + line.strip())
        return u'\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
    """Like a template syntax error, but covers cases where something in the
    template caused an error at compile time that wasn't necessarily caused
    by a syntax error (i.e. raised by the compiler/code generator rather
    than the parser).  However it's a direct subclass of
    :exc:`TemplateSyntaxError` and has the same attributes.
    """
class TemplateRuntimeError(TemplateError):
    """A generic runtime error in the template engine.  Under some situations
    Jinja may raise this exception.  Base class for the more specific
    runtime errors below.
    """
class UndefinedError(TemplateRuntimeError):
    """Raised if a template tries to operate on :class:`Undefined`
    (e.g. printing or iterating a strictly-undefined variable)."""
class SecurityError(TemplateRuntimeError):
    """Raised if a template tries to do something insecure if the
    sandbox is enabled (e.g. accessing a forbidden attribute).
    """
class FilterArgumentError(TemplateRuntimeError):
    """This error is raised if a filter was called with inappropriate
    arguments (wrong number or invalid values).
    """
| apache-2.0 |
jeremyfix/pylearn2 | pylearn2/sandbox/cuda_convnet/tests/test_filter_acts_strided.py | 44 | 5625 | from __future__ import print_function
__authors__ = "Heng Luo"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from theano.tensor import grad, constant
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.sandbox.cuda import gpu_from_host
from theano.sandbox.cuda import host_from_gpu
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv import conv2d
from theano import function
from theano import tensor as T
import warnings
from theano.sandbox import cuda
from theano.sandbox.cuda.var import float32_shared_constructor
def FilterActs_python(images,
                      filters,
                      stride=1,
                      ):
    """Pure-NumPy reference implementation of cuda-convnet's FilterActs.

    Computes a "valid" strided cross-correlation, zero-padding on the
    bottom/right when the stride does not tile the image evenly, so the
    GPU op can be checked against it.

    Parameters
    ----------
    images : ndarray, float32, shape (channels, rows, cols, batch_size)
    filters : ndarray, float32,
        shape (channels, filter_rows, filter_cols, num_filters);
        filters must be square (filter_rows == filter_cols).
    stride : int, with 1 <= stride <= filter_rows

    Returns
    -------
    h : ndarray, float32, shape (num_filters, out_rows, out_cols, batch_size)
    """
    if int(stride) != stride:
        raise TypeError('stride must be an int', stride)
    stride = int(stride)
    channels, rows, cols, batch_size = images.shape
    _channels, filter_rows, filter_cols, num_filters = filters.shape
    assert rows >= filter_rows
    assert cols >= filter_cols
    assert filter_cols == filter_rows
    assert channels == _channels
    assert stride <= filter_rows and stride >= 1
    if stride > 1:
        # When the filter does not tile the image evenly, zero-pad on the
        # bottom/right so one extra window fits.  Floor division (//) keeps
        # the arithmetic integral under Python 3 as well (plain / produced
        # floats there and broke np.zeros(h_shape)).
        if (rows - filter_rows) % stride == 0:
            stride_padding_rows = 0
        else:
            stride_padding_rows = ((rows - filter_rows) // stride + 1) * stride + filter_rows - rows
        if (cols - filter_cols) % stride == 0:
            stride_padding_cols = 0
        else:
            stride_padding_cols = ((cols - filter_cols) // stride + 1) * stride + filter_cols - cols
        new_rows = rows + stride_padding_rows
        new_cols = cols + stride_padding_cols
        idx_rows = (new_rows - filter_rows) // stride
        idx_cols = (new_cols - filter_cols) // stride
        new_images = np.zeros((channels, new_rows, new_cols, batch_size), dtype='float32')
        new_images[:, :rows, :cols, :] = images
        h_shape = (num_filters,
                   idx_rows + 1,
                   idx_cols + 1,
                   batch_size
                   )
    else:
        new_images = images
        h_shape = (num_filters,
                   rows - filter_rows + 1,
                   cols - filter_cols + 1,
                   batch_size
                   )
    h = np.zeros(h_shape, dtype='float32')
    n_dim_filter = channels * filter_rows * filter_cols
    # Flatten each filter to a row vector and each receptive field to a
    # column vector so one matrix product evaluates all filters at once.
    vector_filters = filters.reshape(n_dim_filter, num_filters).T
    for idx_h_rows in range(h_shape[1]):
        for idx_h_cols in range(h_shape[2]):
            rc_images = new_images[:,
                                   idx_h_rows * stride:idx_h_rows * stride + filter_rows,
                                   idx_h_cols * stride:idx_h_cols * stride + filter_cols,
                                   :]
            rc_hidacts = np.dot(
                vector_filters,
                rc_images.reshape(n_dim_filter, batch_size))
            h[:, idx_h_rows, idx_h_cols, :] = rc_hidacts
    return h
def test_filter_acts_strided():
    """Check the GPU FilterActs op against the NumPy reference for every
    legal stride of each test shape.  Requires a CUDA-capable device
    (guarded by skip_if_no_gpu at import time)."""
    # Tests that FilterActs with all possible strides
    rng = np.random.RandomState([2012,10,9])
    #Each list in shape_list :
    #[img_shape,filter_shape]
    #[(channels, rows, cols, batch_size),(channels, filter_rows, filter_cols, num_filters)]
    shape_list = [[(1, 7, 8, 5), (1, 2, 2, 16)],
                  [(3, 7, 8, 5), (3, 3, 3, 16)],
                  [(16, 11, 11, 4), (16, 4, 4, 16)],
                  [(3, 20, 20, 3), (3, 5, 5, 16)],
                  [(3, 21, 21, 3), (3, 6, 6, 16)],
                  ]
    for test_idx in xrange(len(shape_list)):
        images = rng.uniform(-1., 1., shape_list[test_idx][0]).astype('float32')
        filters = rng.uniform(-1., 1., shape_list[test_idx][1]).astype('float32')
        gpu_images = float32_shared_constructor(images,name='images')
        gpu_filters = float32_shared_constructor(filters,name='filters')
        print("test case %d..."%(test_idx+1))
        # Every stride from 1 up to the filter size is legal.
        for ii in xrange(filters.shape[1]):
            stride = ii + 1
            output = FilterActs(stride=stride)(gpu_images, gpu_filters)
            output = host_from_gpu(output)
            f = function([], output)
            output_val = f()
            output_python = FilterActs_python(images,filters,stride)
            # 8.6e-6 absolute tolerance: float32 accumulation differences
            # between the GPU kernel and np.dot are expected.
            if np.abs(output_val - output_python).max() > 8.6e-6:
                assert type(output_val) == type(output_python)
                assert output_val.dtype == output_python.dtype
                if output_val.shape != output_python.shape:
                    print('cuda-convnet shape: ',output_val.shape)
                    print('python conv shape: ',output_python.shape)
                    assert False
                # Shapes match but values differ: dump diagnostics, then fail.
                err = np.abs(output_val - output_python)
                print('stride %d'%stride)
                print('absolute error range: ', (err.min(), err.max()))
                print('mean absolute error: ', err.mean())
                print('cuda-convnet value range: ', (output_val.min(), output_val.max()))
                print('python conv value range: ', (output_python.min(), output_python.max()))
                #assert False
        #print "pass"
# Allow running this test standalone, outside the test runner.
if __name__ == '__main__':
    test_filter_acts_strided()
| bsd-3-clause |
wkschwartz/django | tests/nested_foreign_keys/tests.py | 62 | 9495 | from django.test import TestCase
from .models import (
Event, Movie, Package, PackageNullFK, Person, Screening, ScreeningNullFK,
)
# These are tests for #16715. The basic scheme is always the same: 3 models with
# 2 relations. The first relation may be null, while the second is non-nullable.
# In some cases, Django would pick the wrong join type for the second relation,
# resulting in missing objects in the queryset.
#
# Model A
# | (Relation A/B : nullable)
# Model B
# | (Relation B/C : non-nullable)
# Model C
#
# Because of the possibility of NULL rows resulting from the LEFT OUTER JOIN
# between Model A and Model B (i.e. instances of A without reference to B),
# the second join must also be LEFT OUTER JOIN, so that we do not ignore
# instances of A that do not reference B.
#
# Relation A/B can either be an explicit foreign key or an implicit reverse
# relation such as introduced by one-to-one relations (through multi-table
# inheritance).
class NestedForeignKeysTests(TestCase):
    """Regression tests for #16715 with a 3-model chain
    (Event/Package -> Screening -> Movie).

    NOTE(review): the assertions deliberately use ``len(queryset)`` and
    ``values(...)`` forms instead of ``count()`` because the bug lay in
    how exactly those querysets compile their joins — do not "simplify"
    them.
    """
    @classmethod
    def setUpTestData(cls):
        # One director/movie pair shared by all tests in this class.
        cls.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
        cls.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=cls.director)
    # This test failed in #16715 because in some cases INNER JOIN was selected
    # for the second foreign key relation instead of LEFT OUTER JOIN.
    def test_inheritance(self):
        # One Event without a Screening (the nullable leg is NULL) and one with.
        Event.objects.create()
        Screening.objects.create(movie=self.movie)
        self.assertEqual(len(Event.objects.all()), 2)
        self.assertEqual(len(Event.objects.select_related('screening')), 2)
        # This failed.
        self.assertEqual(len(Event.objects.select_related('screening__movie')), 2)
        self.assertEqual(len(Event.objects.values()), 2)
        self.assertEqual(len(Event.objects.values('screening__pk')), 2)
        self.assertEqual(len(Event.objects.values('screening__movie__pk')), 2)
        self.assertEqual(len(Event.objects.values('screening__movie__title')), 2)
        # This failed.
        self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
        # Simple filter/exclude queries for good measure.
        self.assertEqual(Event.objects.filter(screening__movie=self.movie).count(), 1)
        self.assertEqual(Event.objects.exclude(screening__movie=self.movie).count(), 1)
    # These all work because the second foreign key in the chain has null=True.
    def test_inheritance_null_FK(self):
        Event.objects.create()
        ScreeningNullFK.objects.create(movie=None)
        ScreeningNullFK.objects.create(movie=self.movie)
        self.assertEqual(len(Event.objects.all()), 3)
        self.assertEqual(len(Event.objects.select_related('screeningnullfk')), 3)
        self.assertEqual(len(Event.objects.select_related('screeningnullfk__movie')), 3)
        self.assertEqual(len(Event.objects.values()), 3)
        self.assertEqual(len(Event.objects.values('screeningnullfk__pk')), 3)
        self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk')), 3)
        self.assertEqual(len(Event.objects.values('screeningnullfk__movie__title')), 3)
        self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk', 'screeningnullfk__movie__title')), 3)
        self.assertEqual(Event.objects.filter(screeningnullfk__movie=self.movie).count(), 1)
        self.assertEqual(Event.objects.exclude(screeningnullfk__movie=self.movie).count(), 2)
    def test_null_exclude(self):
        # Excluding by a related id must keep rows whose FK is NULL.
        screening = ScreeningNullFK.objects.create(movie=None)
        ScreeningNullFK.objects.create(movie=self.movie)
        self.assertEqual(
            list(ScreeningNullFK.objects.exclude(movie__id=self.movie.pk)),
            [screening])
    # This test failed in #16715 because in some cases INNER JOIN was selected
    # for the second foreign key relation instead of LEFT OUTER JOIN.
    def test_explicit_ForeignKey(self):
        # Same scenario, but the nullable leg is an explicit FK (Package).
        Package.objects.create()
        screening = Screening.objects.create(movie=self.movie)
        Package.objects.create(screening=screening)
        self.assertEqual(len(Package.objects.all()), 2)
        self.assertEqual(len(Package.objects.select_related('screening')), 2)
        self.assertEqual(len(Package.objects.select_related('screening__movie')), 2)
        self.assertEqual(len(Package.objects.values()), 2)
        self.assertEqual(len(Package.objects.values('screening__pk')), 2)
        self.assertEqual(len(Package.objects.values('screening__movie__pk')), 2)
        self.assertEqual(len(Package.objects.values('screening__movie__title')), 2)
        # This failed.
        self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
        self.assertEqual(Package.objects.filter(screening__movie=self.movie).count(), 1)
        self.assertEqual(Package.objects.exclude(screening__movie=self.movie).count(), 1)
    # These all work because the second foreign key in the chain has null=True.
    def test_explicit_ForeignKey_NullFK(self):
        PackageNullFK.objects.create()
        screening = ScreeningNullFK.objects.create(movie=None)
        screening_with_movie = ScreeningNullFK.objects.create(movie=self.movie)
        PackageNullFK.objects.create(screening=screening)
        PackageNullFK.objects.create(screening=screening_with_movie)
        self.assertEqual(len(PackageNullFK.objects.all()), 3)
        self.assertEqual(len(PackageNullFK.objects.select_related('screening')), 3)
        self.assertEqual(len(PackageNullFK.objects.select_related('screening__movie')), 3)
        self.assertEqual(len(PackageNullFK.objects.values()), 3)
        self.assertEqual(len(PackageNullFK.objects.values('screening__pk')), 3)
        self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk')), 3)
        self.assertEqual(len(PackageNullFK.objects.values('screening__movie__title')), 3)
        self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk', 'screening__movie__title')), 3)
        self.assertEqual(PackageNullFK.objects.filter(screening__movie=self.movie).count(), 1)
        self.assertEqual(PackageNullFK.objects.exclude(screening__movie=self.movie).count(), 2)
# Some additional tests for #16715. The only difference is the depth of the
# nesting as we now use 4 models instead of 3 (and thus 3 relations). This
# checks if promotion of join types works for deeper nesting too.
class DeeplyNestedForeignKeysTests(TestCase):
    """Same regression (#16715) with a 4-model chain
    (Event/Package -> Screening -> Movie -> Person): join-type promotion
    must also propagate through the extra level of nesting.

    NOTE(review): as above, the ``len(queryset)``/``values(...)`` forms
    are the regression surface — keep them as written.
    """
    @classmethod
    def setUpTestData(cls):
        cls.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
        cls.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=cls.director)
    def test_inheritance(self):
        # One Event without a Screening and one with; both must survive the joins.
        Event.objects.create()
        Screening.objects.create(movie=self.movie)
        self.assertEqual(len(Event.objects.all()), 2)
        self.assertEqual(len(Event.objects.select_related('screening__movie__director')), 2)
        self.assertEqual(len(Event.objects.values()), 2)
        self.assertEqual(len(Event.objects.values('screening__movie__director__pk')), 2)
        self.assertEqual(len(Event.objects.values('screening__movie__director__name')), 2)
        self.assertEqual(
            len(Event.objects.values('screening__movie__director__pk', 'screening__movie__director__name')),
            2
        )
        self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
        self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
        self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
        self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
        self.assertEqual(Event.objects.filter(screening__movie__director=self.director).count(), 1)
        self.assertEqual(Event.objects.exclude(screening__movie__director=self.director).count(), 1)
    def test_explicit_ForeignKey(self):
        # Same scenario via the explicit nullable FK on Package.
        Package.objects.create()
        screening = Screening.objects.create(movie=self.movie)
        Package.objects.create(screening=screening)
        self.assertEqual(len(Package.objects.all()), 2)
        self.assertEqual(len(Package.objects.select_related('screening__movie__director')), 2)
        self.assertEqual(len(Package.objects.values()), 2)
        self.assertEqual(len(Package.objects.values('screening__movie__director__pk')), 2)
        self.assertEqual(len(Package.objects.values('screening__movie__director__name')), 2)
        self.assertEqual(
            len(Package.objects.values('screening__movie__director__pk', 'screening__movie__director__name')),
            2
        )
        self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
        self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
        self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
        self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
        self.assertEqual(Package.objects.filter(screening__movie__director=self.director).count(), 1)
        self.assertEqual(Package.objects.exclude(screening__movie__director=self.director).count(), 1)
| bsd-3-clause |
vit2/vit-e2 | lib/python/Plugins/Extensions/MediaPlayer/settings.py | 9 | 4987 | from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Components.FileList import FileList
from Components.Sources.StaticText import StaticText
from Components.MediaPlayer import PlayList
from Components.config import config, getConfigListEntry, ConfigSubsection, configfile, ConfigText, ConfigYesNo, ConfigDirectory
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
# Persistent MediaPlayer settings, registered on the global enigma2 config tree.
config.mediaplayer = ConfigSubsection()
# Restart the playlist from the top when it reaches the end.
config.mediaplayer.repeat = ConfigYesNo(default=False)
config.mediaplayer.savePlaylistOnExit = ConfigYesNo(default=True)
# When enabled, the file browser reopens in the last visited directory;
# otherwise defaultDir below is used as the start directory.
config.mediaplayer.saveDirOnExit = ConfigYesNo(default=False)
config.mediaplayer.defaultDir = ConfigDirectory()
# presumably an alternate HTTP User-Agent for streaming URLs — not
# referenced in this chunk; verify against the player code.
config.mediaplayer.useAlternateUserAgent = ConfigYesNo(default=False)
config.mediaplayer.alternateUserAgent = ConfigText(default="")
config.mediaplayer.sortPlaylists = ConfigYesNo(default=False)
config.mediaplayer.alwaysHideInfoBar = ConfigYesNo(default=True)
# Expose a MediaPlayer entry directly in the main menu.
config.mediaplayer.onMainMenu = ConfigYesNo(default=False)
class DirectoryBrowser(Screen, HelpableScreen):
    """Modal file-system browser used to pick a directory (or file).

    Closes with the selected path, or with False when cancelled.
    """
    def __init__(self, session, currDir):
        Screen.__init__(self, session)
        # for the skin: first try MediaPlayerDirectoryBrowser, then FileBrowser, this allows individual skinning
        self.skinName = ["MediaPlayerDirectoryBrowser", "FileBrowser" ]
        HelpableScreen.__init__(self)
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Use"))
        self.filelist = FileList(currDir, matchingPattern="")
        self["filelist"] = self.filelist
        key_handlers = {
            "green": self.use,
            "red": self.exit,
            "ok": self.ok,
            "cancel": self.exit,
        }
        self["FilelistActions"] = ActionMap(["SetupActions", "ColorActions"], key_handlers)
        self.onLayoutFinish.append(self.layoutFinished)
    def layoutFinished(self):
        # The window title can only be applied once the layout exists.
        self.setTitle(_("Directory browser"))
    def ok(self):
        # OK only descends into directories; selection happens via "Use".
        if self.filelist.canDescent():
            self.filelist.descent()
    def use(self):
        current_dir = self["filelist"].getCurrentDirectory()
        if current_dir is not None:
            selected = self["filelist"].getFilename()
            # A "filename" longer than the current directory is a subdirectory
            # entry: descend into it and return the resulting directory.
            if self.filelist.canDescent() and selected and len(selected) > len(current_dir):
                self.filelist.descent()
                self.close(self["filelist"].getCurrentDirectory())
            else:
                self.close(selected)
    def exit(self):
        self.close(False)
class MediaPlayerSettings(Screen,ConfigListScreen):
def __init__(self, session, parent):
Screen.__init__(self, session)
# for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
self.skinName = ["MediaPlayerSettings", "Setup" ]
self.setup_title = _("Edit settings")
self.onChangedEntry = [ ]
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
ConfigListScreen.__init__(self, [], session = session, on_change = self.changedEntry)
self.parent = parent
self.initConfigList()
config.mediaplayer.saveDirOnExit.addNotifier(self.initConfigList)
self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.save,
"red": self.cancel,
"cancel": self.cancel,
"ok": self.ok,
}, -2)
def layoutFinished(self):
self.setTitle(self.setup_title)
def initConfigList(self, element=None):
print "[initConfigList]", element
try:
self.list = []
self.list.append(getConfigListEntry(_("repeat playlist"), config.mediaplayer.repeat))
self.list.append(getConfigListEntry(_("save playlist on exit"), config.mediaplayer.savePlaylistOnExit))
self.list.append(getConfigListEntry(_("save last directory on exit"), config.mediaplayer.saveDirOnExit))
if not config.mediaplayer.saveDirOnExit.getValue():
self.list.append(getConfigListEntry(_("start directory"), config.mediaplayer.defaultDir))
self.list.append(getConfigListEntry(_("sorting of playlists"), config.mediaplayer.sortPlaylists))
self.list.append(getConfigListEntry(_("Always hide infobar"), config.mediaplayer.alwaysHideInfoBar))
self.list.append(getConfigListEntry(_("show mediaplayer on mainmenu"), config.mediaplayer.onMainMenu))
self["config"].setList(self.list)
except KeyError:
print "keyError"
def changedConfigList(self):
self.initConfigList()
def ok(self):
if self["config"].getCurrent()[1] == config.mediaplayer.defaultDir:
self.session.openWithCallback(self.DirectoryBrowserClosed, DirectoryBrowser, self.parent.filelist.getCurrentDirectory())
def DirectoryBrowserClosed(self, path):
print "PathBrowserClosed:" + str(path)
if path != False:
config.mediaplayer.defaultDir.setValue(path)
def save(self):
for x in self["config"].list:
x[1].save()
self.close()
def cancel(self):
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
| gpl-2.0 |
danieljaouen/ansible | lib/ansible/modules/cloud/google/gcp_pubsub_topic.py | 12 | 6369 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_pubsub_topic
description:
- A named resource to which messages are sent by publishers.
short_description: Creates a GCP Topic
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices: ['present', 'absent']
default: 'present'
name:
description:
- Name of the topic.
required: false
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a topic
gcp_pubsub_topic:
name: test-topic1
project: "test_project"
auth_kind: "service_account"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- Name of the topic.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
    """Main function: reconcile the Pub/Sub topic with the desired state."""
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(type='str')
        )
    )
    # Default OAuth scope for the Pub/Sub API when the user supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub']
    desired_state = module.params['state']
    link = self_link(module)
    fetch = fetch_resource(module, link)
    changed = False
    if fetch:
        # Resource exists remotely.
        if desired_state == 'present':
            if is_different(module, fetch):
                fetch = update(module, link)
                changed = True
        else:
            delete(module, link)
            fetch = {}
            changed = True
    elif desired_state == 'present':
        # Resource is absent but should exist.
        fetch = create(module, link)
        changed = True
    else:
        # Absent and should stay absent: nothing to do.
        fetch = {}
    fetch.update({'changed': changed})
    module.exit_json(**fetch)
def create(module, link):
    """Create the topic (Pub/Sub uses PUT for topic creation)."""
    session = GcpSession(module, 'pubsub')
    response = session.put(link, resource_to_request(module))
    return return_if_object(module, response)
def update(module, link):
    """Update the topic in place via PUT."""
    session = GcpSession(module, 'pubsub')
    response = session.put(link, resource_to_request(module))
    return return_if_object(module, response)
def delete(module, link):
    """Delete the topic at ``link``."""
    session = GcpSession(module, 'pubsub')
    return return_if_object(module, session.delete(link))
def resource_to_request(module):
    """Build the API request body from the module parameters.

    Empty/falsy fields are dropped, as the generated API rejects them.
    """
    request = encode_request({u'name': module.params.get('name')}, module)
    return {key: value for key, value in request.items() if value}
def fetch_resource(module, link):
    """GET the resource at ``link``; returns None when it does not exist."""
    session = GcpSession(module, 'pubsub')
    return return_if_object(module, session.get(link))
def self_link(module):
    """Canonical URL of this topic resource."""
    template = "https://pubsub.googleapis.com/v1/projects/{project}/topics/{name}"
    return template.format(**module.params)
def collection(module):
    """URL of the topics collection for the configured project."""
    template = "https://pubsub.googleapis.com/v1/projects/{project}/topics"
    return template.format(**module.params)
def return_if_object(module, response):
    """Decode an HTTP response into a dict, or None for 404/204.

    Fails the module on HTTP errors, invalid JSON, or API-reported errors.
    """
    # "Not found" and "no content" both mean there is nothing to return.
    if response.status_code in (404, 204):
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
    result = decode_request(result, module)
    api_errors = navigate_hash(result, ['error', 'errors'])
    if api_errors:
        module.fail_json(msg=api_errors)
    return result
def is_different(module, response):
    """Return True when the desired request differs from the live resource."""
    request = decode_request(resource_to_request(module), module)
    response = response_to_hash(module, response)
    # Compare only keys present on both sides: everything else is either
    # output-only or user-only and must not trigger an update.
    response_vals = {k: v for k, v in response.items() if k in request}
    request_vals = {k: v for k, v in request.items() if k in response}
    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Project the API response down to the user-settable fields only."""
    return {u'name': response.get(u'name')}
def decode_request(response, module):
    """Strip the ``projects/<p>/topics/`` prefix from the name, in place.

    Mutates and returns ``response``.
    """
    if 'name' in response:
        full_name = response['name']
        response['name'] = full_name.split('/')[-1]
    return response
def encode_request(request, module):
    """Expand the short topic name to its fully-qualified form, in place."""
    parts = ['projects', module.params['project'],
             'topics', module.params['name']]
    request['name'] = '/'.join(parts)
    return request
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
JamesGuthrie/libcloud | libcloud/storage/drivers/dummy.py | 46 | 18778 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import random
import hashlib
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import b
if PY3:
from io import FileIO as file
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ObjectDoesNotExistError
class DummyFileObject(file):
    """File-like object producing random lowercase ASCII data for tests.

    ``read`` is a generator yielding ``yield_count`` chunks of
    ``chunk_len`` characters each; ``len()`` reports the total size.
    """
    def __init__(self, yield_count=5, chunk_len=10):
        self._yield_count = yield_count
        self._chunk_len = chunk_len

    def read(self, size):
        # Generator: yields _yield_count chunks, then finishes.  A plain
        # return is used instead of `raise StopIteration`, which PEP 479
        # turns into a RuntimeError inside generators on Python 3.7+.
        i = 0
        while i < self._yield_count:
            yield self._get_chunk(self._chunk_len)
            i += 1
        return

    def _get_chunk(self, chunk_len):
        # Fixed: the original iterated over an int (TypeError) and ignored
        # chunk_len.  Build a chunk_len-character string of random letters
        # from the 97..120 ('a'..'x') code-point range.
        return ''.join(chr(random.randint(97, 120)) for _ in range(chunk_len))

    def __len__(self):
        return self._yield_count * self._chunk_len
class DummyIterator(object):
    """Iterator over a fixed sequence that tracks an MD5 hash of the
    items yielded so far (used to verify streamed uploads)."""
    def __init__(self, data=None):
        self.hash = hashlib.md5()
        self._data = data or []
        self._current_item = 0

    def get_md5_hash(self):
        # Hex digest of everything yielded up to this point.
        return self.hash.hexdigest()

    def next(self):
        remaining = len(self._data) - self._current_item
        if remaining == 0:
            raise StopIteration
        value = self._data[self._current_item]
        self.hash.update(b(value))
        self._current_item += 1
        return value

    def __next__(self):
        # Python 3 iterator protocol delegates to the Python 2 style method.
        return self.next()
class DummyStorageDriver(StorageDriver):
"""
Dummy Storage driver.
>>> from libcloud.storage.drivers.dummy import DummyStorageDriver
>>> driver = DummyStorageDriver('key', 'secret')
>>> container = driver.create_container(container_name='test container')
>>> container
<Container: name=test container, provider=Dummy Storage Provider>
>>> container.name
'test container'
>>> container.extra['object_count']
0
"""
name = 'Dummy Storage Provider'
website = 'http://example.com'
def __init__(self, api_key, api_secret):
"""
:param api_key: API key or username to used (required)
:type api_key: ``str``
:param api_secret: Secret password to be used (required)
:type api_secret: ``str``
:rtype: ``None``
"""
self._containers = {}
def get_meta_data(self):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> driver.get_meta_data()['object_count']
0
>>> driver.get_meta_data()['container_count']
0
>>> driver.get_meta_data()['bytes_used']
0
>>> container_name = 'test container 1'
>>> container = driver.create_container(container_name=container_name)
>>> container_name = 'test container 2'
>>> container = driver.create_container(container_name=container_name)
>>> obj = container.upload_object_via_stream(
... object_name='test object', iterator=DummyFileObject(5, 10),
... extra={})
>>> driver.get_meta_data()['object_count']
1
>>> driver.get_meta_data()['container_count']
2
>>> driver.get_meta_data()['bytes_used']
50
:rtype: ``dict``
"""
container_count = len(self._containers)
object_count = sum([len(self._containers[container]['objects']) for
container in self._containers])
bytes_used = 0
for container in self._containers:
objects = self._containers[container]['objects']
for _, obj in objects.items():
bytes_used += obj.size
return {'container_count': int(container_count),
'object_count': int(object_count),
'bytes_used': int(bytes_used)}
def iterate_containers(self):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> list(driver.iterate_containers())
[]
>>> container_name = 'test container 1'
>>> container = driver.create_container(container_name=container_name)
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> container.name
'test container 1'
>>> container_name = 'test container 2'
>>> container = driver.create_container(container_name=container_name)
>>> container
<Container: name=test container 2, provider=Dummy Storage Provider>
>>> container = driver.create_container(
... container_name='test container 2')
... #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerAlreadyExistsError:
>>> container_list=list(driver.iterate_containers())
>>> sorted([c.name for c in container_list])
['test container 1', 'test container 2']
@inherits: :class:`StorageDriver.iterate_containers`
"""
for container in list(self._containers.values()):
yield container['container']
def list_container_objects(self, container):
container = self.get_container(container.name)
return container.objects
def get_container(self, container_name):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerDoesNotExistError:
>>> container_name = 'test container 1'
>>> container = driver.create_container(container_name=container_name)
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> container.name
'test container 1'
>>> driver.get_container('test container 1')
<Container: name=test container 1, provider=Dummy Storage Provider>
@inherits: :class:`StorageDriver.get_container`
"""
if container_name not in self._containers:
raise ContainerDoesNotExistError(driver=self, value=None,
container_name=container_name)
return self._containers[container_name]['container']
def get_container_cdn_url(self, container):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerDoesNotExistError:
>>> container_name = 'test container 1'
>>> container = driver.create_container(container_name=container_name)
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> container.name
'test container 1'
>>> container.get_cdn_url()
'http://www.test.com/container/test_container_1'
@inherits: :class:`StorageDriver.get_container_cdn_url`
"""
if container.name not in self._containers:
raise ContainerDoesNotExistError(driver=self, value=None,
container_name=container.name)
return self._containers[container.name]['cdn_url']
def get_object(self, container_name, object_name):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> driver.get_object('unknown', 'unknown')
... #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerDoesNotExistError:
>>> container_name = 'test container 1'
>>> container = driver.create_container(container_name=container_name)
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> driver.get_object(
... 'test container 1', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ObjectDoesNotExistError:
>>> obj = container.upload_object_via_stream(object_name='test object',
... iterator=DummyFileObject(5, 10), extra={})
>>> obj.name
'test object'
>>> obj.size
50
@inherits: :class:`StorageDriver.get_object`
"""
self.get_container(container_name)
container_objects = self._containers[container_name]['objects']
if object_name not in container_objects:
raise ObjectDoesNotExistError(object_name=object_name, value=None,
driver=self)
return container_objects[object_name]
def get_object_cdn_url(self, obj):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> container_name = 'test container 1'
>>> container = driver.create_container(container_name=container_name)
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> obj = container.upload_object_via_stream(
... object_name='test object 5',
... iterator=DummyFileObject(5, 10), extra={})
>>> obj.name
'test object 5'
>>> obj.get_cdn_url()
'http://www.test.com/object/test_object_5'
@inherits: :class:`StorageDriver.get_object_cdn_url`
"""
container_name = obj.container.name
container_objects = self._containers[container_name]['objects']
if obj.name not in container_objects:
raise ObjectDoesNotExistError(object_name=obj.name, value=None,
driver=self)
return container_objects[obj.name].meta_data['cdn_url']
def create_container(self, container_name):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> container_name = 'test container 1'
>>> container = driver.create_container(container_name=container_name)
>>> container
<Container: name=test container 1, provider=Dummy Storage Provider>
>>> container = driver.create_container(
... container_name='test container 1')
... #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerAlreadyExistsError:
@inherits: :class:`StorageDriver.create_container`
"""
if container_name in self._containers:
raise ContainerAlreadyExistsError(container_name=container_name,
value=None, driver=self)
extra = {'object_count': 0}
container = Container(name=container_name, extra=extra, driver=self)
self._containers[container_name] = {'container': container,
'objects': {},
'cdn_url':
'http://www.test.com/container/%s'
%
(container_name.replace(' ', '_'))
}
return container
def delete_container(self, container):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> container = Container(name = 'test container',
... extra={'object_count': 0}, driver=driver)
>>> driver.delete_container(container=container)
... #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerDoesNotExistError:
>>> container = driver.create_container(
... container_name='test container 1')
... #doctest: +IGNORE_EXCEPTION_DETAIL
>>> len(driver._containers)
1
>>> driver.delete_container(container=container)
True
>>> len(driver._containers)
0
>>> container = driver.create_container(
... container_name='test container 1')
... #doctest: +IGNORE_EXCEPTION_DETAIL
>>> obj = container.upload_object_via_stream(
... object_name='test object', iterator=DummyFileObject(5, 10),
... extra={})
>>> driver.delete_container(container=container)
... #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ContainerIsNotEmptyError:
@inherits: :class:`StorageDriver.delete_container`
"""
container_name = container.name
if container_name not in self._containers:
raise ContainerDoesNotExistError(container_name=container_name,
value=None, driver=self)
container = self._containers[container_name]
if len(container['objects']) > 0:
raise ContainerIsNotEmptyError(container_name=container_name,
value=None, driver=self)
del self._containers[container_name]
return True
def download_object(self, obj, destination_path, overwrite_existing=False,
delete_on_failure=True):
kwargs_dict = {'obj': obj,
'response': DummyFileObject(),
'destination_path': destination_path,
'overwrite_existing': overwrite_existing,
'delete_on_failure': delete_on_failure}
return self._save_object(**kwargs_dict)
    def download_object_as_stream(self, obj, chunk_size=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...    container_name='test container 1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...    iterator=DummyFileObject(5, 10), extra={})
        >>> stream = container.download_object_as_stream(obj)
        >>> stream #doctest: +ELLIPSIS
        <...closed...>

        @inherits: :class:`StorageDriver.download_object_as_stream`
        """
        # No real data is stored by this dummy driver; obj and chunk_size are
        # ignored and a fresh synthetic file-like object is returned instead.
        return DummyFileObject()
def upload_object(self, file_path, container, object_name, extra=None,
file_hash=None):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> container_name = 'test container 1'
>>> container = driver.create_container(container_name=container_name)
>>> container.upload_object(file_path='/tmp/inexistent.file',
... object_name='test') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
LibcloudError:
>>> file_path = path = os.path.abspath(__file__)
>>> file_size = os.path.getsize(file_path)
>>> obj = container.upload_object(file_path=file_path,
... object_name='test')
>>> obj #doctest: +ELLIPSIS
<Object: name=test, size=...>
>>> obj.size == file_size
True
@inherits: :class:`StorageDriver.upload_object`
:param file_hash: File hash
:type file_hash: ``str``
"""
if not os.path.exists(file_path):
raise LibcloudError(value='File %s does not exist' % (file_path),
driver=self)
size = os.path.getsize(file_path)
return self._add_object(container=container, object_name=object_name,
size=size, extra=extra)
def upload_object_via_stream(self, iterator, container,
object_name, extra=None):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> container = driver.create_container(
... container_name='test container 1')
... #doctest: +IGNORE_EXCEPTION_DETAIL
>>> obj = container.upload_object_via_stream(
... object_name='test object', iterator=DummyFileObject(5, 10),
... extra={})
>>> obj #doctest: +ELLIPSIS
<Object: name=test object, size=50, ...>
@inherits: :class:`StorageDriver.upload_object_via_stream`
"""
size = len(iterator)
return self._add_object(container=container, object_name=object_name,
size=size, extra=extra)
def delete_object(self, obj):
"""
>>> driver = DummyStorageDriver('key', 'secret')
>>> container = driver.create_container(
... container_name='test container 1')
... #doctest: +IGNORE_EXCEPTION_DETAIL
>>> obj = container.upload_object_via_stream(object_name='test object',
... iterator=DummyFileObject(5, 10), extra={})
>>> obj #doctest: +ELLIPSIS
<Object: name=test object, size=50, ...>
>>> container.delete_object(obj=obj)
True
>>> obj = Object(name='test object 2',
... size=1000, hash=None, extra=None,
... meta_data=None, container=container,driver=None)
>>> container.delete_object(obj=obj) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ObjectDoesNotExistError:
@inherits: :class:`StorageDriver.delete_object`
"""
container_name = obj.container.name
object_name = obj.name
obj = self.get_object(container_name=container_name,
object_name=object_name)
del self._containers[container_name]['objects'][object_name]
return True
def _add_object(self, container, object_name, size, extra=None):
container = self.get_container(container.name)
extra = extra or {}
meta_data = extra.get('meta_data', {})
meta_data.update({'cdn_url': 'http://www.test.com/object/%s' %
(object_name.replace(' ', '_'))})
obj = Object(name=object_name, size=size, extra=extra, hash=None,
meta_data=meta_data, container=container, driver=self)
self._containers[container.name]['objects'][object_name] = obj
return obj
if __name__ == "__main__":
    # Run the doctests embedded in the driver methods above.
    import doctest
    doctest.testmod()
| apache-2.0 |
sourcesimian/MailSort | mailsort/resources/__init__.py | 1 | 1273 |
def write_resource(resource, filename):
    """Copy packaged resource *resource* out of the installed 'mailsort'
    distribution into the file *filename*.

    :param resource: resource path inside the distribution,
                     e.g. "mailsort/resources/creds.py"
    :param filename: destination path on disk (overwritten if present)
    """
    from pkg_resources import Requirement, resource_stream
    src = resource_stream(Requirement.parse("mailsort"), resource)
    try:
        with open(filename, 'wt') as fh:
            fh.write(src.read())
    finally:
        # resource_stream returns a real file-like handle; the original code
        # leaked it. Close it even if the destination write fails.
        src.close()
def setup_user():
    # First-run setup: create ~/.config/mailsort with a filters directory,
    # a default credentials script and an example filter, announcing each
    # piece it had to create. (Python 2 print statements -- py2-only module.)
    import os
    did_setup = False
    # Creating the filters dir also creates the parent config dir.
    config_root = os.path.expanduser('~/.config/mailsort/filters')
    if not os.path.exists(config_root):
        os.makedirs(config_root)
        did_setup = True
        print "* Config dir created: ~/.config/mailsort"
    creds_file = os.path.expanduser('~/.config/mailsort/creds.py')
    if not os.path.exists(creds_file):
        write_resource("mailsort/resources/creds.py", creds_file)
        did_setup = True
        print "* Default credentials script: ~/.config/mailsort/creds.py"
        print "  - You need to edit this to supply the required credentials"
    filters_file = os.path.expanduser('~/.config/mailsort/filters/spam.py')
    if not os.path.exists(filters_file):
        write_resource("mailsort/resources/spam.py", filters_file)
        did_setup = True
        print "* Example filter script: ~/.config/mailsort/filters/spam.py.example"
        print "  - This is where and what your filters should look like"
    if did_setup:
        # Non-zero exit after a fresh setup -- presumably to stop the caller
        # so the user edits the new files first; confirm against the caller.
        exit(1)
| mit |
CMUSV-VisTrails/WorkflowRecommendation | vistrails/core/mashup/alias.py | 1 | 6306 | ###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from core.mashup import XMLObject
from core.mashup.component import Component
from core.system import get_elementtree_library
ElementTree = get_elementtree_library()
################################################################################
class Alias(XMLObject):
    """A named alias bound to a mashup :class:`Component`.

    Aliases carry an integer ``id`` and a ``name`` and serialize to an
    ``<alias>`` XML element with a single ``<component>`` child.  Equality
    compares ``name`` and ``component`` (not ``id``).
    """
    def __init__(self, id, name, component=None):
        self.id = id
        self.name = name
        self.component = component

    def __copy__(self):
        return Alias.doCopy(self)

    def doCopy(self, new_ids=False, id_scope=None, id_remap=None):
        """doCopy() -> Alias
        returns a clone of itself. When new_ids is True a fresh id is drawn
        from id_scope and the (type, old id) -> new id mapping is recorded
        in id_remap.
        """
        cp = Alias(id=self.id, name=self.name)
        cp.component = self.component.doCopy(new_ids, id_scope, id_remap)

        # set new ids
        if new_ids:
            new_id = id_scope.getNewId('alias')
            if 'alias' in id_scope.remap:
                id_remap[(id_scope.remap['alias'], self.id)] = new_id
            else:
                id_remap[('alias', self.id)] = new_id
            cp.id = new_id
        return cp

    ##########################################################################
    # Serialization / Unserialization

    def toXml(self, node=None):
        """toXml(node: ElementTree.Element) -> ElementTree.Element
        writes itself to xml
        """
        if node is None:
            node = ElementTree.Element('alias')

        # set attributes
        node.set('id', self.convert_to_str(self.id,'long'))
        node.set('name', self.convert_to_str(self.name,'str'))
        child_ = ElementTree.SubElement(node, 'component')
        self.component.toXml(child_)
        return node

    @staticmethod
    def fromXml(node):
        """fromXml(node: ElementTree.Element) -> Alias or None
        Parses an <alias> element; returns None for any other tag.
        """
        if node.tag != 'alias':
            return None

        # read attributes
        data = node.get('id', None)
        id = Alias.convert_from_str(data, 'long')
        data = node.get('name', None)
        name = Alias.convert_from_str(data, 'str')
        # Fix: default to None so an element without a <component> child no
        # longer raises NameError on the unbound local below.
        component = None
        # Iterate the element directly: Element.getchildren() is deprecated
        # (removed in Python 3.9) and equivalent to plain iteration.
        for child in node:
            if child.tag == "component":
                component = Component.fromXml(child)
        alias = Alias(id, name, component)
        return alias

    ##########################################################################
    # Operators

    def __str__(self):
        """ __str__() -> str - Returns a string representation of itself """
        return ("(Alias id='%s' name='%s' component=%s)@%X" %
                (self.id,
                 self.name,
                 self.component,
                 id(self)))

    def __eq__(self, other):
        """ __eq__(other: Alias) -> boolean
        Returns True if self and other have the same attributes. Used by ==
        operator.
        """
        if type(self) != type(other):
            return False
        if self.name != other.name:
            return False
        if self.component != other.component:
            return False
        return True

    def __ne__(self, other):
        """ __ne__(other: Component) -> boolean
        Returns True if self and other don't have the same attributes.
        Used by != operator.
        """
        return not self.__eq__(other)
################################################################################
import unittest
from db.domain import IdScope
import copy
class TestAlias(unittest.TestCase):
    # Unit tests for Alias copy/serialization round-trips.
    # NOTE: uses Python 2 long literals (15L, 3L, 4L) -- py2-only module.

    def create_alias(self, id_scope=IdScope()):
        # Build an Alias wrapping a fully-populated Component.
        # NOTE(review): the IdScope() default is evaluated once at class
        # definition time and shared by every call that omits id_scope --
        # confirm that sharing is intended.
        c1 = Component(id=id_scope.getNewId('component'),
                       vttype='parameter', param_id=15L,
                       parent_vttype='function', parent_id=3L, mid=4L,
                       type='String', value='test', p_pos=0, pos=1,
                       strvaluelist='test1,test2', widget="text")
        a1 = Alias(id=id_scope.getNewId('alias'), name='alias1', component=c1)
        return a1

    def test_copy(self):
        # copy.copy keeps the id; doCopy(new_ids=True) must assign a new one
        # while preserving equality (id is not part of __eq__).
        id_scope = IdScope()
        a1 = self.create_alias(id_scope)
        a2 = copy.copy(a1)
        self.assertEqual(a1,a2)
        self.assertEqual(a1.id, a2.id)
        a3 = a2.doCopy(True, id_scope, {})
        self.assertEqual(a1,a3)
        self.assertNotEqual(a1.id, a3.id)

    def test_serialization(self):
        # toXml/fromXml round-trip preserves both equality and the id.
        a1 = self.create_alias()
        node = a1.toXml()
        a2 = Alias.fromXml(node)
        self.assertEqual(a1, a2)
        self.assertEqual(a1.id, a2.id)

    def test_str(self):
        # Smoke test: __str__ must not raise.
        a1 = self.create_alias()
        str(a1)
| bsd-3-clause |
SartoNess/BitcoinUnlimited | qa/rpc-tests/mempool_limit.py | 2 | 2341 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2016 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test mempool limiting together/eviction with the wallet
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(BitcoinTestFramework):
    # Floods a single node capped at -maxmempool=5 (MB) with waves of large
    # transactions at rising feerates and checks that an earlier low-fee
    # wallet transaction is evicted from the mempool.
    # (Python 2 test -- uses xrange.)

    def __init__(self):
        # Pre-generate the large OP_RETURN outputs reused by every wave.
        # NOTE(review): BitcoinTestFramework.__init__ is never invoked here;
        # presumably main() tolerates that -- confirm against the framework.
        self.txouts = gen_return_txouts()

    def setup_network(self):
        # One node: 5 MB mempool, no spending of unconfirmed change.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxmempool=5", "-spendzeroconfchange=0", "-debug"]))
        self.is_network_split = False
        self.sync_all()
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']

    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 2)

    def run_test(self):
        txids = []
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90)

        #create a mempool tx that will be evicted
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        self.nodes[0].settxfee(self.relayfee) # specifically fund this tx with low fee
        txF = self.nodes[0].fundrawtransaction(tx)
        self.nodes[0].settxfee(0) # return to automatic fee selection
        txFS = self.nodes[0].signrawtransaction(txF['hex'])
        txid = self.nodes[0].sendrawtransaction(txFS['hex'])
        # Lock the funding UTXO so the wallet cannot respend it later.
        self.nodes[0].lockunspent(True, [us0])

        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        base_fee = relayfee*100
        # Four waves of 30 big transactions, each wave paying a higher fee
        # than the previous, crowding the low-fee tx out of the mempool.
        for i in xrange (4):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee)

        # by now, the tx should be evicted, check confirmation state
        assert(txid not in self.nodes[0].getrawmempool())
        txdata = self.nodes[0].gettransaction(txid)
        assert(txdata['confirmations'] == 0) #confirmation should still be 0
if __name__ == '__main__':
    # Standard qa/rpc-tests entry point: delegate to the framework's main().
    MempoolLimitTest().main()
| mit |
clumsy/intellij-community | python/helpers/profiler/profilerpy3/ttypes.py | 45 | 19680 | #
# Autogenerated by Thrift Compiler (1.0.0-dev)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thriftpy3.Thrift import TType, TMessageType, TException, TApplicationException
from thriftpy3.transport import TTransport
from thriftpy3.protocol import TBinaryProtocol, TProtocol
try:
    from thriftpy3.protocol import fastbinary
except ImportError:
    # The C accelerator is optional: when it is absent the structs below fall
    # back to the pure-Python protocol code paths (they all test
    # `fastbinary is not None` before using it). Narrowed from a bare
    # `except:` so unrelated errors are no longer silently swallowed.
    fastbinary = None
class FuncStat:
  """
  One profiled function's identity and timing counters.
  Thrift-generated struct: do not hand-edit; regenerate from the IDL.

  Attributes:
   - file
   - line
   - func_name
   - calls_count
   - total_time
   - own_time
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'file', None, None, ), # 1
    (2, TType.I32, 'line', None, None, ), # 2
    (3, TType.STRING, 'func_name', None, None, ), # 3
    (4, TType.I32, 'calls_count', None, None, ), # 4
    (5, TType.DOUBLE, 'total_time', None, None, ), # 5
    (6, TType.DOUBLE, 'own_time', None, None, ), # 6
  )

  def __init__(self, file=None, line=None, func_name=None, calls_count=None, total_time=None, own_time=None,):
    self.file = file
    self.line = line
    self.func_name = func_name
    self.calls_count = calls_count
    self.total_time = total_time
    self.own_time = own_time

  def read(self, iprot):
    # Fast path: C-accelerated decoding when the accelerated binary protocol
    # and the optional fastbinary extension are both available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decode; unknown fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.file = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.line = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.func_name = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I32:
          self.calls_count = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.DOUBLE:
          self.total_time = iprot.readDouble()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.DOUBLE:
          self.own_time = iprot.readDouble()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast-path mirror of read(); fields left as None are not written.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('FuncStat')
    if self.file is not None:
      oprot.writeFieldBegin('file', TType.STRING, 1)
      oprot.writeString(self.file)
      oprot.writeFieldEnd()
    if self.line is not None:
      oprot.writeFieldBegin('line', TType.I32, 2)
      oprot.writeI32(self.line)
      oprot.writeFieldEnd()
    if self.func_name is not None:
      oprot.writeFieldBegin('func_name', TType.STRING, 3)
      oprot.writeString(self.func_name)
      oprot.writeFieldEnd()
    if self.calls_count is not None:
      oprot.writeFieldBegin('calls_count', TType.I32, 4)
      oprot.writeI32(self.calls_count)
      oprot.writeFieldEnd()
    if self.total_time is not None:
      oprot.writeFieldBegin('total_time', TType.DOUBLE, 5)
      oprot.writeDouble(self.total_time)
      oprot.writeFieldEnd()
    if self.own_time is not None:
      oprot.writeFieldBegin('own_time', TType.DOUBLE, 6)
      oprot.writeDouble(self.own_time)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Every field except 'line' is required by the IDL.
    if self.file is None:
      raise TProtocol.TProtocolException(message='Required field file is unset!')
    if self.func_name is None:
      raise TProtocol.TProtocolException(message='Required field func_name is unset!')
    if self.calls_count is None:
      raise TProtocol.TProtocolException(message='Required field calls_count is unset!')
    if self.total_time is None:
      raise TProtocol.TProtocolException(message='Required field total_time is unset!')
    if self.own_time is None:
      raise TProtocol.TProtocolException(message='Required field own_time is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.file)
    value = (value * 31) ^ hash(self.line)
    value = (value * 31) ^ hash(self.func_name)
    value = (value * 31) ^ hash(self.calls_count)
    value = (value * 31) ^ hash(self.total_time)
    value = (value * 31) ^ hash(self.own_time)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class Function:
  """
  A profiled function: its own FuncStat plus the stats of its callers.
  Thrift-generated struct: do not hand-edit; regenerate from the IDL.

  Attributes:
   - func_stat
   - callers
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'func_stat', (FuncStat, FuncStat.thrift_spec), None, ), # 1
    (2, TType.LIST, 'callers', (TType.STRUCT,(FuncStat, FuncStat.thrift_spec)), None, ), # 2
  )

  def __init__(self, func_stat=None, callers=None,):
    self.func_stat = func_stat
    self.callers = callers

  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.func_stat = FuncStat()
          self.func_stat.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Decode the callers list element by element.
          self.callers = []
          (_etype3, _size0) = iprot.readListBegin()
          for _i4 in range(_size0):
            _elem5 = FuncStat()
            _elem5.read(iprot)
            self.callers.append(_elem5)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast-path mirror of read(); None fields are omitted from the wire.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Function')
    if self.func_stat is not None:
      oprot.writeFieldBegin('func_stat', TType.STRUCT, 1)
      self.func_stat.write(oprot)
      oprot.writeFieldEnd()
    if self.callers is not None:
      oprot.writeFieldBegin('callers', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.callers))
      for iter6 in self.callers:
        iter6.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Only func_stat is required; callers may legitimately be unset.
    if self.func_stat is None:
      raise TProtocol.TProtocolException(message='Required field func_stat is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.func_stat)
    value = (value * 31) ^ hash(self.callers)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class Stats:
  """
  A full profiling snapshot: the list of profiled Function entries.
  Thrift-generated struct: do not hand-edit; regenerate from the IDL.

  Attributes:
   - func_stats
  """

  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'func_stats', (TType.STRUCT,(Function, Function.thrift_spec)), None, ), # 1
  )

  def __init__(self, func_stats=None,):
    self.func_stats = func_stats

  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          # Decode the func_stats list element by element.
          self.func_stats = []
          (_etype10, _size7) = iprot.readListBegin()
          for _i11 in range(_size7):
            _elem12 = Function()
            _elem12.read(iprot)
            self.func_stats.append(_elem12)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast-path mirror of read().
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Stats')
    if self.func_stats is not None:
      oprot.writeFieldBegin('func_stats', TType.LIST, 1)
      oprot.writeListBegin(TType.STRUCT, len(self.func_stats))
      for iter13 in self.func_stats:
        iter13.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # func_stats is required by the IDL.
    if self.func_stats is None:
      raise TProtocol.TProtocolException(message='Required field func_stats is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.func_stats)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class Stats_Req:
  """
  Empty (field-less) marker struct; carried as the 'ystats' payload of
  ProfilerRequest. Thrift-generated: do not hand-edit.
  """

  thrift_spec = (
  )

  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # No declared fields: consume (and skip) anything on the wire.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast-path mirror of read(); an empty struct is just begin/stop/end.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Stats_Req')
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Nothing to validate: the struct has no fields.
    return

  def __hash__(self):
    value = 17
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class SaveSnapshot_Req:
  """
  Request payload asking the profiler to persist a snapshot to 'filepath'.
  Thrift-generated struct: do not hand-edit; regenerate from the IDL.

  Attributes:
   - filepath
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'filepath', None, None, ), # 1
  )

  def __init__(self, filepath=None,):
    self.filepath = filepath

  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.filepath = iprot.readString()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast-path mirror of read().
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('SaveSnapshot_Req')
    if self.filepath is not None:
      oprot.writeFieldBegin('filepath', TType.STRING, 1)
      oprot.writeString(self.filepath)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # filepath is required by the IDL.
    if self.filepath is None:
      raise TProtocol.TProtocolException(message='Required field filepath is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.filepath)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class ProfilerRequest:
  """
  Envelope for a profiler command: a correlation 'id' plus optional
  ystats and/or save_snapshot payloads.
  Thrift-generated struct: do not hand-edit; regenerate from the IDL.

  Attributes:
   - id
   - ystats
   - save_snapshot
  """

  thrift_spec = (
    None, # 0
    (1, TType.I32, 'id', None, None, ), # 1
    (2, TType.STRUCT, 'ystats', (Stats_Req, Stats_Req.thrift_spec), None, ), # 2
    (3, TType.STRUCT, 'save_snapshot', (SaveSnapshot_Req, SaveSnapshot_Req.thrift_spec), None, ), # 3
  )

  def __init__(self, id=None, ystats=None, save_snapshot=None,):
    self.id = id
    self.ystats = ystats
    self.save_snapshot = save_snapshot

  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.id = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.ystats = Stats_Req()
          self.ystats.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.save_snapshot = SaveSnapshot_Req()
          self.save_snapshot.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast-path mirror of read(); unset payloads are omitted from the wire.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ProfilerRequest')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.I32, 1)
      oprot.writeI32(self.id)
      oprot.writeFieldEnd()
    if self.ystats is not None:
      oprot.writeFieldBegin('ystats', TType.STRUCT, 2)
      self.ystats.write(oprot)
      oprot.writeFieldEnd()
    if self.save_snapshot is not None:
      oprot.writeFieldBegin('save_snapshot', TType.STRUCT, 3)
      self.save_snapshot.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # Only the correlation id is required; payloads are optional.
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.id)
    value = (value * 31) ^ hash(self.ystats)
    value = (value * 31) ^ hash(self.save_snapshot)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class ProfilerResponse:
    """Thrift-generated response envelope for the profiler service.

    NOTE: generated code (Apache Thrift); regenerate from the IDL rather
    than editing by hand.

    Attributes:
     - id: correlation id echoing the request (required, see validate())
     - ystats: optional Stats payload
     - snapshot_filepath: optional path of a saved snapshot
    """
    # (field id, wire type, name, type args, default) — index 0 unused.
    thrift_spec = (
        None, # 0
        (1, TType.I32, 'id', None, None, ), # 1
        (2, TType.STRUCT, 'ystats', (Stats, Stats.thrift_spec), None, ), # 2
        (3, TType.STRING, 'snapshot_filepath', None, None, ), # 3
    )
    def __init__(self, id=None, ystats=None, snapshot_filepath=None,):
        self.id = id
        self.ystats = ystats
        self.snapshot_filepath = snapshot_filepath
    def read(self, iprot):
        """Deserialize this struct from *iprot* (a Thrift protocol)."""
        # Fast path: decode in C when the accelerated binary protocol and
        # the fastbinary extension are both available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.id = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ystats = Stats()
                    self.ystats.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.snapshot_filepath = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to *oprot*; unset fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('ProfilerResponse')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.I32, 1)
            oprot.writeI32(self.id)
            oprot.writeFieldEnd()
        if self.ystats is not None:
            oprot.writeFieldBegin('ystats', TType.STRUCT, 2)
            self.ystats.write(oprot)
            oprot.writeFieldEnd()
        if self.snapshot_filepath is not None:
            oprot.writeFieldBegin('snapshot_filepath', TType.STRING, 3)
            oprot.writeString(self.snapshot_filepath)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        """Raise TProtocolException if the required `id` field is unset."""
        if self.id is None:
            raise TProtocol.TProtocolException(message='Required field id is unset!')
        return
    def __hash__(self):
        """Generated field hash using the conventional 17/31 multiplier scheme."""
        value = 17
        value = (value * 31) ^ hash(self.id)
        value = (value * 31) ^ hash(self.ystats)
        value = (value * 31) ^ hash(self.snapshot_filepath)
        return value
    def __repr__(self):
        """Debug representation listing every instance attribute."""
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        """Structural equality: same class and identical attribute dict."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__ (required explicitly on Python 2)."""
        return not (self == other)
| apache-2.0 |
ontana/Plan-job | plan/exceptions.py | 5 | 1317 | # -*- coding: utf-8 -*-
"""
plan.exceptions
~~~~~~~~~~~~~~~
Plan exceptions.
:copyright: (c) 2014 by Shipeng Feng.
:license: BSD, see LICENSE for more details.
"""
from ._compat import text_type, PY2
class BaseError(Exception):
    """Common base class for all Plan errors.

    On Python 2 the message is stored UTF-8 encoded and decoded back to
    text on access; on Python 3 it is stored unchanged.
    """
    if PY2:
        def __init__(self, message=None):
            if message is not None:
                message = text_type(message).encode('utf-8')
            Exception.__init__(self, message)

        @property
        def message(self):
            # Decoded message text, or None when no message was given.
            if not self.args:
                return None
            raw = self.args[0]
            if raw is None:
                return None
            return raw.decode('utf-8', 'replace')

        def __unicode__(self):
            return self.message or u''
    else:
        def __init__(self, message=None):
            Exception.__init__(self, message)

        @property
        def message(self):
            # The message as given, or None when absent.
            if not self.args:
                return None
            return self.args[0]
class PlanError(BaseError):
    """General Plan runtime error.

    .. versionadded:: 0.4
    """
class ParseError(BaseError):
    """Raised when a Plan job's `every` or `at` value cannot be parsed."""
class ValidationError(BaseError):
    """Raised when a Plan job's `every` or `at` value fails validation."""
| bsd-3-clause |
kanghtta/zerorpc-python | zerorpc/core.py | 53 | 15303 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import traceback
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.lock
import gevent_zmq as zmq
from .exceptions import TimeoutExpired, RemoteError, LostRemote
from .channel import ChannelMultiplexer, BufferedChannel
from .socket import SocketBase
from .heartbeat import HeartBeatOnChannel
from .context import Context
from .decorators import DecoratorBase, rep
import patterns
from logging import getLogger
logger = getLogger(__name__)
class ServerBase(object):
    """Serve a set of callables over a zerorpc channel.

    Incoming requests are accepted from a ChannelMultiplexer and executed
    in a gevent task pool; heartbeats keep long-running calls alive.
    """

    def __init__(self, channel, methods=None, name=None, context=None,
            pool_size=None, heartbeat=5):
        """:param channel: transport channel the multiplexer wraps
        :param methods: object (or mapping) whose public callables are served;
            defaults to self
        :param name: server name advertised to clients; derived from
            *methods* when omitted
        :param context: zerorpc Context (defaults to the global instance)
        :param pool_size: max concurrent tasks (None = unbounded)
        :param heartbeat: heartbeat frequency in seconds
        """
        self._multiplexer = ChannelMultiplexer(channel)
        if methods is None:
            methods = self
        self._context = context or Context.get_instance()
        # BUG FIX: _extract_name is a staticmethod taking the served object;
        # the previous zero-argument call (`self._extract_name()`) raised
        # TypeError whenever no explicit name was supplied.
        self._name = name or self._extract_name(methods)
        self._task_pool = gevent.pool.Pool(size=pool_size)
        self._acceptor_task = None
        self._methods = self._filter_methods(ServerBase, self, methods)
        self._inject_builtins()
        self._heartbeat_freq = heartbeat
        # Wrap every exposed callable in a pattern decorator
        # (default: request/reply).
        for (k, functor) in self._methods.items():
            if not isinstance(functor, DecoratorBase):
                self._methods[k] = rep(functor)

    @staticmethod
    def _filter_methods(cls, self, methods):
        """Return the dict of public callables to expose from *methods*.

        Mapping-like inputs are returned as-is; otherwise public callables
        are collected, excluding the server's own public attributes.
        """
        if hasattr(methods, '__getitem__'):
            return methods
        server_methods = set(getattr(self, k) for k in dir(cls) if not
                k.startswith('_'))
        return dict((k, getattr(methods, k))
                for k in dir(methods)
                if (callable(getattr(methods, k))
                    and not k.startswith('_')
                    and getattr(methods, k) not in server_methods
                    ))

    @staticmethod
    def _extract_name(methods):
        """Derive a server name from the served object's type."""
        return getattr(type(methods), '__name__', None) or repr(methods)

    def close(self):
        """Stop accepting requests and release the multiplexer."""
        self.stop()
        self._multiplexer.close()

    def _format_args_spec(self, args_spec, r=None):
        """Convert an argspec tuple into a list of {name[, default]} dicts."""
        if args_spec:
            r = [dict(name=name) for name in args_spec[0]]
            default_values = args_spec[3]
            if default_values is not None:
                # Defaults align with the *last* arguments, hence reversed.
                for arg, def_val in zip(reversed(r), reversed(default_values)):
                    arg['default'] = def_val
        return r

    def _zerorpc_inspect(self):
        """Introspection endpoint: name plus per-method args and docstring."""
        methods = dict((m, f) for m, f in self._methods.items()
                if not m.startswith('_'))
        detailed_methods = dict((m,
            dict(args=self._format_args_spec(f._zerorpc_args()),
                doc=f._zerorpc_doc())) for (m, f) in methods.items())
        return {'name': self._name,
                'methods': detailed_methods}

    def _inject_builtins(self):
        """Register the reserved _zerorpc_* introspection methods."""
        self._methods['_zerorpc_list'] = lambda: [m for m in self._methods
                if not m.startswith('_')]
        self._methods['_zerorpc_name'] = lambda: self._name
        self._methods['_zerorpc_ping'] = lambda: ['pong', self._name]
        self._methods['_zerorpc_help'] = lambda m: \
            self._methods[m]._zerorpc_doc()
        self._methods['_zerorpc_args'] = \
            lambda m: self._methods[m]._zerorpc_args()
        self._methods['_zerorpc_inspect'] = self._zerorpc_inspect

    def __call__(self, method, *args):
        """Invoke an exposed method locally; raises NameError if unknown."""
        if method not in self._methods:
            raise NameError(method)
        return self._methods[method](*args)

    def _print_traceback(self, protocol_v1, exc_infos):
        """Log the current exception; return the wire form of the error.

        Protocol v1 peers get a single repr string; v2+ peers get
        (name, message, traceback).
        """
        logger.exception('')
        exc_type, exc_value, exc_traceback = exc_infos
        if protocol_v1:
            return (repr(exc_value),)
        human_traceback = traceback.format_exc()
        name = exc_type.__name__
        human_msg = str(exc_value)
        return (name, human_msg, human_traceback)

    def _async_task(self, initial_event):
        """Handle one incoming request in its own channel with heartbeat."""
        protocol_v1 = initial_event.header.get('v', 1) < 2
        channel = self._multiplexer.channel(initial_event)
        hbchan = HeartBeatOnChannel(channel, freq=self._heartbeat_freq,
                passive=protocol_v1)
        bufchan = BufferedChannel(hbchan)
        exc_infos = None
        event = bufchan.recv()
        try:
            self._context.hook_load_task_context(event.header)
            functor = self._methods.get(event.name, None)
            if functor is None:
                raise NameError(event.name)
            functor.pattern.process_call(self._context, bufchan, event, functor)
        except LostRemote:
            # Peer is gone: nothing to reply to, just log.
            exc_infos = list(sys.exc_info())
            self._print_traceback(protocol_v1, exc_infos)
        except Exception:
            exc_infos = list(sys.exc_info())
            human_exc_infos = self._print_traceback(protocol_v1, exc_infos)
            reply_event = bufchan.create_event('ERR', human_exc_infos,
                    self._context.hook_get_task_context())
            self._context.hook_server_inspect_exception(event, reply_event, exc_infos)
            bufchan.emit_event(reply_event)
        finally:
            # Break the traceback reference cycle before closing.
            del exc_infos
            bufchan.close()

    def _acceptor(self):
        """Accept loop: spawn a task per incoming request."""
        while True:
            initial_event = self._multiplexer.recv()
            self._task_pool.spawn(self._async_task, initial_event)

    def run(self):
        """Run the accept loop until stopped; drains the task pool on exit."""
        self._acceptor_task = gevent.spawn(self._acceptor)
        try:
            self._acceptor_task.get()
        finally:
            self.stop()
            self._task_pool.join(raise_error=True)

    def stop(self):
        """Kill the accept loop if it is running (idempotent)."""
        if self._acceptor_task is not None:
            self._acceptor_task.kill()
            self._acceptor_task = None
class ClientBase(object):
    """Issue remote calls over a zerorpc channel.

    Call a remote method either via attribute access (``client.foo(...)``)
    or directly (``client('foo', ...)``).
    """
    def __init__(self, channel, context=None, timeout=30, heartbeat=5,
            passive_heartbeat=False):
        # ignore_broadcast: a client never serves broadcasts.
        self._multiplexer = ChannelMultiplexer(channel,
                ignore_broadcast=True)
        self._context = context or Context.get_instance()
        self._timeout = timeout
        self._heartbeat_freq = heartbeat
        self._passive_heartbeat = passive_heartbeat
    def close(self):
        """Release the multiplexer (and its underlying channel)."""
        self._multiplexer.close()
    def _handle_remote_error(self, event):
        """Build the exception to raise for an ERR event from the server.

        A context hook may supply its own exception; otherwise the wire
        format depends on the protocol version (v2+: name/msg/traceback,
        v1: a single message string).
        """
        exception = self._context.hook_client_handle_remote_error(event)
        if not exception:
            if event.header.get('v', 1) >= 2:
                (name, msg, traceback) = event.args
                exception = RemoteError(name, msg, traceback)
            else:
                (msg,) = event.args
                exception = RemoteError('RemoteError', msg, None)
        return exception
    def _select_pattern(self, event):
        """Find the messaging pattern that accepts this answer event."""
        for pattern in patterns.patterns_list:
            if pattern.accept_answer(event):
                return pattern
        msg = 'Unable to find a pattern for: {0}'.format(event)
        raise RuntimeError(msg)
    def _process_response(self, request_event, bufchan, timeout):
        """Wait for the reply and dispatch it to its pattern.

        The channel is closed on any failure; on timeout the
        client_after_request hook is still notified before re-raising.
        """
        try:
            reply_event = bufchan.recv(timeout)
            pattern = self._select_pattern(reply_event)
            return pattern.process_answer(self._context, bufchan, request_event,
                    reply_event, self._handle_remote_error)
        except TimeoutExpired:
            bufchan.close()
            ex = TimeoutExpired(timeout,
                    'calling remote method {0}'.format(request_event.name))
            self._context.hook_client_after_request(request_event, None, ex)
            raise ex
        except:
            bufchan.close()
            raise
    def __call__(self, method, *args, **kargs):
        """Invoke *method* remotely.

        Keyword options: ``timeout`` (seconds), ``async`` (return an
        AsyncResult instead of blocking), ``slots`` (input queue size).
        """
        timeout = kargs.get('timeout', self._timeout)
        channel = self._multiplexer.channel()
        hbchan = HeartBeatOnChannel(channel, freq=self._heartbeat_freq,
                passive=self._passive_heartbeat)
        bufchan = BufferedChannel(hbchan, inqueue_size=kargs.get('slots', 100))
        xheader = self._context.hook_get_task_context()
        request_event = bufchan.create_event(method, args, xheader)
        self._context.hook_client_before_request(request_event)
        bufchan.emit_event(request_event)
        try:
            if kargs.get('async', False) is False:
                return self._process_response(request_event, bufchan, timeout)
            async_result = gevent.event.AsyncResult()
            gevent.spawn(self._process_response, request_event, bufchan,
                    timeout).link(async_result)
            return async_result
        except:
            # XXX: This is going to be closed twice if async is false and
            # _process_response raises an exception. I wonder if the above
            # async branch can raise an exception too, if no we can just remove
            # this code.
            bufchan.close()
            raise
    def __getattr__(self, method):
        # Any unknown attribute becomes a remote-call proxy.
        return lambda *args, **kargs: self(method, *args, **kargs)
class Server(SocketBase, ServerBase):
    """ServerBase bound to a zmq ROUTER socket."""
    def __init__(self, methods=None, name=None, context=None, pool_size=None,
            heartbeat=5):
        SocketBase.__init__(self, zmq.ROUTER, context)
        if methods is None:
            methods = self
        # Resolve name/methods before ServerBase so filtering excludes
        # Server's own public attributes (close, run, ...).
        name = name or ServerBase._extract_name(methods)
        methods = ServerBase._filter_methods(Server, self, methods)
        ServerBase.__init__(self, self._events, methods, name, context,
                pool_size, heartbeat)
    def close(self):
        # Stop serving first, then release the socket.
        ServerBase.close(self)
        SocketBase.close(self)
class Client(SocketBase, ClientBase):
    """ClientBase bound to a zmq DEALER socket.

    When *connect_to* is given the client connects immediately.
    """
    def __init__(self, connect_to=None, context=None, timeout=30, heartbeat=5,
            passive_heartbeat=False):
        SocketBase.__init__(self, zmq.DEALER, context=context)
        ClientBase.__init__(self, self._events, context, timeout, heartbeat,
                passive_heartbeat)
        if connect_to:
            self.connect(connect_to)
    def close(self):
        # Tear down the client machinery before the socket.
        ClientBase.close(self)
        SocketBase.close(self)
class Pusher(SocketBase):
    """Fire-and-forget dispatcher over a zmq PUSH socket.

    ``pusher.method(args)`` emits an event named *method*; no reply is
    expected.
    """
    def __init__(self, context=None, zmq_socket=zmq.PUSH):
        super(Pusher, self).__init__(zmq_socket, context=context)

    def __call__(self, method, *args):
        xheader = self._context.hook_get_task_context()
        self._events.emit(method, args, xheader)

    def __getattr__(self, method):
        def proxy(*args):
            return self(method, *args)
        return proxy
class Puller(SocketBase):
    """Receive and execute events from a zmq PULL socket.

    The counterpart of Pusher: each incoming event is dispatched to the
    matching exposed method; no reply is ever sent.
    """
    def __init__(self, methods=None, context=None, zmq_socket=zmq.PULL):
        super(Puller, self).__init__(zmq_socket, context=context)
        if methods is None:
            methods = self
        self._methods = ServerBase._filter_methods(Puller, self, methods)
        self._receiver_task = None
    def close(self):
        """Stop the receive loop, then release the socket."""
        self.stop()
        super(Puller, self).close()
    def __call__(self, method, *args):
        """Invoke an exposed method locally; raises NameError if unknown."""
        if method not in self._methods:
            raise NameError(method)
        return self._methods[method](*args)
    def _receiver(self):
        """Receive loop: execute each event, logging (not raising) errors."""
        while True:
            event = self._events.recv()
            try:
                if event.name not in self._methods:
                    raise NameError(event.name)
                self._context.hook_load_task_context(event.header)
                self._context.hook_server_before_exec(event)
                self._methods[event.name](*event.args)
                # In Push/Pull there is no reply to send, hence None for the
                # reply_event argument
                self._context.hook_server_after_exec(event, None)
            except Exception:
                exc_infos = sys.exc_info()
                try:
                    logger.exception('')
                    self._context.hook_server_inspect_exception(event, None, exc_infos)
                finally:
                    # Break the traceback reference cycle.
                    del exc_infos
    def run(self):
        """Run the receive loop until killed."""
        self._receiver_task = gevent.spawn(self._receiver)
        try:
            self._receiver_task.get()
        finally:
            self._receiver_task = None
    def stop(self):
        """Kill the receive loop if running (idempotent, non-blocking)."""
        if self._receiver_task is not None:
            self._receiver_task.kill(block=False)
class Publisher(Pusher):
    """Pusher over a zmq PUB socket: events fan out to all subscribers."""
    def __init__(self, context=None):
        super(Publisher, self).__init__(context=context, zmq_socket=zmq.PUB)
class Subscriber(Puller):
    """Puller over a zmq SUB socket subscribed to every topic."""
    def __init__(self, methods=None, context=None):
        super(Subscriber, self).__init__(methods=methods, context=context,
                zmq_socket=zmq.SUB)
        # Empty topic filter = receive all published events.
        self._events.setsockopt(zmq.SUBSCRIBE, '')
def fork_task_context(functor, context=None):
    '''Wrap *functor* so that it runs under the caller's task context.

    Usage example:
        gevent.spawn(zerorpc.fork_task_context(myfunction), args...)

    The goal is to permit context "inheritance" from a task to another:
    a task spawned by a zerorpc.Server is linked to the initial event's
    context, and every zerorpc.Client call it makes transfers that
    context. A plain gevent.spawn from inside such a task starts with a
    fresh context; wrapping the target with fork_task_context instead
    captures the current context header at wrap time and re-loads it
    when the new task starts, so its zerorpc calls carry the original
    context too.

    A real use case is a distributed tracer: a trace_id injected into
    (or copied from) the current task context gets passed from one
    zerorpc.Server to another via zerorpc.Client.

    Rule of thumb: if the new task will make any zerorpc call, wrap it.
    '''
    context = context or Context.get_instance()
    # Snapshot the current task context now; the spawned task may start
    # long after the caller's context has moved on.
    header = context.hook_get_task_context()

    def run_with_context(*args, **kargs):
        context.hook_load_task_context(header)
        return functor(*args, **kargs)

    return run_with_context
| mit |
xfournet/intellij-community | plugins/hg4idea/testData/bin/hgext/factotum.py | 91 | 4221 | # factotum.py - Plan 9 factotum integration for Mercurial
#
# Copyright (C) 2012 Steven Stallion <sstallion@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''http authentication with factotum
This extension allows the factotum(4) facility on Plan 9 from Bell Labs
platforms to provide authentication information for HTTP access. Configuration
entries specified in the auth section as well as authentication information
provided in the repository URL are fully supported. If no prefix is specified,
a value of "*" will be assumed.
By default, keys are specified as::
proto=pass service=hg prefix=<prefix> user=<username> !password=<password>
If the factotum extension is unable to read the required key, one will be
requested interactively.
A configuration section is available to customize runtime behavior. By
default, these entries are::
[factotum]
executable = /bin/auth/factotum
mountpoint = /mnt/factotum
service = hg
The executable entry defines the full path to the factotum binary. The
mountpoint entry defines the path to the factotum file service. Lastly, the
service entry controls the service name used when reading keys.
'''
from mercurial.i18n import _
from mercurial.url import passwordmgr
from mercurial import httpconnection, util
import os, urllib2
ERRMAX = 128
def auth_getkey(self, params):
    """Interactively ask factotum to create a missing key.

    Appends ``user?``/``!password?`` prompts to *params* and invokes the
    factotum binary with ``-g`` so the user can fill them in. Aborts when
    the ui is not interactive.
    """
    if not self.ui.interactive():
        raise util.Abort(_('factotum not interactive'))
    if 'user=' not in params:
        params = '%s user?' % params
    params = '%s !password?' % params
    # NOTE(review): params is interpolated into a shell command line; a
    # value containing a single quote could break the quoting. Inputs come
    # from hg auth config / repo URL — confirm they are trusted.
    os.system("%s -g '%s'" % (_executable, params))
def auth_getuserpasswd(self, getkey, params):
    """Read (user, password) for *params* from factotum's RPC file.

    Speaks the factotum rpc protocol: "start <attrs>" then "read"; each
    reply starting with "ok" carries the result. When the key is missing,
    *getkey* is invoked to prompt for it and the read is retried.
    """
    params = 'proto=pass %s' % params
    while True:
        fd = os.open('%s/rpc' % _mountpoint, os.O_RDWR)
        try:
            try:
                os.write(fd, 'start %s' % params)
                l = os.read(fd, ERRMAX).split()
                if l[0] == 'ok':
                    os.write(fd, 'read')
                    l = os.read(fd, ERRMAX).split()
                    if l[0] == 'ok':
                        # Reply is "ok <user> <password>".
                        return l[1:]
            except (OSError, IOError):
                raise util.Abort(_('factotum not responding'))
        finally:
            os.close(fd)
        # Key not present yet: prompt for it, then loop and retry.
        getkey(self, params)
def monkeypatch_method(cls):
    """Decorator factory: attach the decorated function to *cls*.

    The function is installed on the class under its own __name__ and
    returned unchanged, so it remains usable at module level too.
    """
    def attach(func):
        setattr(cls, func.__name__, func)
        return func
    return attach
@monkeypatch_method(passwordmgr)
def find_user_password(self, realm, authuri):
    """Resolve HTTP credentials, falling back to factotum.

    Order: urllib2's own store, then hg's [auth] config, and finally a
    factotum key lookup (which may prompt interactively). The result is
    cached back into the password manager.
    """
    user, passwd = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
        self, realm, authuri)
    if user and passwd:
        self._writedebug(user, passwd)
        return (user, passwd)
    prefix = ''
    res = httpconnection.readauthforuri(self.ui, authuri, user)
    if res:
        _, auth = res
        prefix = auth.get('prefix')
        user, passwd = auth.get('username'), auth.get('password')
    if not user or not passwd:
        if not prefix:
            # No [auth] prefix configured: match any URI.
            prefix = '*'
        params = 'service=%s prefix=%s' % (_service, prefix)
        if user:
            params = '%s user=%s' % (params, user)
        user, passwd = auth_getuserpasswd(self, auth_getkey, params)
    # Cache for subsequent requests to the same realm/URI.
    self.add_password(realm, authuri, user, passwd)
    self._writedebug(user, passwd)
    return (user, passwd)
def uisetup(ui):
    """Load the [factotum] configuration into module globals at setup."""
    global _executable, _mountpoint, _service
    _executable = ui.config('factotum', 'executable', '/bin/auth/factotum')
    _mountpoint = ui.config('factotum', 'mountpoint', '/mnt/factotum')
    _service = ui.config('factotum', 'service', 'hg')
| apache-2.0 |
nasirali1/CerebralCortex | cerebralcortex/data_processor/cStress.py | 1 | 6648 | # Copyright (c) 2016, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pyspark import RDD
from cerebralcortex.data_processor.feature.ecg import ecg_feature_computation
from cerebralcortex.data_processor.feature.rip import rip_feature_computation
from cerebralcortex.data_processor.signalprocessing import rip
from cerebralcortex.data_processor.signalprocessing.accelerometer import accelerometer_features
from cerebralcortex.data_processor.signalprocessing.alignment import timestamp_correct, autosense_sequence_align
from cerebralcortex.data_processor.signalprocessing.dataquality import compute_outlier_ecg
from cerebralcortex.data_processor.signalprocessing.dataquality import ecg_data_quality
from cerebralcortex.data_processor.signalprocessing.dataquality import rip_data_quality
from cerebralcortex.data_processor.signalprocessing.ecg import compute_rr_intervals
def fix_two_joins(nested_data):
    """Flatten the nesting produced by two successive RDD joins.

    Turns ``(key, ((a, b, ...), extra))`` into ``(key, (a, b, ..., extra))``.
    """
    key, (base_value, extra) = nested_data
    return key, base_value + (extra,)
def join_feature_vector(*argsrdd):
    """Join any number of keyed feature RDDs pairwise, left to right.

    :param argsrdd: input feature dataStreams (at least one)
    :return: the joined feature vector RDD (the sole input when only one
        is given)
    """
    joined = argsrdd[0]
    for other in argsrdd[1:]:
        joined = joined.join(other)
    return joined
def cStress(rdd: RDD) -> RDD:
    """Build the cStress feature vector pipeline over per-participant data.

    NOTE(review): this function is visibly incomplete / non-compiling as
    written: the literal ``...`` inside the window_rip(...) call below is a
    SyntaxError (positional after keyword); ``windowed_accel_features`` is
    used but its assignment is commented out; and ``window_rip``,
    ``compute_rsa_cycle_feature``, ``window_rsa`` and
    ``generate_cStress_feature_vector`` are not defined or imported in this
    file. Kept byte-identical pending the missing implementations.
    """
    # TODO: TWH Temporary
    ecg_sampling_frequency = 64.0
    rip_sampling_frequency = 64.0
    accel_sampling_frequency = 64.0 / 6.0
    # Timestamp correct datastreams
    ecg_corrected = rdd.map(lambda ds: (
        ds['participant'], timestamp_correct(datastream=ds['ecg'], sampling_frequency=ecg_sampling_frequency)))
    rip_corrected = rdd.map(lambda ds: (
        ds['participant'], timestamp_correct(datastream=ds['rip'], sampling_frequency=rip_sampling_frequency)))
    accelx_corrected = rdd.map(lambda ds: (
        ds['participant'], timestamp_correct(datastream=ds['accelx'], sampling_frequency=accel_sampling_frequency)))
    accely_corrected = rdd.map(lambda ds: (
        ds['participant'], timestamp_correct(datastream=ds['accely'], sampling_frequency=accel_sampling_frequency)))
    accelz_corrected = rdd.map(lambda ds: (
        ds['participant'], timestamp_correct(datastream=ds['accelz'], sampling_frequency=accel_sampling_frequency)))
    ecg_quality = ecg_corrected.map(lambda ds: (ds[0], ecg_data_quality(ds[1])))
    rip_quality = rip_corrected.map(lambda ds: (ds[0], rip_data_quality(ds[1])))
    # Two joins nest the value tuples; fix_two_joins flattens them.
    accel_group = accelx_corrected.join(accely_corrected).join(accelz_corrected).map(fix_two_joins)
    accel = accel_group.map(lambda ds: (ds[0], autosense_sequence_align(datastreams=[ds[1][0], ds[1][1], ds[1][2]],
                                                                        sampling_frequency=accel_sampling_frequency)))
    # Accelerometer Feature Computation
    accel_features = accel.map(lambda ds: (ds[0], accelerometer_features(ds[1], window_length=10.0)))
    # NOTE(review): windowed_accel_features is referenced later but this
    # assignment is commented out — NameError at runtime as-is.
    # windowed_accel_features = accel_features.map(lambda ds: (ds[0], window_accel(ds[1], window_size=60)))
    rip_corrected_and_quality = rip_corrected.join(rip_quality)
    # rip features
    peak_valley = rip_corrected_and_quality.map(
        lambda ds: (ds[0], rip.compute_peak_valley(rip=ds[1][0], rip_quality=ds[1][1])))
    rip_cycle_features = peak_valley.map(lambda ds: (ds[0], rip_feature_computation(ds[1][0])))
    # NOTE(review): the `...` below is a placeholder and a SyntaxError.
    windowed_rip_features = rip_cycle_features.map(lambda ds: (ds[0], window_rip(inspiration_duration=ds[1][0],
                                                                                expiration_duration=ds[1][1], ...,
                                                                                window_size=60)))
    ecg_corrected_and_quality = ecg_corrected.join(ecg_quality)
    # r-peak datastream computation
    ecg_rr_rdd = ecg_corrected_and_quality.map(lambda ds:
                                               (ds[0], compute_rr_intervals(ecg=ds[1][0], ecg_quality=ds[1][1],
                                                                            fs=ecg_sampling_frequency)))
    ecg_rr_quality = ecg_rr_rdd.map(lambda ds: (ds[0], compute_outlier_ecg(ds[1])))
    ecg_rr_and_quality = ecg_rr_rdd.join(ecg_rr_quality)
    windowed_ecg_features = ecg_rr_and_quality.map(
        lambda ds: (ds[0], ecg_feature_computation(datastream=ds[1][0], quality_datastream=ds[1][1],
                                                   window_size=60, window_offset=60)))
    peak_valley_rr_int = peak_valley.join(ecg_rr_rdd)  # TODO: Add RR_Quality here?
    rsa_cycle_features = peak_valley_rr_int.map(
        lambda ds: (ds[0], compute_rsa_cycle_feature(valleys=ds[1][1], rr_int=ds[1][2])))
    windowed_rsa_features = rsa_cycle_features.map(lambda ds: (ds[0], window_rsa(ds[0][0], window_size=60)))
    combined_features = windowed_accel_features.join(windowed_ecg_features).join(windowed_rip_features).join(
        windowed_rsa_features)
    # Fix joins here
    feature_vector_ecg_rip = combined_features.map(
        lambda ds: (ds[0], generate_cStress_feature_vector(accel=ds[1][0], ecg=ds[1][1], rip=ds[1][2], rsa=ds[1][3])))
    return feature_vector_ecg_rip  # Data stream with data points (ST, ET, [...37 values...])
| bsd-2-clause |
Vishluck/sympy | sympy/combinatorics/partitions.py | 82 | 19983 | from __future__ import print_function, division
from sympy.core import Basic, Dict, sympify
from sympy.core.compatibility import as_int, default_sort_key, range
from sympy.functions.combinatorial.numbers import bell
from sympy.matrices import zeros
from sympy.sets.sets import FiniteSet
from sympy.utilities.iterables import has_dups, flatten, group
from collections import defaultdict
class Partition(FiniteSet):
"""
This class represents an abstract partition.
A partition is a set of disjoint sets whose union equals a given set.
See Also
========
sympy.utilities.iterables.partitions,
sympy.utilities.iterables.multiset_partitions
"""
_rank = None
_partition = None
def __new__(cls, *partition):
"""
Generates a new partition object.
This method also verifies if the arguments passed are
valid and raises a ValueError if they are not.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a
{{3}, {1, 2}}
>>> a.partition
[[1, 2], [3]]
>>> len(a)
2
>>> a.members
(1, 2, 3)
"""
args = partition
if not all(isinstance(part, (list, FiniteSet)) for part in args):
raise ValueError(
"Each argument to Partition should be a list or a FiniteSet")
# sort so we have a canonical reference for RGS
partition = sorted(sum(partition, []), key=default_sort_key)
if has_dups(partition):
raise ValueError("Partition contained duplicated elements.")
obj = FiniteSet.__new__(cls, *[FiniteSet(*x) for x in args])
obj.members = tuple(partition)
obj.size = len(partition)
return obj
def sort_key(self, order=None):
"""Return a canonical key that can be used for sorting.
Ordering is based on the size and sorted elements of the partition
and ties are broken with the rank.
Examples
========
>>> from sympy.utilities.iterables import default_sort_key
>>> from sympy.combinatorics.partitions import Partition
>>> from sympy.abc import x
>>> a = Partition([1, 2])
>>> b = Partition([3, 4])
>>> c = Partition([1, x])
>>> d = Partition(list(range(4)))
>>> l = [d, b, a + 1, a, c]
>>> l.sort(key=default_sort_key); l
[{{1, 2}}, {{1}, {2}}, {{1, x}}, {{3, 4}}, {{0, 1, 2, 3}}]
"""
if order is None:
members = self.members
else:
members = tuple(sorted(self.members,
key=lambda w: default_sort_key(w, order)))
return list(map(default_sort_key, (self.size, members, self.rank)))
@property
def partition(self):
"""Return partition as a sorted list of lists.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> Partition([1], [2, 3]).partition
[[1], [2, 3]]
"""
if self._partition is None:
self._partition = sorted([sorted(p, key=default_sort_key)
for p in self.args])
return self._partition
def __add__(self, other):
"""
Return permutation whose rank is ``other`` greater than current rank,
(mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a + 1).rank
2
>>> (a + 100).rank
1
"""
other = as_int(other)
offset = self.rank + other
result = RGS_unrank((offset) %
RGS_enum(self.size),
self.size)
return Partition.from_rgs(result, self.members)
def __sub__(self, other):
"""
Return permutation whose rank is ``other`` less than current rank,
(mod the maximum rank for the set).
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3])
>>> a.rank
1
>>> (a - 1).rank
0
>>> (a - 100).rank
1
"""
return self.__add__(-other)
def __le__(self, other):
"""
Checks if a partition is less than or equal to
the other based on rank.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a <= a
True
>>> a <= b
True
"""
return self.sort_key() <= sympify(other).sort_key()
def __lt__(self, other):
"""
Checks if a partition is less than the other.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3, 4, 5])
>>> b = Partition([1], [2, 3], [4], [5])
>>> a.rank, b.rank
(9, 34)
>>> a < b
True
"""
return self.sort_key() < sympify(other).sort_key()
@property
def rank(self):
"""
Gets the rank of a partition.
Examples
========
>>> from sympy.combinatorics.partitions import Partition
>>> a = Partition([1, 2], [3], [4, 5])
>>> a.rank
13
"""
if self._rank is not None:
return self._rank
self._rank = RGS_rank(self.RGS)
return self._rank
@property
def RGS(self):
    """
    Returns the "restricted growth string" of the partition.

    The RGS is returned as a list of indices, L, where L[i] indicates
    the block in which element i appears. For example, in a partition
    of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is
    [1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import Partition
    >>> a = Partition([1, 2], [3], [4, 5])
    >>> a.members
    (1, 2, 3, 4, 5)
    >>> a.RGS
    (0, 0, 1, 2, 2)
    >>> a + 1
    {{3}, {4}, {5}, {1, 2}}
    >>> _.RGS
    (0, 0, 1, 2, 3)
    """
    # Map every member to the index of the block that contains it ...
    block_of = {}
    for index, block in enumerate(self.partition):
        for member in block:
            block_of[member] = index
    # ... then read the indices back in canonical member order.
    members = sorted(block_of, key=default_sort_key)
    return tuple(block_of[member] for member in members)
@classmethod
def from_rgs(cls, rgs, elements):
    """
    Creates a set partition from a restricted growth string.

    The indices given in rgs are assumed to be the index
    of the element as given in elements *as provided* (the
    elements are not sorted by this routine). Block numbering
    starts from 0. If any block was not referenced in ``rgs``
    an error will be raised.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import Partition
    >>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde'))
    {{c}, {a, d}, {b, e}}
    >>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead'))
    {{e}, {a, c}, {b, d}}
    >>> a = Partition([1, 4], [2], [3, 5])
    >>> Partition.from_rgs(a.RGS, a.members)
    {{2}, {1, 4}, {3, 5}}
    """
    if len(rgs) != len(elements):
        raise ValueError('mismatch in rgs and element lengths')
    # One (possibly empty) bucket per block index referenced by rgs.
    blocks = [[] for _ in range(max(rgs) + 1)]
    for block_index, element in zip(rgs, elements):
        blocks[block_index].append(element)
    if not all(blocks):
        raise ValueError('some blocks of the partition were empty.')
    return Partition(*blocks)
class IntegerPartition(Basic):
    """
    This class represents an integer partition.

    In number theory and combinatorics, a partition of a positive integer,
    ``n``, also called an integer partition, is a way of writing ``n`` as a
    list of positive integers that sum to n. Two partitions that differ only
    in the order of summands are considered to be the same partition; if order
    matters then the partitions are referred to as compositions. For example,
    4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1];
    the compositions [1, 2, 1] and [1, 1, 2] are the same as partition
    [2, 1, 1].

    See Also
    ========

    sympy.utilities.iterables.partitions,
    sympy.utilities.iterables.multiset_partitions

    Reference: http://en.wikipedia.org/wiki/Partition_%28number_theory%29
    """

    # Lazily-computed cache of the {part: multiplicity} mapping; see as_dict().
    _dict = None
    # Distinct parts in descending order; populated alongside _dict by as_dict().
    _keys = None

    def __new__(cls, partition, integer=None):
        """
        Generates a new IntegerPartition object from a list or dictionary.

        The partition can be given as a list of positive integers or a
        dictionary of (integer, multiplicity) items. If the partition is
        preceded by an integer an error will be raised if the partition
        does not sum to that given integer.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([5, 4, 3, 1, 1])
        >>> a
        IntegerPartition(14, (5, 4, 3, 1, 1))
        >>> print(a)
        [5, 4, 3, 1, 1]
        >>> IntegerPartition({1:3, 2:1})
        IntegerPartition(5, (2, 1, 1, 1))

        If the value that the partition should sum to is given first, an
        error will be raised if there is a discrepancy:

        >>> IntegerPartition(10, [5, 4, 3, 1])
        Traceback (most recent call last):
        ...
        ValueError: Partition did not add to 10
        """
        if integer is not None:
            # Support the IntegerPartition(n, partition) calling order.
            integer, partition = partition, integer
        if isinstance(partition, (dict, Dict)):
            # Expand {part: multiplicity} into an explicit descending list.
            _ = []
            for k, v in sorted(list(partition.items()), reverse=True):
                if not v:
                    continue
                k, v = as_int(k), as_int(v)
                _.extend([k]*v)
            partition = tuple(_)
        else:
            partition = tuple(sorted(map(as_int, partition), reverse=True))
        sum_ok = False
        if integer is None:
            integer = sum(partition)
            sum_ok = True  # sum is correct by construction; skip the check
        else:
            integer = as_int(integer)
        if not sum_ok and sum(partition) != integer:
            raise ValueError("Partition did not add to %s" % integer)
        if any(i < 1 for i in partition):
            raise ValueError("The summands must all be positive.")

        obj = Basic.__new__(cls, integer, partition)
        obj.partition = list(partition)
        obj.integer = integer
        return obj

    def prev_lex(self):
        """Return the previous partition of the integer, n, in lexical order,
        wrapping around to [1, ..., 1] if the partition is [n].

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> p = IntegerPartition([4])
        >>> print(p.prev_lex())
        [3, 1]
        >>> p.partition > p.prev_lex().partition
        True
        """
        d = defaultdict(int)
        d.update(self.as_dict())
        keys = self._keys
        if keys == [1]:
            # [1, 1, ..., 1] wraps around to [n].
            return IntegerPartition({self.integer: 1})
        if keys[-1] != 1:
            # Decrease one copy of the smallest part and pay out the
            # difference as smaller parts.
            d[keys[-1]] -= 1
            if keys[-1] == 2:
                d[1] = 2
            else:
                d[keys[-1] - 1] = d[1] = 1
        else:
            # Smallest part is 1: decrease the next-smallest part and
            # redistribute the freed total greedily into smaller parts.
            d[keys[-2]] -= 1
            left = d[1] + keys[-2]
            new = keys[-2]
            d[1] = 0
            while left:
                new -= 1
                if left - new >= 0:
                    d[new] += left//new
                    left -= d[new]*new
        return IntegerPartition(self.integer, d)

    def next_lex(self):
        """Return the next partition of the integer, n, in lexical order,
        wrapping around to [n] if the partition is [1, ..., 1].

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> p = IntegerPartition([3, 1])
        >>> print(p.next_lex())
        [4]
        >>> p.partition < p.next_lex().partition
        True
        """
        d = defaultdict(int)
        d.update(self.as_dict())
        key = self._keys
        a = key[-1]  # smallest part
        if a == self.integer:
            # [n] wraps around to [1, 1, ..., 1].
            d.clear()
            d[1] = self.integer
        elif a == 1:
            if d[a] > 1:
                # Merge two 1s into a 2.
                d[a + 1] += 1
                d[a] -= 2
            else:
                # Single 1: bump the next-smallest part and break the
                # remainder of its copies down into 1s.
                b = key[-2]
                d[b + 1] += 1
                d[1] = (d[b] - 1)*b
                d[b] = 0
        else:
            if d[a] > 1:
                if len(key) == 1:
                    d.clear()
                    d[a + 1] = 1
                    d[1] = self.integer - a - 1
                else:
                    a1 = a + 1
                    d[a1] += 1
                    d[1] = d[a]*a - a1
                    d[a] = 0
            else:
                b = key[-2]
                b1 = b + 1
                d[b1] += 1
                need = d[b]*b + d[a]*a - b1
                d[a] = d[b] = 0
                d[1] = need
        return IntegerPartition(self.integer, d)

    def as_dict(self):
        """Return the partition as a dictionary whose keys are the
        partition integers and the values are the multiplicity of that
        integer.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict()
        {1: 3, 2: 1, 3: 4}
        """
        if self._dict is None:
            # group() returns (value, multiplicity) pairs for the sorted
            # partition; cache both the dict and the distinct parts.
            groups = group(self.partition, multiple=False)
            self._keys = [g[0] for g in groups]
            self._dict = dict(groups)
        return self._dict

    @property
    def conjugate(self):
        """
        Computes the conjugate partition of itself.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([6, 3, 3, 2, 1])
        >>> a.conjugate
        [5, 4, 3, 1, 1, 1]
        """
        j = 1
        # Trailing 0 sentinel guarantees the inner loop terminates.
        temp_arr = list(self.partition) + [0]
        k = temp_arr[0]
        b = [0]*k
        while k > 0:
            # b[k-1] is the number of parts >= k, i.e. column height k of
            # the transposed Ferrers diagram.
            while k > temp_arr[j]:
                b[k - 1] = j
                k -= 1
            j += 1
        return b

    def __lt__(self, other):
        """Return True if self is less than other when the partition
        is listed from smallest to biggest.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([3, 1])
        >>> a < a
        False
        >>> b = a.next_lex()
        >>> a < b
        True
        >>> a == b
        False
        """
        return list(reversed(self.partition)) < list(reversed(other.partition))

    def __le__(self, other):
        """Return True if self is less than other when the partition
        is listed from smallest to biggest.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([4])
        >>> a <= a
        True
        """
        return list(reversed(self.partition)) <= list(reversed(other.partition))

    def as_ferrers(self, char='#'):
        """
        Prints the ferrer diagram of a partition.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> print(IntegerPartition([1, 1, 5]).as_ferrers())
        #####
        #
        #
        """
        return "\n".join([char*i for i in self.partition])

    def __str__(self):
        return str(list(self.partition))
def random_integer_partition(n, seed=None):
    """
    Generates a random integer partition summing to ``n`` as a list
    of reverse-sorted integers.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import random_integer_partition

    For the following, a seed is given so a known value can be shown; in
    practice, the seed would not be given.

    >>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1])
    [85, 12, 2, 1]
    >>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1])
    [5, 3, 1, 1]
    >>> random_integer_partition(1)
    [1]
    """
    from sympy.utilities.randtest import _randint

    n = as_int(n)
    if n < 1:
        raise ValueError('n must be a positive integer')

    randint = _randint(seed)

    # Draw (part, multiplicity) pairs until the whole of n is consumed.
    pieces = []
    remaining = n
    while remaining > 0:
        part = randint(1, remaining)
        count = randint(1, remaining//part)
        pieces.append((part, count))
        remaining -= part*count
    pieces.sort(reverse=True)
    return flatten([[part]*count for part, count in pieces])
def RGS_generalized(m):
    """
    Computes the m + 1 generalized unrestricted growth strings
    and returns them as rows in matrix.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_generalized
    >>> RGS_generalized(6)
    Matrix([
    [  1,   1,   1,  1,  1, 1, 1],
    [  1,   2,   3,  4,  5, 6, 0],
    [  2,   5,  10, 17, 26, 0, 0],
    [  5,  15,  37, 77,  0, 0, 0],
    [ 15,  52, 151,  0,  0, 0, 0],
    [ 52, 203,   0,  0,  0, 0, 0],
    [203,   0,   0,  0,  0, 0, 0]])
    """
    table = zeros(m + 1)
    # Base row: exactly one completion of an empty suffix.
    for col in range(0, m + 1):
        table[0, col] = 1

    # table[row, col] counts strings of length `row` whose prefix already
    # uses `col` distinct blocks; entries outside the triangle stay zero.
    for row in range(1, m + 1):
        for col in range(m - row + 1):
            table[row, col] = col * table[row - 1, col] + table[row - 1, col + 1]
    return table
def RGS_enum(m):
    """
    RGS_enum computes the total number of restricted growth strings
    possible for a superset of size m.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_enum
    >>> from sympy.combinatorics.partitions import Partition
    >>> RGS_enum(4)
    15
    >>> RGS_enum(5)
    52
    >>> RGS_enum(6)
    203

    We can check that the enumeration is correct by actually generating
    the partitions. Here, the 15 partitions of 4 items are generated:

    >>> a = Partition(list(range(4)))
    >>> s = set()
    >>> for i in range(20):
    ...     s.add(a)
    ...     a += 1
    ...
    >>> assert len(s) == 15
    """
    # The number of set partitions of m items is the m-th Bell number.
    if m < 1:
        return 0
    if m == 1:
        return 1
    return bell(m)
def RGS_unrank(rank, m):
    """
    Gives the unranked restricted growth string for a given
    superset size.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_unrank
    >>> RGS_unrank(14, 4)
    [0, 1, 2, 3]
    >>> RGS_unrank(0, 4)
    [0, 0, 0, 0]
    """
    if m < 1:
        raise ValueError("The superset size must be >= 1")
    if rank < 0 or RGS_enum(m) <= rank:
        raise ValueError("Invalid arguments")

    L = [1] * (m + 1)  # 1-based working string; L[0] is unused
    j = 1              # number of distinct blocks used by the prefix so far
    D = RGS_generalized(m)
    for i in range(2, m + 1):
        # v = number of completions for a prefix of length i using j blocks.
        v = D[m - i, j]
        # cr = completions where element i joins one of the j existing blocks.
        cr = j*v
        if cr <= rank:
            # rank falls past all "join existing block" choices:
            # element i opens a new block.
            L[i] = j + 1
            rank -= cr
            j += 1
        else:
            # Element i joins existing block rank//v (1-based here).
            L[i] = int(rank / v + 1)
            rank %= v
    # Shift from the 1-based working values to the conventional 0-based RGS.
    return [x - 1 for x in L[1:]]
def RGS_rank(rgs):
    """
    Computes the rank of a restricted growth string.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank
    >>> RGS_rank([0, 1, 2, 1, 3])
    42
    >>> RGS_rank(RGS_unrank(4, 7))
    4
    """
    size = len(rgs)
    D = RGS_generalized(size)
    rank = 0
    for i in range(1, size):
        suffix_len = size - i - 1       # elements after position i
        blocks_used = max(rgs[:i])      # highest block index in the prefix
        rank += D[suffix_len, blocks_used + 1] * rgs[i]
    return rank
| bsd-3-clause |
pschorf/gcc-races | contrib/testsuite-management/validate_failures.py | 23 | 18811 | #!/usr/bin/python
# Script to compare testsuite failures against a list of known-to-fail
# tests.
#
# NOTE: This script is used in installations that are running Python 2.4.
# Please stick to syntax features available in 2.4 and earlier
# versions.
# Contributed by Diego Novillo <dnovillo@google.com>
#
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""This script provides a coarser XFAILing mechanism that requires no
detailed DejaGNU markings. This is useful in a variety of scenarios:
- Development branches with many known failures waiting to be fixed.
- Release branches with known failures that are not considered
important for the particular release criteria used in that branch.
The script must be executed from the toplevel build directory. When
executed it will:
1- Determine the target built: TARGET
2- Determine the source directory: SRCDIR
3- Look for a failure manifest file in
<SRCDIR>/<MANIFEST_SUBDIR>/<MANIFEST_NAME>.xfail
4- Collect all the <tool>.sum files from the build tree.
5- Produce a report stating:
a- Failures expected in the manifest but not present in the build.
b- Failures in the build not expected in the manifest.
6- If all the build failures are expected in the manifest, it exits
with exit code 0. Otherwise, it exits with error code 1.
Manifest files contain expected DejaGNU results that are otherwise
treated as failures.
They may also contain additional text:
# This is a comment. - self explanatory
@include file - the file is a path relative to the includer
@remove result text - result text is removed from the expected set
"""
import datetime
import optparse
import os
import re
import sys
# Handled test results.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]
# Regular expression matching any of the result states above.
_VALID_TEST_RESULTS_REX = re.compile("%s" % "|".join(_VALID_TEST_RESULTS))

# Subdirectory of srcdir in which to find the manifest file.
_MANIFEST_SUBDIR = 'contrib/testsuite-management'

# Pattern for naming manifest files.
# The first argument should be the toplevel GCC(/GNU tool) source directory.
# The second argument is the manifest subdir.
# The third argument is the manifest target, which defaults to the target
# triplet used during the build.
_MANIFEST_PATH_PATTERN = '%s/%s/%s.xfail'

# The options passed to the program.  Set by Main() after parsing argv.
_OPTIONS = None
def Error(msg):
  """Write an error message to stderr and terminate with exit code 1."""
  sys.stderr.write('error: %s\n' % msg)
  sys.exit(1)
class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrs: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not affect
                         the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
    ordinal: Monotonically increasing integer.
             It is used to keep results for one .exp file sorted
             by the order the tests were run.
  """

  def __init__(self, summary_line, ordinal=-1):
    try:
      self.attrs = ''
      if '|' in summary_line:
        # Optional 'attrlist |' prefix before the DejaGNU result proper.
        (self.attrs, summary_line) = summary_line.split('|', 1)
      try:
        (self.state,
         self.name,
         self.description) = re.match(r' *([A-Z]+):\s*(\S+)\s+(.*)',
                                      summary_line).groups()
      except:
        print 'Failed to parse summary line: "%s"' % summary_line
        raise
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
      self.ordinal = ordinal
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))

  def __lt__(self, other):
    # Sort by test name first, then by run order within the same name.
    return (self.name < other.name or
            (self.name == other.name and self.ordinal < other.ordinal))

  def __hash__(self):
    # ordinal and attrs intentionally excluded so that equal results from
    # different runs compare/hash the same.
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)

  def ExpirationDate(self):
    # Return a datetime.date object with the expiration date for this
    # test result.  Return None, if no expiration has been set.
    if re.search(r'expire=', self.attrs):
      expiration = re.search(r'expire=(\d\d\d\d)(\d\d)(\d\d)', self.attrs)
      if not expiration:
        Error('Invalid expire= format in "%s".  Must be of the form '
              '"expire=YYYYMMDD"' % self)
      return datetime.date(int(expiration.group(1)),
                           int(expiration.group(2)),
                           int(expiration.group(3)))
    return None

  def HasExpired(self):
    # Return True if the expiration date of this result has passed.
    # Note: returns None (falsy) when no expiration date is set.
    expiration_date = self.ExpirationDate()
    if expiration_date:
      now = datetime.date.today()
      return now > expiration_date
def GetMakefileValue(makefile_name, value_name):
  """Return the value assigned to value_name in the given Makefile.

  Scans makefile_name line by line for the first line starting with
  value_name (e.g. 'srcdir =') and returns the text after the first '='
  with surrounding whitespace stripped.  Returns None if the file does
  not exist or no line matches.
  """
  if not os.path.exists(makefile_name):
    return None
  # 'with' guarantees the file is closed even if iteration raises,
  # unlike the previous explicit open()/close() pairs.
  with open(makefile_name) as makefile:
    for line in makefile:
      if line.startswith(value_name):
        (_, value) = line.split('=', 1)
        return value.strip()
  return None
def ValidBuildDirectory(builddir, target):
  """Return True if builddir looks like a GCC toplevel build directory.

  The directory must exist, contain a Makefile, and contain either a
  'build-<target>' or a '<target>' subdirectory.
  """
  if not os.path.exists(builddir):
    return False
  if not os.path.exists('%s/Makefile' % builddir):
    return False
  return (os.path.exists('%s/build-%s' % (builddir, target)) or
          os.path.exists('%s/%s' % (builddir, target)))
def IsComment(line):
  """Return True if line is a comment."""
  # A manifest comment is any line whose first character is '#'.
  return line[:1] == '#'
def IsInterestingResult(line):
  """Return True if line is one of the summary lines we care about."""
  if '|' in line:
    # Drop an optional attribute prefix such as 'flaky | '.
    line = line.split('|', 1)[1]
  return bool(_VALID_TEST_RESULTS_REX.match(line.strip()))
def IsInclude(line):
  """Return True if line is an include of another file."""
  # '@include ' is 9 characters long.
  return line[:9] == "@include "
def GetIncludeFile(line, includer):
  """Extract the name of the include file from line.

  The path is resolved relative to the directory of the including file.
  """
  base_dir = os.path.dirname(includer)
  target = line[len("@include "):].strip()
  return os.path.join(base_dir, target)
def IsNegativeResult(line):
  """Return True if line should be removed from the expected results."""
  # '@remove ' is 8 characters long.
  return line[:8] == "@remove "
def GetNegativeResult(line):
  """Extract the name of the negative result from line."""
  return line[len("@remove "):].strip()
def ParseManifestWorker(result_set, manifest_path):
  """Read manifest_path, adding the contents to result_set.

  Recurses into files named by @include directives; @remove directives
  delete a previously collected result from result_set.
  """
  if _OPTIONS.verbosity >= 1:
    print 'Parsing manifest file %s.' % manifest_path
  manifest_file = open(manifest_path)
  for line in manifest_file:
    line = line.strip()
    if line == "":
      pass
    elif IsComment(line):
      pass
    elif IsNegativeResult(line):
      # Raises KeyError if the result was never added -- the @remove line
      # must come after whatever added the result (e.g. an @include).
      result_set.remove(TestResult(GetNegativeResult(line)))
    elif IsInclude(line):
      # Includes are resolved relative to the including manifest.
      ParseManifestWorker(result_set, GetIncludeFile(line, manifest_path))
    elif IsInterestingResult(line):
      result_set.add(TestResult(line))
    else:
      Error('Unrecognized line in manifest file: %s' % line)
  manifest_file.close()
def ParseManifest(manifest_path):
  """Create a set of TestResult instances from the given manifest file."""
  # The worker mutates the set in place (it recurses for @include lines).
  results = set()
  ParseManifestWorker(results, manifest_path)
  return results
def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = set()
  # ordinal is used when sorting the results so that tests within each
  # .exp file are kept sorted.
  ordinal=0
  sum_file = open(sum_fname)
  for line in sum_file:
    if IsInterestingResult(line):
      result = TestResult(line, ordinal)
      ordinal += 1
      if result.HasExpired():
        # Tests that have expired are not added to the set of expected
        # results. If they are still present in the set of actual results,
        # they will cause an error to be reported.
        print 'WARNING: Expected failure "%s" has expired.' % line.strip()
        continue
      result_set.add(result)
  sum_file.close()
  return result_set
def GetManifest(manifest_path):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty set.
  """
  if not os.path.exists(manifest_path):
    return set()
  return ParseManifest(manifest_path)
def CollectSumFiles(builddir):
  """Return the paths of all .sum files under builddir.

  Version-control metadata directories (.svn, .git) are not descended into.
  """
  summaries = []
  for root, dirs, files in os.walk(builddir):
    # Prune VCS directories in place so os.walk skips them.
    dirs[:] = [d for d in dirs if d not in ('.svn', '.git')]
    summaries.extend(os.path.join(root, name)
                     for name in files if name.endswith('.sum'))
  return summaries
def GetResults(sum_files):
  """Collect all the test results from the given .sum files.

  Returns the union of the TestResult sets parsed from each file.
  """
  build_results = set()
  for sum_fname in sum_files:
    print '\t%s' % sum_fname
    build_results |= ParseSummary(sum_fname)
  return build_results
def CompareResults(manifest, actual):
  """Compare sets of results and return two lists:
  - List of results present in ACTUAL but missing from MANIFEST.
  - List of results present in MANIFEST but missing from ACTUAL.
  """
  # Actual results not present in the manifest are reported as errors
  # (new, unexpected failures).
  unexpected = set(result for result in actual if result not in manifest)

  # Manifest entries not found in the actual results are reported as
  # warnings (expected failures that are not failing anymore).  Entries
  # marked flaky are exempt, since they may legitimately come and go.
  fixed = set(expected for expected in manifest
              if 'flaky' not in expected.attrs and expected not in actual)

  return unexpected, fixed
def GetManifestPath(srcdir, target, user_provided_must_exist):
  """Return the full path to the manifest file.

  A --manifest path given on the command line wins; otherwise the path is
  derived from srcdir and target using the standard naming pattern.
  """
  user_path = _OPTIONS.manifest
  if not user_path:
    assert srcdir and target
    return _MANIFEST_PATH_PATTERN % (srcdir, _MANIFEST_SUBDIR, target)
  if user_provided_must_exist and not os.path.exists(user_path):
    Error('Manifest does not exist: %s' % user_path)
  return user_path
def GetBuildData():
  """Return (srcdir, target) read from the build directory's Makefile.

  Returns (None, None) when the build directory is not a valid GCC build
  tree but the user supplied both --manifest and --results; aborts via
  Error() otherwise.
  """
  srcdir = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'srcdir =')
  target = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'target_alias=')
  if not ValidBuildDirectory(_OPTIONS.build_dir, target):
    # If we have been given a set of results to use, we may
    # not be inside a valid GCC build directory.  In that case,
    # the user must provide both a manifest file and a set
    # of results to check against it.
    if not _OPTIONS.results or not _OPTIONS.manifest:
      Error('%s is not a valid GCC top level build directory. '
            'You must use --manifest and --results to do the validation.' %
            _OPTIONS.build_dir)
    else:
      return None, None
  print 'Source directory: %s' % srcdir
  print 'Build target: %s' % target
  return srcdir, target
def PrintSummary(msg, summary):
  """Print msg as a header followed by the sorted results in summary."""
  print '\n\n%s' % msg
  for result in sorted(summary):
    print result
def GetSumFiles(results, build_dir):
  """Return the list of .sum files to analyze.

  If results is set, it is a user-provided space-separated list of .sum
  files; otherwise build_dir is scanned for them.
  """
  if not results:
    print 'Getting actual results from build directory %s' % build_dir
    sum_files = CollectSumFiles(build_dir)
  else:
    print 'Getting actual results from user-provided results'
    sum_files = results.split()
  return sum_files
def PerformComparison(expected, actual, ignore_missing_failures):
  """Report differences between expected and actual result sets.

  Returns True if every actual failure was expected, False otherwise.
  Fixed tests (expected but not failing) are only reported, never fatal.
  """
  actual_vs_expected, expected_vs_actual = CompareResults(expected, actual)

  tests_ok = True
  if len(actual_vs_expected) > 0:
    PrintSummary('Unexpected results in this build (new failures)',
                 actual_vs_expected)
    tests_ok = False

  if not ignore_missing_failures and len(expected_vs_actual) > 0:
    PrintSummary('Expected results not present in this build (fixed tests)'
                 '\n\nNOTE: This is not a failure. It just means that these '
                 'tests were expected\nto fail, but they worked in this '
                 'configuration.\n', expected_vs_actual)

  if tests_ok:
    print '\nSUCCESS: No unexpected failures.'

  return tests_ok
def CheckExpectedResults():
  """Validate the build's test results against the expected-failure manifest.

  Returns True when all failures found in the .sum files are listed in the
  manifest.
  """
  srcdir, target = GetBuildData()
  manifest_path = GetManifestPath(srcdir, target, True)
  print 'Manifest:         %s' % manifest_path
  manifest = GetManifest(manifest_path)
  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)

  if _OPTIONS.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  return PerformComparison(manifest, actual, _OPTIONS.ignore_missing_failures)
def ProduceManifest():
  """Write the current build's failures out as a new manifest file.

  Refuses to overwrite an existing manifest unless --force was given.
  Always returns True.
  """
  (srcdir, target) = GetBuildData()
  manifest_path = GetManifestPath(srcdir, target, False)
  print 'Manifest:         %s' % manifest_path
  if os.path.exists(manifest_path) and not _OPTIONS.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_path)

  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)
  # NOTE: explicit open/close kept for Python 2.4 compatibility ('with'
  # requires 2.5+).
  manifest_file = open(manifest_path, 'w')
  for result in sorted(actual):
    print result
    manifest_file.write('%s\n' % result)
  manifest_file.close()

  return True
def CompareBuilds():
  """Compare this build's test results against those of a clean build.

  The clean build's failures play the role of the manifest: anything this
  build fails that the clean build did not is reported as a regression.
  """
  (srcdir, target) = GetBuildData()

  patched = GetResults(GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir))
  baseline = GetResults(GetSumFiles(_OPTIONS.results, _OPTIONS.clean_build))

  return PerformComparison(baseline, patched, _OPTIONS.ignore_missing_failures)
def Main(argv):
  """Parse command line arguments and run the requested action.

  Returns 0 when the action succeeds (e.g. all failures were expected)
  and 1 otherwise.
  """
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--clean_build', action='store', type='string',
                    dest='clean_build', default=None,
                    help='Compare test results from this build against '
                    'those of another (clean) build. Use this option '
                    'when comparing the test results of your patch versus '
                    'the test results of a clean build without your patch. '
                    'You must provide the path to the top directory of your '
                    'clean build.')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  # Typo fix in the help text below: 'testsuite-managment' ->
  # 'testsuite-management' (must match the real _MANIFEST_SUBDIR).
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from '
                    'contrib/testsuite-management/<target_alias>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, XPASS or UNRESOLVED (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  global _OPTIONS
  (_OPTIONS, _) = parser.parse_args(argv[1:])

  # --produce_manifest and --clean_build are alternate modes; plain
  # manifest validation is the default.
  if _OPTIONS.produce_manifest:
    retval = ProduceManifest()
  elif _OPTIONS.clean_build:
    retval = CompareBuilds()
  else:
    retval = CheckExpectedResults()

  if retval:
    return 0
  else:
    return 1
if __name__ == '__main__':
  # Exit status: 0 when all failures were accounted for, 1 otherwise.
  retval = Main(sys.argv)
  sys.exit(retval)
| gpl-2.0 |
alex/warehouse | tests/unit/cache/origin/test_init.py | 3 | 8664 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from warehouse.cache import origin
from warehouse.cache.origin.interfaces import IOriginCache
def test_store_purge_keys():
    """store_purge_keys collects the purge keys of every new/dirty/deleted
    object that has a registered key maker, de-duplicating them into
    session.info; objects without a key maker (Type4) are ignored."""
    class Type1:
        pass

    class Type2:
        pass

    class Type3:
        pass

    class Type4:
        pass

    config = pretend.stub(
        registry={
            "cache_keys": {
                Type1: lambda o: origin.CacheKeys(cache=[], purge=["type_1"]),
                Type2: lambda o: origin.CacheKeys(
                    cache=[],
                    purge=["type_2", "foo"],
                ),
                Type3: lambda o: origin.CacheKeys(
                    cache=[],
                    purge=["type_3", "foo"],
                ),
            },
        },
    )
    session = pretend.stub(
        info={},
        new={Type1()},
        dirty={Type2()},
        deleted={Type3(), Type4()},
    )

    origin.store_purge_keys(config, session, pretend.stub())

    # "foo" appears only once even though two key makers produced it.
    assert session.info["warehouse.cache.origin.purges"] == {
        "type_1", "type_2", "type_3", "foo",
    }
def test_execute_purge_success(app_config):
    """execute_purge hands the stored purge keys to the registered
    IOriginCache service and removes them from session.info."""
    cacher = pretend.stub(purge=pretend.call_recorder(lambda purges: None))
    factory = pretend.call_recorder(lambda ctx, config: cacher)
    app_config.register_service_factory(factory, IOriginCache)
    app_config.commit()

    session = pretend.stub(
        info={
            "warehouse.cache.origin.purges": {"type_1", "type_2", "foobar"},
        },
    )

    origin.execute_purge(app_config, session)

    assert factory.calls == [pretend.call(None, app_config)]
    assert cacher.purge.calls == [pretend.call({"type_1", "type_2", "foobar"})]
    assert "warehouse.cache.origin.purges" not in session.info
def test_execute_purge_no_backend():
    """execute_purge still clears the stored purge keys when no origin
    cache backend is registered (find_service_factory raises)."""
    @pretend.call_recorder
    def find_service_factory(interface):
        raise ValueError

    config = pretend.stub(find_service_factory=find_service_factory)
    session = pretend.stub(
        info={"warehouse.cache.origin.purges": {"type_1", "type_2", "foobar"}},
    )

    origin.execute_purge(config, session)

    assert find_service_factory.calls == [pretend.call(origin.IOriginCache)]
    assert "warehouse.cache.origin.purges" not in session.info
class TestOriginCache:

    def test_no_cache_key(self):
        """origin_cache passes the response straight through when the
        context type has no registered cache keys."""
        response = pretend.stub()

        @origin.origin_cache(1)
        def view(context, request):
            return response

        def raiser(iface):
            raise ValueError

        context = pretend.stub()
        request = pretend.stub(
            registry={"cache_keys": {}},
            find_service=raiser,
        )

        assert view(context, request) is response

    def test_no_origin_cache(self):
        """origin_cache passes the response through when cache keys exist
        but no IOriginCache service is registered."""
        class Fake:
            pass

        response = pretend.stub()

        @origin.origin_cache(1)
        def view(context, request):
            return response

        @pretend.call_recorder
        def raiser(iface):
            raise ValueError

        context = Fake()
        request = pretend.stub(
            registry={
                "cache_keys": {
                    Fake: lambda X: origin.CacheKeys(cache=[], purge=[]),
                },
            },
            find_service=raiser,
        )

        assert view(context, request) is response
        assert raiser.calls == [pretend.call(IOriginCache)]

    @pytest.mark.parametrize(
        ("seconds", "keys"),
        [
            (745, None),
            (823, ["nope", "yup"]),
        ],
    )
    def test_response_hook(self, seconds, keys):
        """origin_cache registers a response callback that caches using the
        sorted union of registered and explicitly passed keys."""
        class Fake:
            pass

        class Cache:

            @staticmethod
            @pretend.call_recorder
            def cache(keys, request, response, seconds, stale_while_revalidate,
                      stale_if_error):
                pass

        response = pretend.stub()

        deco = origin.origin_cache(seconds, keys=keys)

        @deco
        def view(context, request):
            return response

        key_maker = pretend.call_recorder(
            lambda obj: origin.CacheKeys(cache=["one", "two"], purge=[])
        )
        cacher = Cache()
        context = Fake()
        callbacks = []
        request = pretend.stub(
            registry={"cache_keys": {Fake: key_maker}},
            find_service=lambda iface: cacher,
            add_response_callback=callbacks.append,
        )

        assert view(context, request) is response
        assert key_maker.calls == [pretend.call(context)]
        assert len(callbacks) == 1

        # Firing the callback must delegate to the cacher with our keys.
        callbacks[0](request, response)

        assert cacher.cache.calls == [
            pretend.call(
                sorted(["one", "two"] + ([] if keys is None else keys)),
                request,
                response,
                seconds=seconds,
                stale_while_revalidate=None,
                stale_if_error=None,
            ),
        ]
class TestKeyMaker:

    def test_both_cache_and_purge(self):
        """Both key lists are rendered against the object's attributes."""
        maker = origin.key_maker_factory(
            cache_keys=["foo", "foo/{obj.attr}"],
            purge_keys=["bar", "bar/{obj.attr}"],
        )
        obj = pretend.stub(attr="bar")

        assert maker(obj) == origin.CacheKeys(
            cache=["foo", "foo/bar"],
            purge=["bar", "bar/bar"],
        )

    def test_only_cache(self):
        """A missing purge_keys argument yields an empty purge list."""
        maker = origin.key_maker_factory(
            cache_keys=["foo", "foo/{obj.attr}"],
            purge_keys=None,
        )
        obj = pretend.stub(attr="bar")

        assert maker(obj) == origin.CacheKeys(
            cache=["foo", "foo/bar"],
            purge=[],
        )

    def test_only_purge(self):
        """A missing cache_keys argument yields an empty cache list."""
        maker = origin.key_maker_factory(
            cache_keys=None,
            purge_keys=["bar", "bar/{obj.attr}"],
        )
        obj = pretend.stub(attr="bar")

        assert maker(obj) == origin.CacheKeys(
            cache=[],
            purge=["bar", "bar/bar"],
        )
def test_register_origin_keys(monkeypatch):
    # register_origin_cache_keys should build one key maker per model class
    # via key_maker_factory and file it in config.registry["cache_keys"].
    class Fake1:
        pass

    class Fake2:
        pass

    key_maker = pretend.stub()
    key_maker_factory = pretend.call_recorder(lambda **kw: key_maker)
    monkeypatch.setattr(origin, "key_maker_factory", key_maker_factory)

    config = pretend.stub(registry={})

    origin.register_origin_cache_keys(
        config, Fake1, cache_keys=["one", "two/{obj.attr}"])
    origin.register_origin_cache_keys(
        config, Fake2, cache_keys=["three"], purge_keys=["lol"],
    )

    # purge_keys must default to None when the caller does not supply it.
    assert key_maker_factory.calls == [
        pretend.call(cache_keys=["one", "two/{obj.attr}"], purge_keys=None),
        pretend.call(cache_keys=["three"], purge_keys=["lol"]),
    ]
    # Both classes end up registered under the shared "cache_keys" mapping.
    assert config.registry == {
        "cache_keys": {
            Fake1: key_maker,
            Fake2: key_maker,
        },
    }
def test_includeme_no_origin_cache():
    """With no origin_cache.backend setting, includeme only adds the directive."""
    config = pretend.stub(
        add_directive=pretend.call_recorder(lambda name, func: None),
        registry=pretend.stub(settings={}),
    )

    origin.includeme(config)

    expected_directive = pretend.call(
        "register_origin_cache_keys",
        origin.register_origin_cache_keys,
    )
    assert config.add_directive.calls == [expected_directive]
def test_includeme_with_origin_cache():
    # When origin_cache.backend is configured, includeme should resolve the
    # dotted backend path via config.maybe_dotted and register the backend's
    # create_service factory for IOriginCache, as well as add the directive.
    cache_class = pretend.stub(create_service=pretend.stub())
    config = pretend.stub(
        add_directive=pretend.call_recorder(lambda name, func: None),
        registry=pretend.stub(
            settings={
                "origin_cache.backend":
                    "warehouse.cache.origin.fastly.FastlyCache",
            },
        ),
        maybe_dotted=pretend.call_recorder(lambda n: cache_class),
        register_service_factory=pretend.call_recorder(lambda f, iface: None)
    )

    origin.includeme(config)

    assert config.add_directive.calls == [
        pretend.call(
            "register_origin_cache_keys",
            origin.register_origin_cache_keys,
        ),
    ]
    # The configured dotted path was resolved exactly once...
    assert config.maybe_dotted.calls == [
        pretend.call("warehouse.cache.origin.fastly.FastlyCache"),
    ]
    # ...and its create_service classmethod was registered as the factory.
    assert config.register_service_factory.calls == [
        pretend.call(cache_class.create_service, IOriginCache),
    ]
| apache-2.0 |
galtys/odoo | addons/hr_expense/report/hr_expense_report.py | 29 | 6423 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_expense_report(osv.osv):
    # Read-only statistics model: _auto = False means the ORM creates no
    # table; init() below (re)builds the backing SQL view instead.  Rows are
    # expense lines joined with their expense sheet and a currency-rate CTE.
    _name = "hr.expense.report"
    _description = "Expenses Statistics"
    _auto = False
    _rec_name = 'date'
    _columns = {
        # All fields are readonly: values come exclusively from the SQL view.
        'date': fields.date('Date ', readonly=True),
        'create_date': fields.datetime('Creation Date', readonly=True),
        'product_id': fields.many2one('product.product', 'Product', readonly=True),
        'journal_id': fields.many2one('account.journal', 'Force Journal', readonly=True),
        'product_qty': fields.float('Product Quantity', readonly=True),
        'employee_id': fields.many2one('hr.employee', "Employee's Name", readonly=True),
        'date_confirm': fields.date('Confirmation Date', readonly=True),
        'date_valid': fields.date('Validation Date', readonly=True),
        'department_id': fields.many2one('hr.department', 'Department', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'user_id': fields.many2one('res.users', 'Validation User', readonly=True),
        'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
        'price_total': fields.float('Total Price', readonly=True, digits_compute=dp.get_precision('Account')),
        # Delays are expressed in days (epoch seconds / (3600*24) in the SQL).
        'delay_valid': fields.float('Delay to Valid', readonly=True),
        'delay_confirm': fields.float('Delay to Confirm', readonly=True),
        'analytic_account': fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
        'price_average': fields.float('Average Price', readonly=True, digits_compute=dp.get_precision('Account')),
        'nbr': fields.integer('# of Lines', readonly=True),  # TDE FIXME master: rename into nbr_lines
        # NOTE(review): despite the '# of ...' labels, the SQL below fills
        # these two from the line's own unit_quantity / analytic_account id
        # via scalar subqueries, not from real counts -- verify the intent.
        'no_of_products': fields.integer('# of Products', readonly=True),
        'no_of_account': fields.integer('# of Accounts', readonly=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('confirm', 'Waiting confirmation'),
            ('accepted', 'Accepted'),
            ('done', 'Done'),
            ('cancelled', 'Cancelled')],
            'Status', readonly=True),
    }
    _order = 'date desc'

    def init(self, cr):
        # Drop and recreate the SQL view backing this model.  The CTE maps
        # each res_currency_rate row to its validity window
        # [date_start, date_end) so amounts can be converted at the rate in
        # force on the expense's confirmation date (falling back to now()
        # when the expense is not yet confirmed).
        tools.drop_view_if_exists(cr, 'hr_expense_report')
        cr.execute("""
            create or replace view hr_expense_report as (
                 WITH currency_rate (currency_id, rate, date_start, date_end) AS (
                        SELECT r.currency_id, r.rate, r.name AS date_start,
                            (SELECT name FROM res_currency_rate r2
                             WHERE r2.name > r.name AND
                                   r2.currency_id = r.currency_id
                             ORDER BY r2.name ASC
                             LIMIT 1) AS date_end
                        FROM res_currency_rate r
                 )
                 select
                     min(l.id) as id,
                     s.date as date,
                     s.create_date as create_date,
                     s.employee_id,
                     s.journal_id,
                     s.currency_id,
                     s.date_confirm as date_confirm,
                     s.date_valid as date_valid,
                     s.user_valid as user_id,
                     s.department_id,
                     avg(extract('epoch' from age(s.date_valid,s.date)))/(3600*24) as delay_valid,
                     avg(extract('epoch' from age(s.date_valid,s.date_confirm)))/(3600*24) as delay_confirm,
                     l.product_id as product_id,
                     l.analytic_account as analytic_account,
                     sum(l.unit_quantity * u.factor) as product_qty,
                     s.company_id as company_id,
                     sum(l.unit_amount/cr.rate*l.unit_quantity)::decimal(16,2) as price_total,
                     (sum(l.unit_quantity*l.unit_amount/cr.rate)/sum(case when l.unit_quantity=0 or u.factor=0 then 1 else l.unit_quantity * u.factor end))::decimal(16,2) as price_average,
                     count(*) as nbr,
                     (select unit_quantity from hr_expense_line where id=l.id and product_id is not null) as no_of_products,
                     (select analytic_account from hr_expense_line where id=l.id and analytic_account is not null) as no_of_account,
                     s.state
                 from hr_expense_line l
                 left join hr_expense_expense s on (s.id=l.expense_id)
                 left join product_uom u on (u.id=l.uom_id)
                 left join currency_rate cr on (cr.currency_id = s.currency_id and
                     cr.date_start <= coalesce(s.date_confirm, now()) and
                     (cr.date_end is null or cr.date_end > coalesce(s.date_confirm, now())))
                 group by
                     s.date,
                     s.create_date,
                     s.date_confirm,
                     s.date_valid,
                     l.product_id,
                     l.analytic_account,
                     s.currency_id,
                     s.user_valid,
                     s.department_id,
                     l.uom_id,
                     l.id,
                     s.state,
                     s.journal_id,
                     s.company_id,
                     s.employee_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
GdZ/scriptfile | software/googleAppEngine/google/storage/speckle/python/django/backend/base.py | 2 | 8620 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Django database backend for rdbms.
This acts as a simple wrapper around the MySQLdb database backend to utilize an
alternate settings.py configuration. When used in an application running on
Google App Engine, this backend will use the GAE Apiproxy as a communications
driver. When used with dev_appserver, or from outside the context of an App
Engine app, this backend will instead use a driver that communicates over the
Google API for SQL Service.
Communicating over Google API requires valid OAuth 2.0 credentials. Before
the backend can be used with this transport on dev_appserver, users should
first run the Django 'syncdb' management command (or any other of the commands
that interact with the database), and follow the instructions to obtain an
OAuth2 token and persist it to disk for subsequent use.
If you should need to manually force the selection of a particular driver
module, you can do so by specifying it in the OPTIONS section of the database
configuration in settings.py. For example:
DATABASES = {
'default': {
'ENGINE': 'google.storage.speckle.python.django.backend',
'INSTANCE': 'example.com:project:instance',
'NAME': 'mydb',
'USER': 'myusername',
'PASSWORD': 'mypassword',
'OPTIONS': {
'driver': 'google.storage.speckle.python.api.rdbms_googleapi',
}
}
}
"""
import logging
import os
import sys
from django.core import exceptions
from django.db import backends
from django.db.backends import signals
from django.utils import safestring
from google.appengine.api import apiproxy_stub_map
from google.storage.speckle.python.api import rdbms
from google.storage.speckle.python.django.backend import client
# SERVER_SOFTWARE prefix that identifies a production App Engine instance.
PROD_SERVER_SOFTWARE = 'Google App Engine'

# Django's stock MySQL backend does "import MySQLdb".  To make that import
# resolve to the rdbms module instead, temporarily pop any real MySQLdb
# modules out of sys.modules, alias 'MySQLdb' to rdbms, import the backend,
# then restore whatever real modules were displaced.
modules_to_swap = (
    'MySQLdb',
    'MySQLdb.constants',
    'MySQLdb.constants.CLIENT',
    'MySQLdb.constants.FIELD_TYPE',
    'MySQLdb.constants.FLAG',
    'MySQLdb.converters',
)

old_modules = [(name, sys.modules.pop(name)) for name in modules_to_swap
               if name in sys.modules]

sys.modules['MySQLdb'] = rdbms
from django.db.backends.mysql import base
for module_name, module in old_modules:
  sys.modules[module_name] = module

# (settings.py key, Connect() keyword argument, required?) triples consumed
# by DatabaseWrapper._cursor when assembling the connection kwargs.
_SETTINGS_CONNECT_ARGS = (
    ('HOST', 'dsn', False),
    ('INSTANCE', 'instance', True),
    ('NAME', 'database', True),
    ('USER', 'user', False),
    ('PASSWORD', 'password', False),
    ('OAUTH2_SECRET', 'oauth2_refresh_token', False),
    ('driver', 'driver_name', False),
    ('oauth_storage', 'oauth_storage', False),
)
def _GetDriver(driver_name=None):
  """Imports and returns the rdbms driver module to use.

  When no explicit name is given, one is chosen from the runtime
  environment: the ApiProxy driver when an 'rdbms' apiproxy stub is
  registered (production App Engine), otherwise the Google API driver
  (dev_appserver and command line tools such as manage.py syncdb).

  Args:
    driver_name: Optional dotted name of the driver module to import.

  Returns:
    The imported driver module.
  """
  if not driver_name:
    suffix = ('rdbms_apiproxy'
              if apiproxy_stub_map.apiproxy.GetStub('rdbms')
              else 'rdbms_googleapi')
    driver_name = 'google.storage.speckle.python.api.' + suffix
  __import__(driver_name)
  return sys.modules[driver_name]
def Connect(driver_name=None, oauth2_refresh_token=None, **kwargs):
  """Gets an appropriate connection driver, and connects with it.

  Args:
    driver_name: The name of the driver module to use.
    oauth2_refresh_token: The OAuth2 refresh token used to acquire an access
      token for authenticating requests made by the Google API driver;
      defaults to the value provided by the GOOGLE_SQL_OAUTH2_REFRESH_TOKEN
      environment variable, if present.
    kwargs: Additional keyword arguments to pass to the driver's connect
      function.

  Returns:
    An rdbms.Connection subclass instance.

  Raises:
    exceptions.ImproperlyConfigured: Valid OAuth 2.0 credentials could not be
      found in storage and no oauth2_refresh_token was given.
  """
  driver = _GetDriver(driver_name)
  # OAuth credential setup is only needed for the Google API driver when
  # running inside an App Engine runtime (e.g. dev_appserver).
  if (os.getenv('APPENGINE_RUNTIME') and
      driver.__name__.endswith('rdbms_googleapi')):
    if os.getenv('SERVER_SOFTWARE', '').startswith(PROD_SERVER_SOFTWARE):
      logging.warning(
          'Using the Google API driver is not recommended when running on '
          'production App Engine. You should instead use the GAE API Proxy '
          'driver (google.storage.speckle.python.api.rdbms_apiproxy).')

    # Imported lazily: only this code path needs oauth2client and the
    # Google API driver support modules.
    import oauth2client.client
    from google.storage.speckle.python.api import rdbms_googleapi
    from google.storage.speckle.python.django.backend import oauth2storage

    storage = kwargs.setdefault('oauth_storage', oauth2storage.storage)
    credentials = storage.get()
    if credentials is None or credentials.invalid:
      # No usable stored credentials; fall back to a refresh token from the
      # argument or the environment, or give up with setup instructions.
      if not oauth2_refresh_token:
        oauth2_refresh_token = os.getenv('GOOGLE_SQL_OAUTH2_REFRESH_TOKEN')
      if not oauth2_refresh_token:
        raise exceptions.ImproperlyConfigured(
            'No valid OAuth 2.0 credentials. Before using the Google SQL '
            'Service backend on dev_appserver, you must first run "manage.py '
            'syncdb" and proceed through the given instructions to fetch an '
            'OAuth 2.0 token.')
      # Build credentials from the refresh token and persist them so later
      # runs can skip this bootstrap.
      credentials = oauth2client.client.OAuth2Credentials(
          None, rdbms_googleapi.CLIENT_ID, rdbms_googleapi.CLIENT_SECRET,
          oauth2_refresh_token, None,
          'https://accounts.google.com/o/oauth2/token',
          rdbms_googleapi.USER_AGENT)
      credentials.set_store(storage)
      storage.put(credentials)
  return driver.connect(**kwargs)
class DatabaseOperations(base.DatabaseOperations):
  """DatabaseOperations for use with rdbms."""

  def last_executed_query(self, cursor, sql, params):
    """Returns the query last executed by the given cursor.

    Placeholders found in the given sql string will be replaced with actual
    values from the params list.

    Args:
      cursor: The database Cursor.
      sql: The raw query containing placeholders.
      params: The sequence of parameters.

    Returns:
      The string representing the query last executed by the cursor.
    """
    # Deliberately bypass the MySQL implementation (base.DatabaseOperations)
    # and call the generic BaseDatabaseOperations version instead --
    # presumably because the MySQL one depends on MySQLdb cursor internals
    # that rdbms does not provide; verify before changing.
    return backends.BaseDatabaseOperations.last_executed_query(
        self, cursor, sql, params)
class DatabaseWrapper(base.DatabaseWrapper):
  """Django DatabaseWrapper for use with rdbms.

  Overrides many pieces of the MySQL DatabaseWrapper for compatibility with
  the rdbms API.
  """
  vendor = 'rdbms'

  def __init__(self, *args, **kwargs):
    super(DatabaseWrapper, self).__init__(*args, **kwargs)
    self.client = client.DatabaseClient(self)
    # DatabaseOperations' constructor signature differs across Django
    # versions: try the zero-argument form first, then the form that takes
    # the connection wrapper.
    try:
      self.ops = DatabaseOperations()
    except TypeError:
      self.ops = DatabaseOperations(self)

  def _cursor(self):
    # Lazily establish the connection on first cursor request.
    if not self._valid_connection():
      kwargs = {'conv': base.django_conversions, 'dsn': None}
      settings_dict = self.settings_dict
      # Entries in OPTIONS (e.g. 'driver', 'oauth_storage') override or
      # extend the top-level settings keys.
      settings_dict.update(settings_dict.get('OPTIONS', {}))
      # Translate settings keys to Connect() kwargs; see
      # _SETTINGS_CONNECT_ARGS for the (key, kwarg, required) triples.
      for settings_key, kwarg, required in _SETTINGS_CONNECT_ARGS:
        value = settings_dict.get(settings_key)
        if value:
          kwargs[kwarg] = value
        elif required:
          raise exceptions.ImproperlyConfigured(
              "You must specify a '%s' for database '%s'" %
              (settings_key, self.alias))
      self.connection = Connect(**kwargs)
      # Teach the connection to encode Django's safe-string types the same
      # way as the plain str/unicode types.  (Python 2 only: uses the
      # builtin `unicode`.)
      encoders = {safestring.SafeUnicode: self.connection.encoders[unicode],
                  safestring.SafeString: self.connection.encoders[str]}
      self.connection.encoders.update(encoders)
      signals.connection_created.send(sender=self.__class__, connection=self)
    cursor = base.CursorWrapper(self.connection.cursor())
    return cursor
| mit |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/selenium/webdriver/ie/webdriver.py | 10 | 2039 | #!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.webdriver.common import utils
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.command import Command
from selenium.common.exceptions import WebDriverException
import base64
from service import Service
# Defaults for the WebDriver constructor below.
DEFAULT_TIMEOUT = 30      # accepted for backward compatibility; not used below
DEFAULT_PORT = 0          # 0 means "pick a free ephemeral port"
DEFAULT_HOST = None
DEFAULT_LOG_LEVEL = None
DEFAULT_LOG_FILE = None
class WebDriver(RemoteWebDriver):
    """Drives Internet Explorer through a local IEDriverServer process."""

    def __init__(self, executable_path='IEDriverServer.exe',
                 port=DEFAULT_PORT, timeout=DEFAULT_TIMEOUT, host=DEFAULT_HOST,
                 log_level=DEFAULT_LOG_LEVEL, log_file=DEFAULT_LOG_FILE):
        """Starts an IEDriverServer and opens a remote session against it.

        Args:
         - executable_path: Path to the IEDriverServer binary.
         - port: Port the server should listen on; 0 picks a free port.
         - timeout: Kept for backward compatibility; not currently used here.
         - host, log_level, log_file: Forwarded to the driver Service.
        """
        self.port = port
        if self.port == 0:
            self.port = utils.free_port()
        self.host = host
        self.log_level = log_level
        self.log_file = log_file
        self.iedriver = Service(executable_path, port=self.port,
            host=self.host, log_level=self.log_level, log_file=self.log_file)
        self.iedriver.start()
        RemoteWebDriver.__init__(
            self,
            command_executor='http://localhost:%d' % self.port,
            desired_capabilities=DesiredCapabilities.INTERNETEXPLORER)

    def quit(self):
        """Ends the session and always stops the IEDriverServer.

        Bug fix: previously, if RemoteWebDriver.quit() raised (e.g. the
        browser or server already died), iedriver.stop() was never reached
        and the IEDriverServer.exe process leaked.  The finally block
        guarantees the service is stopped.
        """
        try:
            RemoteWebDriver.quit(self)
        finally:
            self.iedriver.stop()
| mit |
aral/isvat | django/conf/locale/fr/formats.py | 107 | 1315 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y',  # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y',  # Swiss (fr_CH), '25.10.2006', '25.10.06'
    # '%d %B %Y', '%d %b %Y',  # '25 octobre 2006', '25 oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S',  # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M',  # '25/10/2006 14:30'
    '%d/%m/%Y',  # '25/10/2006'
    '%d.%m.%Y %H:%M:%S',  # Swiss (fr_CH), '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',  # Swiss (fr_CH), '25.10.2006 14:30'
    '%d.%m.%Y',  # Swiss (fr_CH), '25.10.2006'
)
# Number formatting: comma as the decimal separator, with a non-breaking
# space between groups of three digits.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3  # digits per thousands group
| mit |
ivandevp/django | django/contrib/auth/forms.py | 100 | 14241 | from __future__ import unicode_literals
from django import forms
from django.contrib.auth import (
authenticate, get_user_model, password_validation,
)
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, identify_hasher,
)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.forms.utils import flatatt
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.html import format_html, format_html_join
from django.utils.http import urlsafe_base64_encode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
class ReadOnlyPasswordHashWidget(forms.Widget):
    """Renders a password hash as a read-only summary <div>, never an input."""

    def render(self, name, value, attrs):
        encoded = value
        final_attrs = self.build_attrs(attrs)
        summary = self._summarize(encoded)
        return format_html("<div{}>{}</div>", flatatt(final_attrs), summary)

    def _summarize(self, encoded):
        # Missing or deliberately-unusable hashes get a fixed message.
        if not encoded or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
            return mark_safe("<strong>%s</strong>" % ugettext("No password set."))
        try:
            hasher = identify_hasher(encoded)
        except ValueError:
            return mark_safe("<strong>%s</strong>" % ugettext(
                "Invalid password format or unknown hashing algorithm."))
        # Known hasher: show its safe (non-secret) summary as key/value pairs.
        pairs = ((ugettext(key), val)
                 for key, val in hasher.safe_summary(encoded).items())
        return format_html_join('', "<strong>{}</strong>: {} ", pairs)
class ReadOnlyPasswordHashField(forms.Field):
    # Display-only form field for a stored password hash.
    widget = ReadOnlyPasswordHashWidget

    def __init__(self, *args, **kwargs):
        # The hash is display-only, so the field must never be required.
        kwargs.setdefault("required", False)
        super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)

    def bound_data(self, data, initial):
        # Always return initial because the widget doesn't
        # render an input field, so no data ever comes back from the browser.
        return initial

    def has_changed(self, initial, data):
        # Display-only field: it can never be changed by form submission.
        return False
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and
    password.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    password1 = forms.CharField(label=_("Password"),
        widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"),
        widget=forms.PasswordInput,
        help_text=_("Enter the same password as before, for verification."))

    class Meta:
        model = User
        fields = ("username",)

    def clean_password2(self):
        # Fields are cleaned in declaration order, so password1 is already
        # in cleaned_data by the time this runs.
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        # Copy the username onto the (unsaved) instance so validators that
        # inspect user attributes can see it when validate_password runs
        # against self.instance below.
        self.instance.username = self.cleaned_data.get('username')
        password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
        return password2

    def save(self, commit=True):
        # Hash the raw password via set_password instead of storing it as-is.
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    # Admin change form: shows the stored hash read-only and links to the
    # dedicated password-change form instead of editing the password inline.
    password = ReadOnlyPasswordHashField(label=_("Password"),
        help_text=_("Raw passwords are not stored, so there is no way to see "
                    "this user's password, but you can change the password "
                    "using <a href=\"../password/\">this form</a>."))

    class Meta:
        model = User
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(UserChangeForm, self).__init__(*args, **kwargs)
        f = self.fields.get('user_permissions')
        if f is not None:
            # select_related avoids a per-row content_type query when the
            # permissions widget renders its choices.
            f.queryset = f.queryset.select_related('content_type')

    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    username = forms.CharField(max_length=254)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    error_messages = {
        'invalid_login': _("Please enter a correct %(username)s and password. "
                           "Note that both fields may be case-sensitive."),
        'inactive': _("This account is inactive."),
    }

    def __init__(self, request=None, *args, **kwargs):
        """
        The 'request' parameter is set for custom auth use by subclasses.
        The form data comes in via the standard 'data' kwarg.
        """
        self.request = request
        self.user_cache = None  # populated by clean() on successful auth
        super(AuthenticationForm, self).__init__(*args, **kwargs)

        # Set the label for the "username" field from the user model's
        # USERNAME_FIELD, so custom user models show the right label.
        UserModel = get_user_model()
        self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
        if self.fields['username'].label is None:
            self.fields['username'].label = capfirst(self.username_field.verbose_name)

    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')

        if username and password:
            self.user_cache = authenticate(username=username,
                                           password=password)
            if self.user_cache is None:
                # authenticate() returns None for any failure; the generic
                # message does not distinguish unknown user from bad password.
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                    params={'username': self.username_field.verbose_name},
                )
            else:
                self.confirm_login_allowed(self.user_cache)

        return self.cleaned_data

    def confirm_login_allowed(self, user):
        """
        Controls whether the given User may log in. This is a policy setting,
        independent of end-user authentication. This default behavior is to
        allow login by active users, and reject login by inactive users.

        If the given user cannot log in, this method should raise a
        ``forms.ValidationError``.

        If the given user may log in, this method should return None.
        """
        if not user.is_active:
            raise forms.ValidationError(
                self.error_messages['inactive'],
                code='inactive',
            )

    def get_user_id(self):
        # Convenience accessor; None until clean() has authenticated someone.
        if self.user_cache:
            return self.user_cache.id
        return None

    def get_user(self):
        return self.user_cache
class PasswordResetForm(forms.Form):
    email = forms.EmailField(label=_("Email"), max_length=254)

    def send_mail(self, subject_template_name, email_template_name,
                  context, from_email, to_email, html_email_template_name=None):
        """
        Sends a django.core.mail.EmailMultiAlternatives to `to_email`.
        """
        subject = loader.render_to_string(subject_template_name, context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        body = loader.render_to_string(email_template_name, context)

        email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
        if html_email_template_name is not None:
            # Attach an HTML alternative alongside the plain-text body.
            html_email = loader.render_to_string(html_email_template_name, context)
            email_message.attach_alternative(html_email, 'text/html')

        email_message.send()

    def get_users(self, email):
        """Given an email, return matching user(s) who should receive a reset.

        This allows subclasses to more easily customize the default policies
        that prevent inactive users and users with unusable passwords from
        resetting their password.
        """
        active_users = get_user_model()._default_manager.filter(
            email__iexact=email, is_active=True)
        return (u for u in active_users if u.has_usable_password())

    def save(self, domain_override=None,
             subject_template_name='registration/password_reset_subject.txt',
             email_template_name='registration/password_reset_email.html',
             use_https=False, token_generator=default_token_generator,
             from_email=None, request=None, html_email_template_name=None):
        """
        Generates a one-use only link for resetting password and sends to the
        user.
        """
        email = self.cleaned_data["email"]
        # One email per matching user; each gets its own uid/token pair.
        for user in self.get_users(email):
            if not domain_override:
                current_site = get_current_site(request)
                site_name = current_site.name
                domain = current_site.domain
            else:
                site_name = domain = domain_override
            context = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': 'https' if use_https else 'http',
            }
            self.send_mail(subject_template_name, email_template_name,
                           context, from_email, user.email,
                           html_email_template_name=html_email_template_name)
class SetPasswordForm(forms.Form):
    """
    A form that lets a user set a new password without entering the old
    password.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    new_password1 = forms.CharField(
        label=_("New password"),
        widget=forms.PasswordInput,
        help_text=password_validation.password_validators_help_text_html())
    new_password2 = forms.CharField(
        label=_("New password confirmation"),
        widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # The user whose password is being set; also handed to the
        # password validators.
        self.user = user
        super(SetPasswordForm, self).__init__(*args, **kwargs)

    def clean_new_password2(self):
        first = self.cleaned_data.get('new_password1')
        second = self.cleaned_data.get('new_password2')
        if first and second and first != second:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        password_validation.validate_password(second, self.user)
        return second

    def save(self, commit=True):
        self.user.set_password(self.cleaned_data["new_password1"])
        if commit:
            self.user.save()
        return self.user
class PasswordChangeForm(SetPasswordForm):
    """
    A form that lets a user change their password by entering their old
    password.
    """
    error_messages = dict(
        SetPasswordForm.error_messages,
        password_incorrect=_("Your old password was entered incorrectly. "
                             "Please enter it again."),
    )
    old_password = forms.CharField(label=_("Old password"),
                                   widget=forms.PasswordInput)

    field_order = ['old_password', 'new_password1', 'new_password2']

    def clean_old_password(self):
        """Validate that old_password matches the user's current password."""
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'],
                code='password_incorrect',
            )
        return old_password
class AdminPasswordChangeForm(forms.Form):
    """
    A form used to change the password of a user in the admin interface.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    required_css_class = 'required'
    password1 = forms.CharField(
        label=_("Password"),
        widget=forms.PasswordInput,
        help_text=password_validation.password_validators_help_text_html(),
    )
    password2 = forms.CharField(
        label=_("Password (again)"),
        widget=forms.PasswordInput,
        help_text=_("Enter the same password as before, for verification."),
    )

    def __init__(self, user, *args, **kwargs):
        # The target user whose password is being set; also handed to the
        # password validators below.
        self.user = user
        super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)

    def clean_password2(self):
        """Check that the two password entries match and pass the validators."""
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages['password_mismatch'],
                    code='password_mismatch',
                )
        password_validation.validate_password(password2, self.user)
        return password2

    def save(self, commit=True):
        """
        Saves the new password.
        """
        password = self.cleaned_data["password1"]
        self.user.set_password(password)
        if commit:
            self.user.save()
        return self.user

    @property
    def changed_data(self):
        # Collapse per-field change reporting into one logical 'password'
        # change: report a change only when every field was modified,
        # otherwise report no change at all.  (Rewritten from the old
        # _get_changed_data + property(...) pair; iterating self.fields
        # directly replaces the redundant .keys() call and the manual
        # early-return loop.)
        data = super(AdminPasswordChangeForm, self).changed_data
        if all(name in data for name in self.fields):
            return ['password']
        return []
| bsd-3-clause |
js0701/chromium-crosswalk | tools/coverity/coverity.py | 179 | 11670 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Runs Coverity Prevent on a build of Chromium.
This script should be run in a Visual Studio Command Prompt, so that the
INCLUDE, LIB, and PATH environment variables are set properly for Visual
Studio.
Usage examples:
coverity.py
coverity.py --dry-run
coverity.py --target=debug
%comspec% /c ""C:\Program Files\Microsoft Visual Studio 8\VC\vcvarsall.bat"
x86 && C:\Python24\python.exe C:\coverity.py"
For a full list of options, pass the '--help' switch.
See http://support.microsoft.com/kb/308569 for running this script as a
Scheduled Task on Windows XP.
"""
import optparse
import os
import os.path
import shutil
import subprocess
import sys
import time
# These constants provide default values, but are exposed as command-line
# flags. See the --help for more info. Note that for historical reasons
# (the script started out as Windows-only and has legacy usages which pre-date
# these switches), the constants are all tuned for Windows.
# Usage of this script on Linux pretty much requires explicit
# --source-dir, --coverity-bin-dir, --coverity-intermediate-dir, and
# --coverity-target command line flags.
# Defaults for the command-line flags (Windows-oriented; see module docstring).
CHROMIUM_SOURCE_DIR = 'C:\\chromium.latest'

# Relative to CHROMIUM_SOURCE_DIR.
CHROMIUM_SOLUTION_FILE = 'src\\chrome\\chrome.sln'

# Relative to CHROMIUM_SOURCE_DIR.
CHROMIUM_SOLUTION_DIR = 'src\\chrome'

COVERITY_BIN_DIR = 'C:\\coverity\\prevent-win32-4.5.1\\bin'

COVERITY_INTERMEDIATE_DIR = 'C:\\coverity\\cvbuild\\cr_int'

# Checker selection and tuning passed to the Coverity analysis step.
COVERITY_ANALYZE_OPTIONS = ('--cxx --security --concurrency '
                            '--enable ATOMICITY '
                            '--enable MISSING_LOCK '
                            '--enable DELETE_VOID '
                            '--checker-option PASS_BY_VALUE:size_threshold:16 '
                            '--checker-option '
                            'USE_AFTER_FREE:allow_simple_use:false '
                            '--enable-constraint-fpp '
                            '--enable-callgraph-metrics')

# Might need to be changed to FQDN
COVERITY_REMOTE = 'chromecoverity-linux1'

COVERITY_PORT = '5467'

COVERITY_PRODUCT = 'Chromium'

COVERITY_TARGET = 'Windows'

COVERITY_USER = 'admin'
# looking for a PASSWORD constant? Look at --coverity-password-file instead.

# Relative to CHROMIUM_SOURCE_DIR. Contains the pid of this script.
LOCK_FILE = 'coverity.lock'
def _ReadPassword(pwfilename):
"""Reads the coverity password in from a file where it was stashed"""
pwfile = open(pwfilename, 'r')
password = pwfile.readline()
pwfile.close()
return password.rstrip()
def _RunCommand(cmd, dry_run, shell=False, echo_cmd=True):
  """Runs the command if dry_run is false, otherwise just prints the command.

  Args:
    cmd: command string (or argv list) handed to subprocess.call.
    dry_run: when true, no command is executed.
    shell: forwarded to subprocess.call.
    echo_cmd: when true, the command line is printed first.

  Returns:
    The subprocess exit status, or 0 when dry_run is set.
  """
  if echo_cmd:
    print cmd
  if not dry_run:
    return subprocess.call(cmd, shell=shell)
  else:
    return 0
def _ReleaseLock(lock_file, lock_filename):
"""Removes the lockfile. Function-ized so we can bail from anywhere"""
os.close(lock_file)
os.remove(lock_filename)
def run_coverity(options, args):
  """Runs all the selected tests for the given build type and target.

  Syncs the source tree, does a clean build under cov-build, analyzes the
  intermediates with cov-analyze, and commits defects to the remote defect
  database. A pid lock file under options.source_dir guards against
  concurrent runs.

  Returns:
    0 on success, 1 on any failure (lock contention, gclient failure,
    or an unrecognized platform).
  """
  # Create the lock file to prevent another instance of this script from
  # running.
  lock_filename = os.path.join(options.source_dir, LOCK_FILE)
  try:
    lock_file = os.open(lock_filename,
                        os.O_CREAT | os.O_EXCL | os.O_TRUNC | os.O_RDWR)
  except OSError, err:
    print 'Failed to open lock file:\n ' + str(err)
    return 1

  # Write the pid of this script (the python.exe process) to the lock file.
  os.write(lock_file, str(os.getpid()))

  # Coverity target names are capitalized ('Debug'/'Release').
  options.target = options.target.title()
  start_time = time.time()

  print 'Change directory to ' + options.source_dir
  os.chdir(options.source_dir)

  # The coverity-password filename may have been a relative path.
  # If so, assume it's relative to the source directory, which means
  # the time to read the password is after we do the chdir().
  coverity_password = _ReadPassword(options.coverity_password_file)

  cmd = 'gclient sync'
  gclient_exit = _RunCommand(cmd, options.dry_run, shell=True)
  if gclient_exit != 0:
    print 'gclient aborted with status %s' % gclient_exit
    _ReleaseLock(lock_file, lock_filename)
    return 1
  print 'Elapsed time: %ds' % (time.time() - start_time)

  # Do a clean build. Remove the build output directory first.
  if sys.platform.startswith('linux'):
    rm_path = os.path.join(options.source_dir,'src','out',options.target)
  elif sys.platform == 'win32':
    rm_path = os.path.join(options.source_dir,options.solution_dir,
                           options.target)
  elif sys.platform == 'darwin':
    rm_path = os.path.join(options.source_dir,'src','xcodebuild')
  else:
    print 'Platform "%s" unrecognized, aborting' % sys.platform
    _ReleaseLock(lock_file, lock_filename)
    return 1

  if options.dry_run:
    print 'shutil.rmtree(%s)' % repr(rm_path)
  else:
    shutil.rmtree(rm_path,True)

  if options.preserve_intermediate_dir:
      print 'Preserving intermediate directory.'
  else:
    if options.dry_run:
      print 'shutil.rmtree(%s)' % repr(options.coverity_intermediate_dir)
      print 'os.mkdir(%s)' % repr(options.coverity_intermediate_dir)
    else:
      shutil.rmtree(options.coverity_intermediate_dir,True)
      os.mkdir(options.coverity_intermediate_dir)

  print 'Elapsed time: %ds' % (time.time() - start_time)

  # Build under cov-build so Coverity captures the compilation commands.
  use_shell_during_make = False
  if sys.platform.startswith('linux'):
    use_shell_during_make = True
    os.chdir('src')
    _RunCommand('pwd', options.dry_run, shell=True)
    cmd = '%s/cov-build --dir %s make BUILDTYPE=%s chrome' % (
        options.coverity_bin_dir, options.coverity_intermediate_dir,
        options.target)
  elif sys.platform == 'win32':
    cmd = ('%s\\cov-build.exe --dir %s devenv.com %s\\%s /build %s '
           '/project chrome.vcproj') % (
        options.coverity_bin_dir, options.coverity_intermediate_dir,
        options.source_dir, options.solution_file, options.target)
  elif sys.platform == 'darwin':
    use_shell_during_make = True
    os.chdir('src/chrome')
    _RunCommand('pwd', options.dry_run, shell=True)
    cmd = ('%s/cov-build --dir %s xcodebuild -project chrome.xcodeproj '
           '-configuration %s -target chrome') % (
        options.coverity_bin_dir, options.coverity_intermediate_dir,
        options.target)

  _RunCommand(cmd, options.dry_run, shell=use_shell_during_make)
  print 'Elapsed time: %ds' % (time.time() - start_time)

  # Analyze the captured build.
  cov_analyze_exe = os.path.join(options.coverity_bin_dir,'cov-analyze')
  cmd = '%s --dir %s %s' % (cov_analyze_exe,
                            options.coverity_intermediate_dir,
                            options.coverity_analyze_options)
  _RunCommand(cmd, options.dry_run, shell=use_shell_during_make)
  print 'Elapsed time: %ds' % (time.time() - start_time)

  # Commit the defects to the remote database.
  cov_commit_exe = os.path.join(options.coverity_bin_dir,'cov-commit-defects')

  # On Linux we have started using a Target with a space in it, so we want
  # to quote it. On the other hand, Windows quoting doesn't work quite the
  # same way. To be conservative, I'd like to avoid quoting an argument
  # that doesn't need quoting and which we haven't historically been quoting
  # on that platform. So, only quote the target if we have to.
  coverity_target = options.coverity_target
  if sys.platform != 'win32':
    coverity_target = '"%s"' % coverity_target

  cmd = ('%s --dir %s --remote %s --port %s '
         '--product %s '
         '--target %s '
         '--user %s '
         '--password %s') % (cov_commit_exe,
                             options.coverity_intermediate_dir,
                             options.coverity_dbhost,
                             options.coverity_port,
                             options.coverity_product,
                             coverity_target,
                             options.coverity_user,
                             coverity_password)
  # Avoid echoing the Commit command because it has a password in it
  _RunCommand(cmd, options.dry_run, shell=use_shell_during_make, echo_cmd=False)

  print 'Total time: %ds' % (time.time() - start_time)

  _ReleaseLock(lock_file, lock_filename)
  return 0
def main():
  """Parses command-line flags and runs the Coverity pipeline.

  Returns:
    The exit status from run_coverity().
  """
  option_parser = optparse.OptionParser()
  option_parser.add_option('', '--dry-run', action='store_true', default=False,
                           help='print but don\'t run the commands')
  option_parser.add_option('', '--target', default='Release',
                           help='build target (Debug or Release)')
  option_parser.add_option('', '--source-dir', dest='source_dir',
                           help='full path to directory ABOVE "src"',
                           default=CHROMIUM_SOURCE_DIR)
  option_parser.add_option('', '--solution-file', dest='solution_file',
                           default=CHROMIUM_SOLUTION_FILE)
  option_parser.add_option('', '--solution-dir', dest='solution_dir',
                           default=CHROMIUM_SOLUTION_DIR)
  option_parser.add_option('', '--coverity-bin-dir', dest='coverity_bin_dir',
                           default=COVERITY_BIN_DIR)
  option_parser.add_option('', '--coverity-intermediate-dir',
                           dest='coverity_intermediate_dir',
                           default=COVERITY_INTERMEDIATE_DIR)
  option_parser.add_option('', '--coverity-analyze-options',
                           dest='coverity_analyze_options',
                           help=('all cov-analyze options, e.g. "%s"'
                                 % COVERITY_ANALYZE_OPTIONS),
                           default=COVERITY_ANALYZE_OPTIONS)
  option_parser.add_option('', '--coverity-db-host',
                           dest='coverity_dbhost',
                           help=('coverity defect db server hostname, e.g. %s'
                                 % COVERITY_REMOTE),
                           default=COVERITY_REMOTE)
  option_parser.add_option('', '--coverity-db-port', dest='coverity_port',
                           help=('port # of coverity web/db server, e.g. %s'
                                 % COVERITY_PORT),
                           default=COVERITY_PORT)
  option_parser.add_option('', '--coverity-product', dest='coverity_product',
                           help=('Product name reported to coverity, e.g. %s'
                                 % COVERITY_PRODUCT),
                           default=COVERITY_PRODUCT)
  option_parser.add_option('', '--coverity-target', dest='coverity_target',
                           help='Platform Target reported to coverity',
                           default=COVERITY_TARGET)
  option_parser.add_option('', '--coverity-user', dest='coverity_user',
                           help='Username used to log into coverity',
                           default=COVERITY_USER)
  option_parser.add_option('', '--coverity-password-file',
                           dest='coverity_password_file',
                           help='file containing the coverity password',
                           default='coverity-password')
  helpmsg = ('By default, the intermediate dir is emptied before analysis. '
             'This switch disables that behavior.')
  option_parser.add_option('', '--preserve-intermediate-dir',
                           action='store_true', help=helpmsg,
                           default=False)
  options, args = option_parser.parse_args()
  return run_coverity(options, args)
# Script entry point: exit with run_coverity()'s status.
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
RackSec/ansible | lib/ansible/parsing/quoting.py | 241 | 1141 | # (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def is_quoted(data):
    """Return True when data begins and ends with the same, unescaped quote.

    The closing quote must not be preceded by a backslash, and the string
    must be at least two characters long.
    """
    if len(data) < 2:
        return False
    first, last = data[0], data[-1]
    return first == last and first in ('"', "'") and data[-2] != '\\'
def unquote(data):
    '''removes first and last quotes from a string, if the string starts and ends with the same quotes '''
    # Strip exactly one layer of matching quotes; anything else passes
    # through untouched.
    return data[1:-1] if is_quoted(data) else data
| gpl-3.0 |
Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/django/contrib/gis/geoip/base.py | 334 | 11859 | import os
import re
import warnings
from ctypes import c_char_p
from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS
from django.contrib.gis.geoip.prototypes import (
GeoIP_country_code_by_addr, GeoIP_country_code_by_name,
GeoIP_country_name_by_addr, GeoIP_country_name_by_name,
GeoIP_database_info, GeoIP_delete, GeoIP_lib_version, GeoIP_open,
GeoIP_record_by_addr, GeoIP_record_by_name,
)
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes, force_text
# Regular expressions for recognizing the GeoIP free database editions.
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
class GeoIPException(Exception):
    """Raised for GeoIP configuration and lookup errors."""
class GeoIP(object):
    """ctypes wrapper around the legacy MaxMind GeoIP C library.

    Opens the binary country and/or city databases and exposes lookup
    helpers keyed by IP address or fully qualified domain name.
    """
    # The flags for GeoIP memory caching.
    # GEOIP_STANDARD - read database from filesystem, uses least memory.
    #
    # GEOIP_MEMORY_CACHE - load database into memory, faster performance
    # but uses more memory
    #
    # GEOIP_CHECK_CACHE - check for updated database. If database has been
    # updated, reload filehandle and/or memory cache. This option
    # is not thread safe.
    #
    # GEOIP_INDEX_CACHE - just cache the most frequently accessed index
    # portion of the database, resulting in faster lookups than
    # GEOIP_STANDARD, but less memory usage than GEOIP_MEMORY_CACHE -
    # useful for larger databases such as GeoIP Organization and
    # GeoIP City. Note, for GeoIP Country, Region and Netspeed
    # databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
    #
    # GEOIP_MMAP_CACHE - load database into mmap shared memory ( not available
    # on Windows).
    GEOIP_STANDARD = 0
    GEOIP_MEMORY_CACHE = 1
    GEOIP_CHECK_CACHE = 2
    GEOIP_INDEX_CACHE = 4
    GEOIP_MMAP_CACHE = 8
    # The set of valid cache flag values (values unused; dict acts as a set).
    cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}

    # Paths to the city & country binary databases.
    _city_file = ''
    _country_file = ''

    # Initially, pointers to GeoIP file references are NULL.
    _city = None
    _country = None

    def __init__(self, path=None, cache=0, country=None, city=None):
        """
        Initializes the GeoIP object, no parameters are required to use default
        settings. Keyword arguments may be passed in to customize the locations
        of the GeoIP data sets.
        * path: Base directory to where GeoIP data is located or the full path
            to where the city or country data files (*.dat) are located.
            Assumes that both the city and country data sets are located in
            this directory; overrides the GEOIP_PATH settings attribute.
        * cache: The cache settings when opening up the GeoIP datasets,
            and may be an integer in (0, 1, 2, 4, 8) corresponding to
            the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
            GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
            settings, respectively. Defaults to 0, meaning that the data is read
            from the disk.
        * country: The name of the GeoIP country data file. Defaults to
            'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
        * city: The name of the GeoIP city data file. Defaults to
            'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
        """
        warnings.warn(
            "django.contrib.gis.geoip is deprecated in favor of "
            "django.contrib.gis.geoip2 and the MaxMind GeoLite2 database "
            "format.", RemovedInDjango20Warning, 2
        )
        # Checking the given cache option.
        if cache in self.cache_options:
            self._cache = cache
        else:
            raise GeoIPException('Invalid GeoIP caching option: %s' % cache)
        # Getting the GeoIP data path.
        if not path:
            path = GEOIP_SETTINGS.get('GEOIP_PATH')
            if not path:
                raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
        if not isinstance(path, six.string_types):
            raise TypeError('Invalid path type: %s' % type(path).__name__)
        if os.path.isdir(path):
            # Constructing the GeoIP database filenames using the settings
            # dictionary. If the database files for the GeoLite country
            # and/or city datasets exist, then try and open them.
            country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
            if os.path.isfile(country_db):
                self._country = GeoIP_open(force_bytes(country_db), cache)
                self._country_file = country_db
            city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
            if os.path.isfile(city_db):
                self._city = GeoIP_open(force_bytes(city_db), cache)
                self._city_file = city_db
        elif os.path.isfile(path):
            # Otherwise, some detective work will be needed to figure
            # out whether the given database path is for the GeoIP country
            # or city databases.
            ptr = GeoIP_open(force_bytes(path), cache)
            info = GeoIP_database_info(ptr)
            if lite_regex.match(info):
                # GeoLite City database detected.
                self._city = ptr
                self._city_file = path
            elif free_regex.match(info):
                # GeoIP Country database detected.
                self._country = ptr
                self._country_file = path
            else:
                raise GeoIPException('Unable to recognize database edition: %s' % info)
        else:
            raise GeoIPException('GeoIP path must be a valid file or directory.')

    def __del__(self):
        # Cleaning any GeoIP file handles lying around.
        if GeoIP_delete is None:
            return
        if self._country:
            GeoIP_delete(self._country)
        if self._city:
            GeoIP_delete(self._city)

    def __repr__(self):
        # Include the C library version (when available) plus the data files.
        version = ''
        if GeoIP_lib_version is not None:
            version += ' [v%s]' % force_text(GeoIP_lib_version())
        return '<%(cls)s%(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
            'cls': self.__class__.__name__,
            'version': version,
            'country': self._country_file,
            'city': self._city_file,
        }

    def _check_query(self, query, country=False, city=False, city_or_country=False):
        "Helper routine for checking the query and database availability."
        # Making sure a string was passed in for the query.
        if not isinstance(query, six.string_types):
            raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
        # Extra checks for the existence of country and city databases.
        if city_or_country and not (self._country or self._city):
            raise GeoIPException('Invalid GeoIP country and city data files.')
        elif country and not self._country:
            raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
        elif city and not self._city:
            raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)
        # Return the query string back to the caller. GeoIP only takes bytestrings.
        return force_bytes(query)

    def city(self, query):
        """
        Returns a dictionary of city information for the given IP address or
        Fully Qualified Domain Name (FQDN). Some information in the dictionary
        may be undefined (None).
        """
        enc_query = self._check_query(query, city=True)
        if ipv4_re.match(query):
            # If an IP address was passed in
            return GeoIP_record_by_addr(self._city, c_char_p(enc_query))
        else:
            # If a FQDN was passed in.
            return GeoIP_record_by_name(self._city, c_char_p(enc_query))

    def country_code(self, query):
        "Returns the country code for the given IP Address or FQDN."
        enc_query = self._check_query(query, city_or_country=True)
        if self._country:
            if ipv4_re.match(query):
                return GeoIP_country_code_by_addr(self._country, enc_query)
            else:
                return GeoIP_country_code_by_name(self._country, enc_query)
        else:
            # No country database loaded; fall back to the city database.
            return self.city(query)['country_code']

    def country_name(self, query):
        "Returns the country name for the given IP Address or FQDN."
        enc_query = self._check_query(query, city_or_country=True)
        if self._country:
            if ipv4_re.match(query):
                return GeoIP_country_name_by_addr(self._country, enc_query)
            else:
                return GeoIP_country_name_by_name(self._country, enc_query)
        else:
            # No country database loaded; fall back to the city database.
            return self.city(query)['country_name']

    def country(self, query):
        """
        Returns a dictionary with the country code and name when given an
        IP address or a Fully Qualified Domain Name (FQDN). For example, both
        '24.124.1.80' and 'djangoproject.com' are valid parameters.
        """
        # Returning the country code and name
        return {'country_code': self.country_code(query),
                'country_name': self.country_name(query),
                }

    # #### Coordinate retrieval routines ####
    def coords(self, query, ordering=('longitude', 'latitude')):
        # Returns the coordinates in the requested ordering, or None when
        # the city lookup fails.
        cdict = self.city(query)
        if cdict is None:
            return None
        else:
            return tuple(cdict[o] for o in ordering)

    def lon_lat(self, query):
        "Returns a tuple of the (longitude, latitude) for the given query."
        return self.coords(query)

    def lat_lon(self, query):
        "Returns a tuple of the (latitude, longitude) for the given query."
        return self.coords(query, ('latitude', 'longitude'))

    def geos(self, query):
        "Returns a GEOS Point object for the given query."
        ll = self.lon_lat(query)
        if ll:
            # Imported lazily to avoid requiring GEOS for non-geometry use.
            from django.contrib.gis.geos import Point
            return Point(ll, srid=4326)
        else:
            return None

    # #### GeoIP Database Information Routines ####
    @property
    def country_info(self):
        "Returns information about the GeoIP country database."
        if self._country is None:
            ci = 'No GeoIP Country data in "%s"' % self._country_file
        else:
            ci = GeoIP_database_info(self._country)
        return ci

    @property
    def city_info(self):
        "Returns information about the GeoIP city database."
        if self._city is None:
            ci = 'No GeoIP City data in "%s"' % self._city_file
        else:
            ci = GeoIP_database_info(self._city)
        return ci

    @property
    def info(self):
        "Returns information about the GeoIP library and databases in use."
        info = ''
        if GeoIP_lib_version:
            info += 'GeoIP Library:\n\t%s\n' % GeoIP_lib_version()
        return info + 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)

    # #### Methods for compatibility w/the GeoIP-Python API. ####
    @classmethod
    def open(cls, full_path, cache):
        return GeoIP(full_path, cache)

    def _rec_by_arg(self, arg):
        # Prefer the richer city record when a city database is loaded.
        if self._city:
            return self.city(arg)
        else:
            return self.country(arg)
    # Legacy aliases expected by the GeoIP-Python API.
    region_by_addr = city
    region_by_name = city
    record_by_addr = _rec_by_arg
    record_by_name = _rec_by_arg
    country_code_by_addr = country_code
    country_code_by_name = country_code
    country_name_by_addr = country_name
    country_name_by_name = country_name
| apache-2.0 |
alexus37/AugmentedRealityChess | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GLES2/EXT/disjoint_timer_query.py | 8 | 1931 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_EXT_disjoint_timer_query'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_EXT_disjoint_timer_query',error_checker=_errors._error_checker)
GL_CURRENT_QUERY_EXT=_C('GL_CURRENT_QUERY_EXT',0x8865)
GL_GPU_DISJOINT_EXT=_C('GL_GPU_DISJOINT_EXT',0x8FBB)
GL_QUERY_COUNTER_BITS_EXT=_C('GL_QUERY_COUNTER_BITS_EXT',0x8864)
GL_QUERY_RESULT_AVAILABLE_EXT=_C('GL_QUERY_RESULT_AVAILABLE_EXT',0x8867)
GL_QUERY_RESULT_EXT=_C('GL_QUERY_RESULT_EXT',0x8866)
GL_TIMESTAMP_EXT=_C('GL_TIMESTAMP_EXT',0x8E28)
GL_TIME_ELAPSED_EXT=_C('GL_TIME_ELAPSED_EXT',0x88BF)
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glBeginQueryEXT(target,id):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glDeleteQueriesEXT(n,ids):pass
@_f
@_p.types(None,_cs.GLenum)
def glEndQueryEXT(target):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glGenQueriesEXT(n,ids):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLint64Array)
def glGetQueryObjecti64vEXT(id,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetQueryObjectivEXT(id,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLuint64Array)
def glGetQueryObjectui64vEXT(id,pname,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLuintArray)
def glGetQueryObjectuivEXT(id,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetQueryivEXT(target,pname,params):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLuint)
def glIsQueryEXT(id):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum)
def glQueryCounterEXT(id,target):pass
| mit |
ryfeus/lambda-packs | Spacy/source2.7/spacy/lang/hi/stop_words.py | 3 | 2877 | # coding: utf8
from __future__ import unicode_literals
# Source: https://github.com/taranjeet/hindi-tokenizer/blob/master/stopwords.txt
STOP_WORDS = set("""
अंदर
अत
अदि
अप
अपना
अपनि
अपनी
अपने
अभि
अभी
अंदर
आदि
आप
इंहिं
इंहें
इंहों
इतयादि
इत्यादि
इन
इनका
इन्हीं
इन्हें
इन्हों
इस
इसका
इसकि
इसकी
इसके
इसमें
इसि
इसी
इसे
उंहिं
उंहें
उंहों
उन
उनका
उनकि
उनकी
उनके
उनको
उन्हीं
उन्हें
उन्हों
उस
उसके
उसि
उसी
उसे
एक
एवं
एस
एसे
ऐसे
ओर
और
कइ
कई
कर
करता
करते
करना
करने
करें
कहते
कहा
का
काफि
काफ़ी
कि
किंहें
किंहों
कितना
किन्हें
किन्हों
किया
किर
किस
किसि
किसी
किसे
की
कुछ
कुल
के
को
कोइ
कोई
कोन
कोनसा
कौन
कौनसा
गया
घर
जब
जहाँ
जहां
जा
जिंहें
जिंहों
जितना
जिधर
जिन
जिन्हें
जिन्हों
जिस
जिसे
जीधर
जेसा
जेसे
जैसा
जैसे
जो
तक
तब
तरह
तिंहें
तिंहों
तिन
तिन्हें
तिन्हों
तिस
तिसे
तो
था
थि
थी
थे
दबारा
दवारा
दिया
दुसरा
दुसरे
दूसरे
दो
द्वारा
न
नहिं
नहीं
ना
निचे
निहायत
नीचे
ने
पर
पहले
पुरा
पूरा
पे
फिर
बनि
बनी
बहि
बही
बहुत
बाद
बाला
बिलकुल
भि
भितर
भी
भीतर
मगर
मानो
मे
में
यदि
यह
यहाँ
यहां
यहि
यही
या
यिह
ये
रखें
रवासा
रहा
रहे
ऱ्वासा
लिए
लिये
लेकिन
व
वगेरह
वग़ैरह
वरग
वर्ग
वह
वहाँ
वहां
वहिं
वहीं
वाले
वुह
वे
वग़ैरह
संग
सकता
सकते
सबसे
सभि
सभी
साथ
साबुत
साभ
सारा
से
सो
संग
हि
ही
हुअ
हुआ
हुइ
हुई
हुए
हे
हें
है
हैं
हो
होता
होति
होती
होते
होना
होने
""".split())
| mit |
GoogleCloudPlatform/mlops-on-gcp | on_demand/kfp-caip-sklearn/lab-03-kfp-cicd/pipeline/covertype_training_pipeline.py | 6 | 7511 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KFP pipeline orchestrating BigQuery and Cloud AI Platform services."""
import os
from helper_components import evaluate_model
from helper_components import retrieve_best_run
from jinja2 import Template
import kfp
from kfp.components import func_to_container_op
from kfp.dsl.types import Dict
from kfp.dsl.types import GCPProjectID
from kfp.dsl.types import GCPRegion
from kfp.dsl.types import GCSPath
from kfp.dsl.types import String
from kfp.gcp import use_gcp_secret
# Defaults and environment settings
BASE_IMAGE = os.getenv('BASE_IMAGE')
TRAINER_IMAGE = os.getenv('TRAINER_IMAGE')
RUNTIME_VERSION = os.getenv('RUNTIME_VERSION')
PYTHON_VERSION = os.getenv('PYTHON_VERSION')
COMPONENT_URL_SEARCH_PREFIX = os.getenv('COMPONENT_URL_SEARCH_PREFIX')
USE_KFP_SA = os.getenv('USE_KFP_SA')
TRAINING_FILE_PATH = 'datasets/training/data.csv'
VALIDATION_FILE_PATH = 'datasets/validation/data.csv'
TESTING_FILE_PATH = 'datasets/testing/data.csv'
# Parameter defaults
SPLITS_DATASET_ID = 'splits'

# Default hyperparameter tuning configuration submitted to AI Platform
# Training as the trainingInput payload. This string must be valid JSON:
# booleans are lowercase (`true`). The previous value used Python's `True`,
# which makes json.loads() on this payload fail.
HYPERTUNE_SETTINGS = """
{
    "hyperparameters":  {
        "goal": "MAXIMIZE",
        "maxTrials": 6,
        "maxParallelTrials": 3,
        "hyperparameterMetricTag": "accuracy",
        "enableTrialEarlyStopping": true,
        "params": [
            {
                "parameterName": "max_iter",
                "type": "DISCRETE",
                "discreteValues": [500, 1000]
            },
            {
                "parameterName": "alpha",
                "type": "DOUBLE",
                "minValue": 0.0001,
                "maxValue": 0.001,
                "scaleType": "UNIT_LINEAR_SCALE"
            }
        ]
    }
}
"""
# Helper functions
def generate_sampling_query(source_table_name, num_lots, lots):
  """Prepares the data sampling query.

  Args:
    source_table_name: fully qualified BigQuery table to sample from.
    num_lots: number of hash buckets rows are assigned to.
    lots: list of bucket indexes to select (e.g. [1, 2, 3, 4]).

  Returns:
    A BigQuery Standard SQL query string selecting the requested buckets.
    FARM_FINGERPRINT of the serialized row makes the split deterministic,
    so the same rows always land in the same lot.
  """
  sampling_query_template = """
       SELECT *
       FROM 
           `{{ source_table }}` AS cover
       WHERE
       MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), {{ num_lots }}) IN ({{ lots }})
       """
  # str(lots)[1:-1] turns [1, 2] into "1, 2" for the SQL IN clause.
  query = Template(sampling_query_template).render(
      source_table=source_table_name, num_lots=num_lots, lots=str(lots)[1:-1])
  return query
# Create component factories
# Reusable GCP components (BigQuery query, CAIP train/deploy) are loaded
# from COMPONENT_URL_SEARCH_PREFIX; the Python helpers are containerized
# on top of BASE_IMAGE.
component_store = kfp.components.ComponentStore(
    local_search_paths=None, url_search_prefixes=[COMPONENT_URL_SEARCH_PREFIX])
bigquery_query_op = component_store.load_component('bigquery/query')
mlengine_train_op = component_store.load_component('ml_engine/train')
mlengine_deploy_op = component_store.load_component('ml_engine/deploy')
retrieve_best_run_op = func_to_container_op(
    retrieve_best_run, base_image=BASE_IMAGE)
evaluate_model_op = func_to_container_op(evaluate_model, base_image=BASE_IMAGE)
@kfp.dsl.pipeline(
    name='Covertype Classifier Training',
    description='The pipeline training and deploying the Covertype classifierpipeline_yaml'
)
def covertype_train(project_id,
                    region,
                    source_table_name,
                    gcs_root,
                    dataset_id,
                    evaluation_metric_name,
                    evaluation_metric_threshold,
                    model_id,
                    version_id,
                    replace_existing_version,
                    hypertune_settings=HYPERTUNE_SETTINGS,
                    dataset_location='US'):
  """Orchestrates training and deployment of an sklearn model.

  Splits the source table into train/validation/test lots, tunes
  hyperparameters on CAIP Training, retrains with the best trial's
  parameters, evaluates on the test split, and deploys only when the
  chosen metric beats evaluation_metric_threshold.
  """
  # NOTE(review): the description string above ends in
  # 'classifierpipeline_yaml', which looks like a paste error -- confirm.
  # Create the training split
  query = generate_sampling_query(
      source_table_name=source_table_name, num_lots=10, lots=[1, 2, 3, 4])
  training_file_path = '{}/{}'.format(gcs_root, TRAINING_FILE_PATH)
  create_training_split = bigquery_query_op(
      query=query,
      project_id=project_id,
      dataset_id=dataset_id,
      table_id='',
      output_gcs_path=training_file_path,
      dataset_location=dataset_location)
  # Create the validation split
  query = generate_sampling_query(
      source_table_name=source_table_name, num_lots=10, lots=[8])
  validation_file_path = '{}/{}'.format(gcs_root, VALIDATION_FILE_PATH)
  create_validation_split = bigquery_query_op(
      query=query,
      project_id=project_id,
      dataset_id=dataset_id,
      table_id='',
      output_gcs_path=validation_file_path,
      dataset_location=dataset_location)
  # Create the testing split
  query = generate_sampling_query(
      source_table_name=source_table_name, num_lots=10, lots=[9])
  testing_file_path = '{}/{}'.format(gcs_root, TESTING_FILE_PATH)
  create_testing_split = bigquery_query_op(
      query=query,
      project_id=project_id,
      dataset_id=dataset_id,
      table_id='',
      output_gcs_path=testing_file_path,
      dataset_location=dataset_location)
  # Tune hyperparameters
  tune_args = [
      '--training_dataset_path',
      create_training_split.outputs['output_gcs_path'],
      '--validation_dataset_path',
      create_validation_split.outputs['output_gcs_path'], '--hptune', 'True'
  ]
  job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir/hypertune',
                              kfp.dsl.RUN_ID_PLACEHOLDER)
  hypertune = mlengine_train_op(
      project_id=project_id,
      region=region,
      master_image_uri=TRAINER_IMAGE,
      job_dir=job_dir,
      args=tune_args,
      training_input=hypertune_settings)
  # Retrieve the best trial
  get_best_trial = retrieve_best_run_op(project_id, hypertune.outputs['job_id'])
  # Train the model on a combined training and validation datasets
  job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir', kfp.dsl.RUN_ID_PLACEHOLDER)
  train_args = [
      '--training_dataset_path',
      create_training_split.outputs['output_gcs_path'],
      '--validation_dataset_path',
      create_validation_split.outputs['output_gcs_path'], '--alpha',
      get_best_trial.outputs['alpha'], '--max_iter',
      get_best_trial.outputs['max_iter'], '--hptune', 'False'
  ]
  train_model = mlengine_train_op(
      project_id=project_id,
      region=region,
      master_image_uri=TRAINER_IMAGE,
      job_dir=job_dir,
      args=train_args)
  # Evaluate the model on the testing split
  eval_model = evaluate_model_op(
      dataset_path=str(create_testing_split.outputs['output_gcs_path']),
      model_path=str(train_model.outputs['job_dir']),
      metric_name=evaluation_metric_name)
  # Deploy the model if the primary metric is better than threshold
  with kfp.dsl.Condition(
      eval_model.outputs['metric_value'] > evaluation_metric_threshold):
    deploy_model = mlengine_deploy_op(
        model_uri=train_model.outputs['job_dir'],
        project_id=project_id,
        model_id=model_id,
        version_id=version_id,
        runtime_version=RUNTIME_VERSION,
        python_version=PYTHON_VERSION,
        replace_existing_version=replace_existing_version)
  # Configure the pipeline to run using the service account defined
  # in the user-gcp-sa k8s secret
  if USE_KFP_SA == 'True':
    kfp.dsl.get_pipeline_conf().add_op_transformer(use_gcp_secret('user-gcp-sa'))
| apache-2.0 |
jocave/snapcraft | snapcraft/tests/fixture_setup.py | 1 | 8063 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import io
import threading
import urllib.parse
from unittest import mock
import fixtures
import xdg
from snapcraft.tests import fake_servers
class TempCWD(fixtures.TempDir):
    """A temporary directory fixture that also becomes the current directory."""

    def setUp(self):
        """Create the temp dir and cd into it; restore the old cwd on cleanup."""
        super().setUp()
        previous_dir = os.getcwd()
        self.addCleanup(os.chdir, previous_dir)
        os.chdir(self.path)
class TempXDG(fixtures.Fixture):
    """Isolate a test from xdg so a private temp config is used."""

    def __init__(self, path):
        # Bug fix: this previously called super().setUp() from the
        # constructor. Fixtures must only be set up via setUp()/useFixture();
        # running the base setup machinery here meant it executed twice.
        super().__init__()
        # Directory under which all XDG base directories are rooted.
        self.path = path

    def setUp(self):
        """Patch every XDG base-directory location to live under self.path."""
        super().setUp()
        patcher = mock.patch(
            'xdg.BaseDirectory.xdg_config_home',
            new=os.path.join(self.path, '.config'))
        patcher.start()
        self.addCleanup(patcher.stop)
        patcher = mock.patch(
            'xdg.BaseDirectory.xdg_data_home',
            new=os.path.join(self.path, '.local'))
        patcher.start()
        self.addCleanup(patcher.stop)
        # The *_dirs lists must mirror the single-dir patches above so code
        # that scans xdg_config_dirs/xdg_data_dirs also stays inside
        # self.path.
        patcher_dirs = mock.patch(
            'xdg.BaseDirectory.xdg_config_dirs',
            new=[xdg.BaseDirectory.xdg_config_home])
        patcher_dirs.start()
        self.addCleanup(patcher_dirs.stop)
        patcher_dirs = mock.patch(
            'xdg.BaseDirectory.xdg_data_dirs',
            new=[xdg.BaseDirectory.xdg_data_home])
        patcher_dirs.start()
        self.addCleanup(patcher_dirs.stop)
class CleanEnvironment(fixtures.Fixture):
    """Runs the test with an empty os.environ, restoring it on cleanup."""

    def setUp(self):
        super().setUp()
        current_environment = os.environ.copy()
        # NOTE(review): rebinding os.environ to a plain dict means changes
        # never reach the real process environment (putenv is not called);
        # presumably intentional for test isolation -- confirm.
        os.environ = {}
        self.addCleanup(os.environ.update, current_environment)
class _FakeStdout(io.StringIO):
"""A fake stdout using StringIO implementing the missing fileno attrib."""
def fileno(self):
return 1
class _FakeTerminalSize:
def __init__(self, columns=80):
self.columns = columns
class FakeTerminal(fixtures.Fixture):
    """Fakes a terminal: patches terminal size, stdout, and isatty."""

    def __init__(self, columns=80, isatty=True):
        self.columns = columns
        self.isatty = isatty

    def _setUp(self):
        size_patcher = mock.patch('shutil.get_terminal_size')
        fake_size = size_patcher.start()
        fake_size.return_value = _FakeTerminalSize(self.columns)
        self.addCleanup(size_patcher.stop)

        stdout_patcher = mock.patch('sys.stdout', new_callable=_FakeStdout)
        self.mock_stdout = stdout_patcher.start()
        self.addCleanup(stdout_patcher.stop)

        isatty_patcher = mock.patch('os.isatty')
        fake_isatty = isatty_patcher.start()
        fake_isatty.return_value = self.isatty
        self.addCleanup(isatty_patcher.stop)

    def getvalue(self):
        """Return everything written to the faked stdout so far."""
        return self.mock_stdout.getvalue()
class FakePartsWiki(fixtures.Fixture):
    """Starts a fake parts-wiki server for the test's duration."""

    def setUp(self):
        super().setUp()
        self.fake_parts_wiki_fixture = FakePartsWikiRunning()
        self.useFixture(self.fake_parts_wiki_fixture)
        # The fake server is local; make sure requests never hit a proxy.
        self.useFixture(fixtures.EnvironmentVariable(
            'no_proxy', 'localhost,127.0.0.1'))
class FakePartsWikiOrigin(fixtures.Fixture):
    """Starts a fake parts-wiki origin server for the test's duration."""

    def setUp(self):
        super().setUp()
        self.fake_parts_wiki_origin_fixture = FakePartsWikiOriginRunning()
        self.useFixture(self.fake_parts_wiki_origin_fixture)
        # The fake server is local; make sure requests never hit a proxy.
        self.useFixture(fixtures.EnvironmentVariable(
            'no_proxy', 'localhost,127.0.0.1'))
class FakeParts(fixtures.Fixture):
    """Fixture running a fake parts server and pointing snapcraft at it."""

    def setUp(self):
        super().setUp()
        self.fake_parts_server_fixture = FakePartsServerRunning()
        self.useFixture(self.fake_parts_server_fixture)
        parts_uri = urllib.parse.urljoin(
            self.fake_parts_server_fixture.url, 'parts.yaml')
        self.useFixture(
            fixtures.EnvironmentVariable('SNAPCRAFT_PARTS_URI', parts_uri))
        self.useFixture(
            fixtures.EnvironmentVariable('no_proxy', 'localhost,127.0.0.1'))
class FakeStore(fixtures.Fixture):
    """Fixture running fake SSO and store servers, wiring env vars to them."""

    def _run_server(self, attribute, server_fixture, variable, path=None):
        # Start the server fixture, remember it under ``attribute`` and
        # export its URL (optionally extended with ``path``) in ``variable``.
        setattr(self, attribute, server_fixture)
        self.useFixture(server_fixture)
        url = server_fixture.url
        if path is not None:
            url = urllib.parse.urljoin(url, path)
        self.useFixture(fixtures.EnvironmentVariable(variable, url))

    def setUp(self):
        super().setUp()
        self._run_server(
            'fake_sso_server_fixture', FakeSSOServerRunning(),
            'UBUNTU_SSO_API_ROOT_URL', 'api/v2/')
        self._run_server(
            'fake_store_upload_server_fixture', FakeStoreUploadServerRunning(),
            'UBUNTU_STORE_UPLOAD_ROOT_URL')
        self._run_server(
            'fake_store_api_server_fixture', FakeStoreAPIServerRunning(),
            'UBUNTU_STORE_API_ROOT_URL', 'dev/api/')
        self._run_server(
            'fake_store_search_server_fixture', FakeStoreSearchServerRunning(),
            'UBUNTU_STORE_SEARCH_ROOT_URL')
        # Keep requests to the local fake servers away from any proxy.
        self.useFixture(
            fixtures.EnvironmentVariable('no_proxy', 'localhost,127.0.0.1'))
class _FakeServerRunning(fixtures.Fixture):
    """Base fixture that runs ``fake_server`` on a background thread.

    Subclasses set the ``fake_server`` class attribute to the server class
    to instantiate; ``self.url`` exposes where it is listening.
    """

    # To be defined by child fixtures.
    fake_server = None

    def setUp(self):
        super().setUp()
        self._start_fake_server()

    def _start_fake_server(self):
        # Port 0 lets the OS pick a free port; the real port is read back
        # from the bound server to build the URL.
        server_address = ('', 0)
        self.server = self.fake_server(server_address)
        server_thread = threading.Thread(target=self.server.serve_forever)
        server_thread.start()
        # Registered after start so cleanup can join the running thread.
        self.addCleanup(self._stop_fake_server, server_thread)
        self.url = 'http://localhost:{}/'.format(self.server.server_port)

    def _stop_fake_server(self, thread):
        # Shut down the serve_forever loop, release the socket, then wait
        # for the worker thread to exit.
        self.server.shutdown()
        self.server.socket.close()
        thread.join()
# Concrete server fixtures: each pairs _FakeServerRunning with the fake
# server implementation it should launch.
class FakePartsWikiOriginRunning(_FakeServerRunning):
    fake_server = fake_servers.FakePartsWikiOriginServer


class FakePartsWikiRunning(_FakeServerRunning):
    fake_server = fake_servers.FakePartsWikiServer


class FakePartsServerRunning(_FakeServerRunning):
    fake_server = fake_servers.FakePartsServer


class FakeSSOServerRunning(_FakeServerRunning):
    fake_server = fake_servers.FakeSSOServer


class FakeStoreUploadServerRunning(_FakeServerRunning):
    fake_server = fake_servers.FakeStoreUploadServer


class FakeStoreAPIServerRunning(_FakeServerRunning):
    fake_server = fake_servers.FakeStoreAPIServer


class FakeStoreSearchServerRunning(_FakeServerRunning):
    fake_server = fake_servers.FakeStoreSearchServer
class StagingStore(fixtures.Fixture):
    """Fixture pointing the store-related env vars at the staging servers."""

    _STAGING_URLS = (
        ('UBUNTU_STORE_API_ROOT_URL',
         'https://myapps.developer.staging.ubuntu.com/dev/api/'),
        ('UBUNTU_STORE_UPLOAD_ROOT_URL',
         'https://upload.apps.staging.ubuntu.com/'),
        ('UBUNTU_SSO_API_ROOT_URL',
         'https://login.staging.ubuntu.com/api/v2/'),
        ('UBUNTU_STORE_SEARCH_ROOT_URL',
         'https://search.apps.staging.ubuntu.com/'),
    )

    def setUp(self):
        super().setUp()
        for variable, url in self._STAGING_URLS:
            self.useFixture(fixtures.EnvironmentVariable(variable, url))
| gpl-3.0 |
qsnake/gpaw | gpaw/test/diamond_absorption.py | 1 | 1342 | import numpy as np
import sys
import time
from ase.units import Bohr
from ase.structure import bulk
from gpaw import GPAW, FermiDirac
from gpaw.atom.basis import BasisMaker
from gpaw.response.df import DF
from gpaw.mpi import serial_comm, rank, size
from gpaw.utilities import devnull

# Only the MPI master rank writes to stdout; other ranks are silenced.
if rank != 0:
    sys.stdout = devnull

# GS Calculation One
# Ground-state DFT run for bulk diamond on a 4x4x4 k-point grid; the
# converged wavefunctions are written out for the response calculation.
a = 6.75 * Bohr
atoms = bulk('C', 'diamond', a=a)
calc = GPAW(h=0.2,
            kpts=(4,4,4),
            occupations=FermiDirac(0.001))
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('C.gpw','all')

# Macroscopic dielectric constant calculation
# A tiny but finite q approximates the optical (q -> 0) limit.
q = np.array([0.0, 0.00001, 0.])
w = np.linspace(0, 24., 241)

df = DF(calc='C.gpw', q=q, w=(0.,), eta=0.001,
        ecut=50, hilbert_trans=False, optical_limit=True)
df1, df2 = df.get_dielectric_function()
eM1, eM2 = df.get_macroscopic_dielectric_constant(df1, df2)

# Regression reference values; presumably without and with local field
# corrections respectively -- confirm against DF documentation.
eM1_ = 6.15185095143
eM2_ = 6.04815084635

if (np.abs(eM1 - eM1_) > 1e-5 or
    np.abs(eM2 - eM2_) > 1e-5):
    print eM1, eM2
    raise ValueError('Macroscopic dielectric constant not correct ! ')

# Absorption spectrum calculation
df = DF(calc='C.gpw', q=q, w=w, eta=0.25,
        ecut=50, optical_limit=True, txt='C_df.out')
df1, df2 = df.get_dielectric_function()
df.get_absorption_spectrum(df1, df2)
df.check_sum_rule(df1, df2)

df.write('C_df.pckl')
| gpl-3.0 |
pyecs/servo | tests/wpt/update/github.py | 197 | 5152 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urlparse import urljoin
requests = None
class GitHubError(Exception):
    """Raised when the GitHub API answers with a non-2xx status.

    :param status: HTTP status code of the response
    :param data: decoded JSON body of the response
    """

    def __init__(self, status, data):
        self.status = status
        self.data = data
class GitHub(object):
    """Thin client for the GitHub v3 REST API using token authentication."""

    url_base = "https://api.github.com"

    def __init__(self, token):
        # Defer the import of requests since it isn't installed by default
        global requests
        if requests is None:
            import requests

        self.headers = {"Accept": "application/vnd.github.v3+json"}
        # GitHub accepts an OAuth token as the basic-auth username.
        self.auth = (token, "x-oauth-basic")

    def get(self, path):
        return self._request("GET", path)

    def post(self, path, data):
        return self._request("POST", path, data=data)

    def put(self, path, data):
        return self._request("PUT", path, data=data)

    def _request(self, method, path, data=None):
        # Issue the HTTP request and return the decoded JSON response;
        # raises GitHubError for any non-2xx status.
        url = urljoin(self.url_base, path)

        kwargs = {"headers": self.headers,
                  "auth": self.auth}
        if data is not None:
            kwargs["data"] = json.dumps(data)

        resp = requests.request(method, url, **kwargs)
        if 200 <= resp.status_code < 300:
            return resp.json()
        else:
            raise GitHubError(resp.status_code, resp.json())

    def repo(self, owner, name):
        """GitHubRepo for a particular repository.

        :param owner: String repository owner
        :param name: String repository name
        """
        return GitHubRepo.from_name(self, owner, name)
class GitHubRepo(object):
    def __init__(self, github, data):
        """Object representing a GitHub repository.

        :param github: GitHub client used for API calls
        :param data: JSON payload describing the repository
        """
        self.gh = github
        self.owner = data["owner"]
        self.name = data["name"]
        self.url = data["ssh_url"]
        self._data = data

    @classmethod
    def from_name(cls, github, owner, name):
        """Construct a GitHubRepo by fetching ``/repos/<owner>/<name>``."""
        data = github.get("/repos/%s/%s" % (owner, name))
        return cls(github, data)

    @property
    def url_base(self):
        # API path prefix for this repository (relative, joined by path()).
        return "/repos/%s/" % (self._data["full_name"])

    def create_pr(self, title, head, base, body):
        """Create a Pull Request in the repository

        :param title: Pull Request title
        :param head: ref to the HEAD of the PR branch.
        :param base: ref to the base branch for the Pull Request
        :param body: Description of the PR
        """
        return PullRequest.create(self, title, head, base, body)

    def load_pr(self, number):
        """Load an existing Pull Request by number.

        :param number: Pull Request number
        """
        return PullRequest.from_number(self, number)

    def path(self, suffix):
        return urljoin(self.url_base, suffix)
class PullRequest(object):
    def __init__(self, repo, data):
        """Object representing a Pull Request

        :param repo: GitHubRepo the Pull Request belongs to
        :param data: JSON payload describing the Pull Request
        """
        self.repo = repo
        self._data = data
        self.number = data["number"]
        self.title = data["title"]
        self.base = data["base"]["ref"]
        # Bug fix: the head ref used to be assigned to ``self.base`` as
        # well, clobbering the base branch name and leaving no ``head``.
        self.head = data["head"]["ref"]
        self._issue = None

    @classmethod
    def from_number(cls, repo, number):
        """Load an existing Pull Request by its number."""
        data = repo.gh.get(repo.path("pulls/%i" % number))
        return cls(repo, data)

    @classmethod
    def create(cls, repo, title, head, base, body):
        """Create a new Pull Request in ``repo`` and return it."""
        data = repo.gh.post(repo.path("pulls"),
                            {"title": title,
                             "head": head,
                             "base": base,
                             "body": body})
        return cls(repo, data)

    def path(self, suffix):
        return urljoin(self.repo.path("pulls/%i/" % self.number), suffix)

    @property
    def issue(self):
        """Issue related to the Pull Request"""
        if self._issue is None:
            self._issue = Issue.from_number(self.repo, self.number)
        return self._issue

    def merge(self, commit_message=None):
        """Merge the Pull Request into its base branch.

        :param commit_message: Message to use for the merge commit. If None a default
                               message is used instead
        """
        if commit_message is None:
            # GitHub's default merge message names the *head* branch
            # ("Merge pull request #N from <head>").
            commit_message = "Merge pull request #%i from %s" % (self.number, self.head)
        self.repo.gh.put(self.path("merge"),
                         {"commit_message": commit_message})
class Issue(object):
    """Object representing a GitHub Issue"""

    def __init__(self, repo, data):
        self.repo = repo
        self._data = data
        self.number = data["number"]

    @classmethod
    def from_number(cls, repo, number):
        """Fetch an existing issue by its number."""
        payload = repo.gh.get(repo.path("issues/%i" % number))
        return cls(repo, payload)

    def path(self, suffix):
        return urljoin(self.repo.path("issues/%i/" % self.number), suffix)

    def add_comment(self, message):
        """Add a comment to the issue

        :param message: The text of the comment
        """
        payload = {"body": message}
        self.repo.gh.post(self.path("comments"), payload)
| mpl-2.0 |
kalikaneko/pyzqm-deb | docs/sphinxext/ipython_console_highlighting.py | 112 | 4183 | """reST directive for syntax-highlighting ipython interactive sessions.
XXX - See what improvements can be made based on the new (as of Sept 2009)
'pycon' lexer for the python console. At the very least it will give better
highlighted tracebacks.
"""
#-----------------------------------------------------------------------------
# Needed modules
# Standard library
import re
# Third party
from pygments.lexer import Lexer, do_insertions
from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
PythonTracebackLexer)
from pygments.token import Comment, Generic
from sphinx import highlighting
#-----------------------------------------------------------------------------
# Global constants
line_re = re.compile('.*?\n')
#-----------------------------------------------------------------------------
# Code begins - classes and functions
class IPythonConsoleLexer(Lexer):
    """
    For IPython console output or doctests, such as:

    .. sourcecode:: ipython

      In [1]: a = 'foo'

      In [2]: a
      Out[2]: 'foo'

      In [3]: print a
      foo

      In [4]: 1 / 0

    Notes:

      - Tracebacks are not currently supported.

      - It assumes the default IPython prompts, not customized ones.
    """
    name = 'IPython console session'
    aliases = ['ipython']
    mimetypes = ['text/x-ipython-console']

    # Patterns matching the default (non-customized) IPython prompts.
    input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)")
    output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)")
    continue_prompt = re.compile(" \.\.\.+:")
    tb_start = re.compile("\-+")

    def get_tokens_unprocessed(self, text):
        # Accumulate input lines into ``curcode`` and delegate them to a
        # PythonLexer; prompt markers are recorded as insertions so the
        # token offsets stay aligned with the original text.
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            input_prompt = self.input_prompt.match(line)
            continue_prompt = self.continue_prompt.match(line.rstrip())
            output_prompt = self.output_prompt.match(line)
            if line.startswith("#"):
                insertions.append((len(curcode),
                                   [(0, Comment, line)]))
            elif input_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, input_prompt.group())]))
                curcode += line[input_prompt.end():]
            elif continue_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, continue_prompt.group())]))
                curcode += line[continue_prompt.end():]
            elif output_prompt is not None:
                # Use the 'error' token for output.  We should probably make
                # our own token, but error is typicaly in a bright color like
                # red, so it works fine for our output prompts.
                insertions.append((len(curcode),
                                   [(0, Generic.Error, output_prompt.group())]))
                curcode += line[output_prompt.end():]
            else:
                # Plain output line: flush any pending input first so it is
                # highlighted as one Python snippet.
                if curcode:
                    for item in do_insertions(insertions,
                                              pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
def setup(app):
"""Setup as a sphinx extension."""
# This is only a lexer, so adding it below to pygments appears sufficient.
# But if somebody knows that the right API usage should be to do that via
# sphinx, by all means fix it here. At least having this setup.py
# suppresses the sphinx warning we'd get without it.
pass
#-----------------------------------------------------------------------------
# Register the extension as a valid pygments lexer
#-----------------------------------------------------------------------------
# Performed at import time so any document using ``ipython`` code blocks
# picks up the lexer without further configuration.
highlighting.lexers['ipython'] = IPythonConsoleLexer()
| lgpl-3.0 |
ME-ICA/me-ica | gooey/gui/widgets/components.py | 1 | 7202 | from functools import partial
import wx
from gooey.gui.util import wx_util
from gooey.gui.widgets import widget_pack
class BaseGuiComponent(object):
    """Base for all form components: a titled panel wrapping a widget pack."""

    # wx widget payload class; set by subclasses / build_subclass().
    widget_class = None

    def __init__(self, parent, title, msg, choices=None):
        '''
        :param data: field info (title, help, etc..)
        :param widget_pack: internal wxWidgets to render
        '''
        # parent
        self.parent = parent

        # Widgets
        self.title = None
        self.help_msg = None
        self.choices = choices

        # Internal WidgetPack set in subclasses
        self.do_layout(parent, title, msg)

    def do_layout(self, parent, title, msg):
        # Build the panel: bold title, grey help text (when present), then
        # the payload widget set, stacked vertically.
        self.panel = wx.Panel(parent)
        self.widget_pack = self.widget_class()

        self.title = self.format_title(self.panel, title)
        self.help_msg = self.format_help_msg(self.panel, msg)
        self.help_msg.SetMinSize((0, -1))
        core_widget_set = self.widget_pack.build(self.panel, {}, self.choices)

        vertical_container = wx.BoxSizer(wx.VERTICAL)
        vertical_container.Add(self.title)
        vertical_container.AddSpacer(2)

        if self.help_msg.GetLabelText():
            vertical_container.Add(self.help_msg, 1, wx.EXPAND)
            vertical_container.AddSpacer(2)
        else:
            vertical_container.AddStretchSpacer(1)

        vertical_container.Add(core_widget_set, 0, wx.EXPAND)
        self.panel.SetSizer(vertical_container)
        return self.panel

    def bind(self, *args, **kwargs):
        # NOTE(review): looks like leftover debug output -- Bind() is not
        # expected to return anything useful; confirm before relying on it.
        print self.widget_pack.widget.Bind(*args, **kwargs)

    def get_title(self):
        return self.title.GetLabel()

    def set_title(self, text):
        self.title.SetLabel(text)

    def get_help_msg(self):
        return self.help_msg.GetLabelText()

    def set_label_text(self, text):
        self.help_msg.SetLabel(text)

    def format_help_msg(self, parent, msg):
        # Help text renders dark grey; an empty string when no msg given.
        base_text = wx.StaticText(parent, label=msg or '')
        wx_util.dark_grey(base_text)
        return base_text

    def format_title(self, parent, title):
        text = wx.StaticText(parent, label=title)
        wx_util.make_bold(text)
        return text

    def onResize(self, evt):
        # handle internal widgets
        # self.panel.Freeze()
        self._onResize(evt)
        # propagate event to child widgets
        self.widget_pack.onResize(evt)
        evt.Skip()
        # self.panel.Thaw()

    def _onResize(self, evt):
        # Re-wrap the help message whenever the container width changes.
        if not self.help_msg:
            return
        self.panel.Size = evt.GetSize()
        container_width, _ = self.panel.Size
        text_width, _ = self.help_msg.Size

        if text_width != container_width:
            self.help_msg.SetLabel(self.help_msg.GetLabelText().replace('\n', ' '))
            self.help_msg.Wrap(container_width)
        evt.Skip()

    def get_value(self):
        return self.widget_pack.get_value()

    def set_value(self, val):
        # Only truthy values are pushed into the widget.
        if val:
            self.widget_pack.widget.SetValue(unicode(val))

    def __repr__(self):
        return self.__class__.__name__
class CheckBox(BaseGuiComponent):
    """Boolean input: a wx.CheckBox rendered beside its help text."""

    def __init__(self, parent, title, msg, choices=None):
        # ``choices`` is accepted for interface parity but not used.
        BaseGuiComponent.__init__(self, parent, title, msg)

    def do_layout(self, parent, title, msg):
        # Unlike the base layout, the checkbox sits to the left of the
        # help text rather than above the payload widget.
        self.panel = wx.Panel(parent)
        self.widget = wx.CheckBox(self.panel)
        # self.widget.SetValue(self.default_value)

        self.title = self.format_title(self.panel, title)
        self.help_msg = self.format_help_msg(self.panel, msg)
        self.help_msg.SetMinSize((0, -1))
        # self.help_msg.Bind(wx.EVT_LEFT_UP, lambda event: self.widget.SetValue(not self.widget.GetValue()))

        vertical_container = wx.BoxSizer(wx.VERTICAL)
        vertical_container.Add(self.title)
        horizontal_sizer = wx.BoxSizer(wx.HORIZONTAL)
        horizontal_sizer.Add(self.widget, 0, wx.EXPAND | wx.RIGHT, 10)
        horizontal_sizer.Add(self.help_msg, 1, wx.EXPAND)

        vertical_container.Add(horizontal_sizer, 0, wx.EXPAND)
        self.panel.SetSizer(vertical_container)
        self.panel.Bind(wx.EVT_SIZE, self.onResize)
        return self.panel

    def onResize(self, evt):
        # Re-wrap the help text to the new panel width.
        msg = self.help_msg
        container_width, _ = self.panel.Size
        text_width, _ = msg.Size

        if text_width != container_width:
            msg.SetLabel(msg.GetLabelText().replace('\n', ' '))
            msg.Wrap(container_width)
        evt.Skip()

    def get_value(self):
        return self.widget.GetValue()

    def set_value(self, val):
        self.widget.SetValue(val)
class RadioGroup(object):
    """A boxed group of mutually exclusive radio buttons with help text."""

    def __init__(self, parent, title, msg, choices=None):
        self.panel = None
        # Parallel lists: one button / label / help text per option.
        self.radio_buttons = []
        self.option_strings = []
        self.help_msgs = []
        self.btn_names = []
        self.do_layout(parent, title, msg)

    def do_layout(self, parent, titles, msgs):
        # ``titles`` and ``msgs`` are sequences here (one entry per option),
        # unlike the scalar title/msg taken by the other components.
        self.panel = wx.Panel(parent)
        self.radio_buttons = [wx.RadioButton(self.panel, -1) for _ in titles]
        self.btn_names = [wx.StaticText(self.panel, label=title.title()) for title in titles]
        self.help_msgs = [wx.StaticText(self.panel, label=msg.title()) for msg in msgs]

        # box = wx.StaticBox(self.panel, -1, label=self.data['group_name'])
        box = wx.StaticBox(self.panel, -1, label='')
        vertical_container = wx.StaticBoxSizer(box, wx.VERTICAL)

        for button, name, help in zip(self.radio_buttons, self.btn_names, self.help_msgs):
            hbox = wx.BoxSizer(wx.HORIZONTAL)
            hbox.Add(button, 0, wx.ALIGN_TOP | wx.ALIGN_LEFT)
            hbox.Add(name, 0, wx.LEFT, 10)

            vertical_container.Add(hbox, 0, wx.EXPAND)
            vertical_container.Add(help, 1, wx.EXPAND | wx.LEFT, 25)
            vertical_container.AddSpacer(5)
        self.panel.SetSizer(vertical_container)

        self.panel.Bind(wx.EVT_SIZE, self.onResize)
        self.panel.Bind(wx.EVT_RADIOBUTTON, self.showz)
        return self.panel

    def showz(self, evt):
        # Debug handler left wired up: dumps each button's state to stdout.
        print evt
        for i in self.radio_buttons:
            print i.GetValue()

    def onResize(self, evt):
        # NOTE(review): only the first help message is re-wrapped here;
        # presumably the rest should be handled too -- confirm.
        msg = self.help_msgs[0]
        container_width, _ = self.panel.Size
        text_width, _ = msg.Size

        if text_width != container_width:
            msg.SetLabel(msg.GetLabelText().replace('\n', ' '))
            msg.Wrap(container_width)
        evt.Skip()

    def get_value(self):
        # One boolean per button, in declaration order.
        return [button.GetValue() for button in self.radio_buttons]

    def set_value(self, val):
        # Intentionally a no-op for radio groups.
        pass
def build_subclass(name, widget_class):
    """Dynamically create a BaseGuiComponent subclass bound to a payload.

    Equivalent to writing ``class <name>(BaseGuiComponent)`` with the given
    ``widget_class`` attribute, without repeating the boilerplate.
    """
    return type(name, (BaseGuiComponent,), dict(widget_class=widget_class))
# Concrete component classes, one per widget_pack payload.
FileChooser = build_subclass('FileChooser', widget_pack.FileChooserPayload)
MultiFileChooser = build_subclass('MultiFileChooser', widget_pack.MultiFileSaverPayload)
DirChooser = build_subclass('DirChooser', widget_pack.DirChooserPayload)
FileSaver = build_subclass('FileSaver', widget_pack.FileSaverPayload)
DateChooser = build_subclass('DateChooser', widget_pack.DateChooserPayload)
TextField = build_subclass('TextField', widget_pack.TextInputPayload)
# NOTE(review): unlike the others this passes a payload *instance*, yet
# do_layout() calls ``self.widget_class()`` -- confirm TextInputPayload
# instances are callable, otherwise CommandField fails at layout time.
CommandField = build_subclass('CommandField', widget_pack.TextInputPayload(no_quoting=True))
Dropdown = build_subclass('Dropdown', widget_pack.DropdownPayload)
Counter = build_subclass('Counter', widget_pack.CounterPayload)
MultiDirChooser = build_subclass('MultiDirChooser', widget_pack.MultiDirChooserPayload)
| lgpl-2.1 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/django/contrib/contenttypes/generic.py | 86 | 20290 | """
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from collections import defaultdict
from functools import partial
from operator import attrgetter
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import signals
from django.db import models, router, DEFAULT_DB_ALIAS
from django.db.models.fields.related import RelatedField, Field, ManyToManyRel
from django.db.models.loading import get_model
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet, modelformset_factory, save_instance
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
class GenericForeignKey(object):
    """
    Provides a generic relation to any object through content-type/object-id
    fields.
    """
    def __init__(self, ct_field="content_type", fk_field="object_id"):
        # Names of the concrete model fields backing the relation.
        self.ct_field = ct_field
        self.fk_field = fk_field

    def contribute_to_class(self, cls, name):
        # Register this virtual field on ``cls`` under ``name``.
        self.name = name
        self.model = cls
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_virtual_field(self)

        # For some reason I don't totally understand, using weakrefs here doesn't work.
        signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False)

        # Connect myself as the descriptor for this field
        setattr(cls, name, self)

    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handles initializing an object with the generic FK instead of
        content-type/object-id fields.
        """
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            kwargs[self.ct_field] = self.get_content_type(obj=value)
            kwargs[self.fk_field] = value._get_pk_val()

    def get_content_type(self, obj=None, id=None, using=None):
        # Convenience function using get_model avoids a circular import when
        # using this model
        ContentType = get_model("contenttypes", "contenttype")
        if obj:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(obj)
        elif id:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")

    def get_prefetch_query_set(self, instances):
        # For efficiency, group the instances by content type and then do one
        # query per model
        fk_dict = defaultdict(set)
        # We need one instance for each group in order to get the right db:
        instance_dict = {}
        ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
        for instance in instances:
            # We avoid looking for values if either ct_id or fkey value is None
            ct_id = getattr(instance, ct_attname)
            if ct_id is not None:
                fk_val = getattr(instance, self.fk_field)
                if fk_val is not None:
                    fk_dict[ct_id].add(fk_val)
                    instance_dict[ct_id] = instance

        ret_val = []
        for ct_id, fkeys in fk_dict.items():
            instance = instance_dict[ct_id]
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))

        # For doing the join in Python, we have to match both the FK val and the
        # content type, so we use a callable that returns a (fk, class) pair.
        def gfk_key(obj):
            ct_id = getattr(obj, ct_attname)
            if ct_id is None:
                return None
            else:
                model = self.get_content_type(id=ct_id,
                                              using=obj._state.db).model_class()
                return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
                        model)

        return (ret_val,
                lambda obj: (obj._get_pk_val(), obj.__class__),
                gfk_key,
                True,
                self.cache_attr)

    def is_cached(self, instance):
        # True when the related object has already been fetched and cached.
        return hasattr(instance, self.cache_attr)

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self

        try:
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None

            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj

    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError(u"%s must be accessed via instance" % self.related.opts.object_name)

        # Derive the content type and pk from the assigned object; ``None``
        # clears both underlying fields.
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()

        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRelation(RelatedField, Field):
    """Provides an accessor to generic related objects (e.g. comments)"""

    def __init__(self, to, **kwargs):
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = GenericRel(to,
                            related_name=kwargs.pop('related_name', None),
                            limit_choices_to=kwargs.pop('limit_choices_to', None),
                            symmetrical=kwargs.pop('symmetrical', True))

        # Override content-type/object-id field names on the related class
        self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
        self.content_type_field_name = kwargs.pop("content_type_field", "content_type")

        # The relation itself is never edited or serialized directly.
        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False
        Field.__init__(self, **kwargs)

    def get_choices_default(self):
        return Field.get_choices(self, include_blank=False)

    def value_to_string(self, obj):
        qs = getattr(obj, self.name).all()
        return smart_unicode([instance._get_pk_val() for instance in qs])

    # The m2m_* methods below emulate ManyToManyField's interface so the
    # rest of the ORM can treat this relation as a many-to-many.
    def m2m_db_table(self):
        return self.rel.to._meta.db_table

    def m2m_column_name(self):
        return self.object_id_field_name

    def m2m_reverse_name(self):
        return self.rel.to._meta.pk.column

    def m2m_target_field_name(self):
        return self.model._meta.pk.name

    def m2m_reverse_target_field_name(self):
        return self.rel.to._meta.pk.name

    def contribute_to_class(self, cls, name):
        super(GenericRelation, self).contribute_to_class(cls, name)

        # Save a reference to which model this class is on for future use
        self.model = cls

        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self))

    def contribute_to_related_class(self, cls, related):
        pass

    def set_attributes_from_rel(self):
        pass

    def get_internal_type(self):
        return "ManyToManyField"

    def db_type(self, connection):
        # Since we're simulating a ManyToManyField, in effect, best return the
        # same db_type as well.
        return None

    def extra_filters(self, pieces, pos, negate):
        """
        Return an extra filter to the queryset so that the results are filtered
        on the appropriate content type.
        """
        if negate:
            return []
        ContentType = get_model("contenttypes", "contenttype")
        content_type = ContentType.objects.get_for_model(self.model)
        prefix = "__".join(pieces[:pos + 1])
        return [("%s__%s" % (prefix, self.content_type_field_name),
                content_type)]

    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``GenericRelation``.
        """
        return self.rel.to._base_manager.db_manager(using).filter(**{
                "%s__pk" % self.content_type_field_name:
                    ContentType.objects.db_manager(using).get_for_model(self.model).pk,
                "%s__in" % self.object_id_field_name:
                    [obj.pk for obj in objs]
                })
class ReverseGenericRelatedObjectsDescriptor(object):
    """
    This class provides the functionality that makes the related-object
    managers available as attributes on a model class, for fields that have
    multiple "remote" values and have a GenericRelation defined in their model
    (rather than having another model pointed *at* them). In the example
    "article.publications", the publications attribute is a
    ReverseGenericRelatedObjectsDescriptor instance.
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self

        # This import is done here to avoid circular import importing this module
        from django.contrib.contenttypes.models import ContentType

        # Dynamically create a class that subclasses the related model's
        # default manager.
        rel_model = self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_generic_related_manager(superclass)

        qn = connection.ops.quote_name
        content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(instance)

        manager = RelatedManager(
            model = rel_model,
            instance = instance,
            symmetrical = (self.field.rel.symmetrical and instance.__class__ == rel_model),
            source_col_name = qn(self.field.m2m_column_name()),
            target_col_name = qn(self.field.m2m_reverse_name()),
            content_type = content_type,
            content_type_field_name = self.field.content_type_field_name,
            object_id_field_name = self.field.object_id_field_name,
            prefetch_cache_name = self.field.attname,
        )

        return manager

    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")

        # Assignment replaces the current related set with ``value``.
        manager = self.__get__(instance)
        manager.clear()
        for obj in value:
            manager.add(obj)
def create_generic_related_manager(superclass):
    """
    Factory function for a manager that subclasses 'superclass' (which is a
    Manager) and adds behavior for generic related objects.
    """
    class GenericRelatedObjectManager(superclass):
        def __init__(self, model=None, instance=None, symmetrical=None,
                     source_col_name=None, target_col_name=None, content_type=None,
                     content_type_field_name=None, object_id_field_name=None,
                     prefetch_cache_name=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.model = model
            self.content_type = content_type
            self.symmetrical = symmetrical
            self.instance = instance
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            self.content_type_field_name = content_type_field_name
            self.object_id_field_name = object_id_field_name
            self.prefetch_cache_name = prefetch_cache_name
            self.pk_val = self.instance._get_pk_val()
            # Every queryset produced by this manager is constrained to the
            # parent instance's content type and primary key.
            self.core_filters = {
                '%s__pk' % content_type_field_name: content_type.id,
                '%s__exact' % object_id_field_name: instance._get_pk_val(),
            }

        def get_query_set(self):
            # Prefer the prefetch cache when prefetch_related populated it.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                return super(GenericRelatedObjectManager, self).get_query_set().using(db).filter(**self.core_filters)

        def get_prefetch_query_set(self, instances):
            db = self._db or router.db_for_read(self.model, instance=instances[0])
            query = {
                '%s__pk' % self.content_type_field_name: self.content_type.id,
                '%s__in' % self.object_id_field_name:
                    set(obj._get_pk_val() for obj in instances)
            }
            qs = super(GenericRelatedObjectManager, self).get_query_set().using(db).filter(**query)
            return (qs,
                    attrgetter(self.object_id_field_name),
                    lambda obj: obj._get_pk_val(),
                    False,
                    self.prefetch_cache_name)

        def add(self, *objs):
            # Adding points each object's generic FK at the parent and saves.
            for obj in objs:
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                setattr(obj, self.content_type_field_name, self.content_type)
                setattr(obj, self.object_id_field_name, self.pk_val)
                obj.save()
        add.alters_data = True

        def remove(self, *objs):
            # Removal deletes the objects outright (they cannot exist
            # without their generic FK target).
            db = router.db_for_write(self.model, instance=self.instance)
            for obj in objs:
                obj.delete(using=db)
        remove.alters_data = True

        def clear(self):
            db = router.db_for_write(self.model, instance=self.instance)
            for obj in self.all():
                obj.delete(using=db)
        clear.alters_data = True

        def create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True

    return GenericRelatedObjectManager
class GenericRel(ManyToManyRel):
    """Relation metadata for a GenericRelation (always many-to-many)."""

    def __init__(self, to, related_name=None, limit_choices_to=None, symmetrical=True):
        self.to = to
        self.multiple = True
        self.through = None
        self.symmetrical = symmetrical
        self.related_name = related_name
        self.limit_choices_to = limit_choices_to or {}
class BaseGenericInlineFormSet(BaseModelFormSet):
    """
    A formset for generic inline objects to a parent.
    """

    # NOTE(review): ``save_as_new`` is accepted but never used here --
    # presumably kept for interface parity with BaseInlineFormSet; confirm.
    def __init__(self, data=None, files=None, instance=None, save_as_new=None,
                 prefix=None, queryset=None):
        # Avoid a circular import.
        from django.contrib.contenttypes.models import ContentType
        opts = self.model._meta
        self.instance = instance
        self.rel_name = '-'.join((
            opts.app_label, opts.object_name.lower(),
            self.ct_field.name, self.ct_fk_field.name,
        ))
        if self.instance is None or self.instance.pk is None:
            qs = self.model._default_manager.none()
        else:
            # Restrict the queryset to objects generically related to
            # the parent instance.
            if queryset is None:
                queryset = self.model._default_manager
            qs = queryset.filter(**{
                self.ct_field.name: ContentType.objects.get_for_model(self.instance),
                self.ct_fk_field.name: self.instance.pk,
            })
        super(BaseGenericInlineFormSet, self).__init__(
            queryset=qs, data=data, files=files,
            prefix=prefix
        )

    @classmethod
    def get_default_prefix(cls):
        opts = cls.model._meta
        return '-'.join((opts.app_label, opts.object_name.lower(),
                        cls.ct_field.name, cls.ct_fk_field.name,
                        ))

    def save_new(self, form, commit=True):
        # Avoid a circular import.
        from django.contrib.contenttypes.models import ContentType
        # Pre-populate the generic FK pair before saving the new object.
        kwargs = {
            self.ct_field.get_attname(): ContentType.objects.get_for_model(self.instance).pk,
            self.ct_fk_field.get_attname(): self.instance.pk,
        }
        new_obj = self.model(**kwargs)
        return save_instance(form, new_obj, commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
                                  formset=BaseGenericInlineFormSet,
                                  ct_field="content_type", fk_field="object_id",
                                  fields=None, exclude=None,
                                  extra=3, can_order=False, can_delete=True,
                                  max_num=None,
                                  formfield_callback=None):
    """
    Returns a ``GenericInlineFormSet`` for the given kwargs.

    You must provide ``ct_field`` and ``fk_field`` if they are different
    from the defaults ``content_type`` and ``object_id`` respectively.
    """
    opts = model._meta
    # Avoid a circular import.
    from django.contrib.contenttypes.models import ContentType
    # if there is no field called `ct_field` let the exception propagate
    ct_field = opts.get_field(ct_field)
    if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
        raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
    fk_field = opts.get_field(fk_field)  # let the exception propagate
    # The generic-FK field pair is managed by the formset itself, so it must
    # always be excluded from the generated forms.
    if exclude is not None:
        exclude = list(exclude)
        exclude.extend([ct_field.name, fk_field.name])
    else:
        exclude = [ct_field.name, fk_field.name]
    FormSet = modelformset_factory(model, form=form,
                                   formfield_callback=formfield_callback,
                                   formset=formset,
                                   extra=extra, can_delete=can_delete, can_order=can_order,
                                   fields=fields, exclude=exclude, max_num=max_num)
    # Attach the resolved field objects for the formset's own use.
    FormSet.ct_field = ct_field
    FormSet.ct_fk_field = fk_field
    return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
    """Inline admin for models attached to their parent via a generic FK."""
    ct_field = "content_type"
    ct_fk_field = "object_id"
    formset = BaseGenericInlineFormSet

    def get_formset(self, request, obj=None, **kwargs):
        """Build the generic inline formset class for this request/object."""
        if self.declared_fieldsets:
            fields = flatten_fieldsets(self.declared_fieldsets)
        else:
            fields = None
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        # Read-only fields can never be edited, so exclude them from the form.
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # GenericInlineModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # An empty list means "no exclusions declared" downstream.
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "ct_field": self.ct_field,
            "fk_field": self.ct_fk_field,
            "form": self.form,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "formset": self.formset,
            "extra": self.extra,
            "can_delete": can_delete,
            "can_order": False,
            "fields": fields,
            "max_num": self.max_num,
            "exclude": exclude
        }
        # Caller-supplied kwargs win over the defaults above.
        defaults.update(kwargs)
        return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
    """Generic inline rendered with the stacked edit-inline template."""
    template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
    """Generic inline rendered with the tabular edit-inline template."""
    template = 'admin/edit_inline/tabular.html'
| agpl-3.0 |
cabincode/django-djembe | djembe/tests/data.py | 2 | 6487 | RECIPIENT1_KEY = """-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEA0JLAp80Qa/AhjwK17UaM37J5ddQg87B163ImY8mMPSruNk0u
dUc7WFoTtQ90uGhCm8C6VZx7RDji5tzPeVJTAPOQO1IZaGvgJI1e1fayzyvX1Mbj
MSOnEBoZRWLSJIndL4jrkfDGpUL0ifJlVg+KzAK42b5Jvdd0fdPeFJuqtaP/Pknh
G2jB9cNOuQTU4fU0hguOJViyTov4Nht9zSEd8/W3jnT/tWSaltfHG9a8QoN89Tnq
e4tppia1ngJPaFbDrVIhKmtXUiMLPpywDbieX6Kc/gWBANcaVx+GD3lFzM1hXpDW
ikgLkFEq4rYVIHGbJ+WFUwEL+kIf1ec5YcdLGQIDAQABAoIBAQCc3rNNwZrnQ81J
rWU9V4GewbI+n9RE1GRsOAeNp8n1O3O8naIijPeihOUrmbtU8v2d/X9n4nCHqC1o
K41UF0ncflPtedsoQXJsq3M3g/uKe9Qx8YBwtfrxzl9RY3jgCFy2YsmikzAknc4k
bo4vAzOGK0nyYv89bG3uMUpnv4jOYXWA+RJMa8E9b4KQGMijH6nKhtrQ4QPxTYS3
93T4ax7WE7VVrhokNDfiTZeAMFBOEamfQqNRzSDa52qkU/UKTrfD+nMv/gtVgP1Y
if1wSUZCDeN8cNaAugsaaZZIoCczzo6fwxJVetrbOOy50bdfXkTdhH+FCx6nkTx6
j58vlW69AoGBAOgcVNehkd1qq6U0DdaJhs5LLBPdeK+CNOU5TShniFDoxq57SFyo
OjPaxwrOiIASxHVaV/j9YzyAeMXq6P6KMGkjWkDwp9M+pWCcNAgZG6KITN6O8X3Y
889F5YbdF3JbMGW0TyEUkZO64NIuDUOeEHdJXgBVbXShlpQcGNYPF6S3AoGBAOYK
QW3PNlk5CLxT32fG5LIJEqtt+1CaQWKpJVUp7oqhqrL/o4fhwq21UlEV4prIFh5N
OHy7bzeCIqPRCtbbdfTQBN/tY9Tpkj/u83YpoFIIf0pNYU9FXTmv3joKKlvUQzgT
bflJNRpfqecbPfacvTOTcWT1YKycXAU8xhr9cd6vAoGAZxHdovs+OKJbojqhQnlD
gWvkINYh913Me1Wcq6p939/eCjD4ixyw68HkzSm1d5nd33bExe5PGNi7yLdty0o2
ARoPw/KZs7aH3YS/njD4Eo6QA+xZ1FZ4vbv+FosrFD7mGOOel8P4eS9tEigc/7+6
9B2CPoew2S/AG/j2FmgRSbUCgYEAs8zgRwvm1uYVRd3oiByVM/KTfRqBjAaFhK/g
VihoIkMqQK4/Cz4SA+PtayNH8uLhJt4EgLmfvCsjGBeT+UufbrIInV3W4duOTBpg
gMMYZAXwOAszO9pje2tSrtqzMQM/Zt0I4AewUeTjz5cjX1YzK9TalKaV4VR1OELA
NOhT0FECgYEA0Z09S56hasmL8aXXjHtH+x2ODV+QNcRoCYmXkWPjoDmUApHJvYjS
VsYlfvixiEKU2ceWL9UD5CBSAZgQFSd4F4ywX/GMVRqMYsZ8g+LxZtsQWOtaATAV
dHK84qTzfkneGuONpIFn2wGdwPXxox5wCy1MaCzP6hVW6lMx+YsLHmU=
-----END RSA PRIVATE KEY-----"""
RECIPIENT1_CERTIFICATE = """-----BEGIN CERTIFICATE-----
MIIELTCCAxWgAwIBAgIJAKlnkc+pDoJLMA0GCSqGSIb3DQEBBQUAMGwxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxJTAjBgkqhkiG9w0BCQEWFnJlY2lwaWVudDFAZXhhbXBs
ZS5jb20wHhcNMTMwMzEzMDIzMDIyWhcNMjMwMzExMDIzMDIyWjBsMQswCQYDVQQG
EwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lk
Z2l0cyBQdHkgTHRkMSUwIwYJKoZIhvcNAQkBFhZyZWNpcGllbnQxQGV4YW1wbGUu
Y29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0JLAp80Qa/AhjwK1
7UaM37J5ddQg87B163ImY8mMPSruNk0udUc7WFoTtQ90uGhCm8C6VZx7RDji5tzP
eVJTAPOQO1IZaGvgJI1e1fayzyvX1MbjMSOnEBoZRWLSJIndL4jrkfDGpUL0ifJl
Vg+KzAK42b5Jvdd0fdPeFJuqtaP/PknhG2jB9cNOuQTU4fU0hguOJViyTov4Nht9
zSEd8/W3jnT/tWSaltfHG9a8QoN89Tnqe4tppia1ngJPaFbDrVIhKmtXUiMLPpyw
DbieX6Kc/gWBANcaVx+GD3lFzM1hXpDWikgLkFEq4rYVIHGbJ+WFUwEL+kIf1ec5
YcdLGQIDAQABo4HRMIHOMB0GA1UdDgQWBBRkgypC8Ht9aKNrYPFdSxRMNjYbtTCB
ngYDVR0jBIGWMIGTgBRkgypC8Ht9aKNrYPFdSxRMNjYbtaFwpG4wbDELMAkGA1UE
BhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdp
ZGdpdHMgUHR5IEx0ZDElMCMGCSqGSIb3DQEJARYWcmVjaXBpZW50MUBleGFtcGxl
LmNvbYIJAKlnkc+pDoJLMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
AFQ4ESnW10Cd5ON5KuBK9V+UNXyEGUxxQbo2Py+LrZsuA70DAfREzyIG4YS5/Mq3
s/ExBV1vUHx+J6a+99R1ZLYXV0Uw30orOTfkuKQsL7MvpH/cYI+cdN/C+xiFkdQ4
/TPLKHwSDjkrFrWQUUa/96J6UjPIX3Wp3I19yb/mItqSpxdzCBn1WBihH0tJ3ufk
6u2l0lrmWzURJXuDVv4+hRCESCoGdBZ1og6vmCNhROo0yPt+Av3Q/5fwRDXT6+yj
vcOE8ot5kRB4yA8YSw5mVt99sf/0yRlZNuYJpyQjMIeFDzcfkD7BdvkfI+wUz0Qx
/ocfLq2+QYlkR0j7paGrsyk=
-----END CERTIFICATE-----"""
RECIPIENT2_CERTIFICATE = """-----BEGIN CERTIFICATE-----
MIIELTCCAxWgAwIBAgIJAMfN9NnWc93xMA0GCSqGSIb3DQEBBQUAMGwxCzAJBgNV
BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
aWRnaXRzIFB0eSBMdGQxJTAjBgkqhkiG9w0BCQEWFnJlY2lwaWVudDJAZXhhbXBs
ZS5jb20wHhcNMTMwMzEzMDIzMjA4WhcNMjMwMzExMDIzMjA4WjBsMQswCQYDVQQG
EwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lk
Z2l0cyBQdHkgTHRkMSUwIwYJKoZIhvcNAQkBFhZyZWNpcGllbnQyQGV4YW1wbGUu
Y29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAte2l7nj4eaK8SmnG
SWDHFawcU3kUHpQa4KInAet6SM9xzkrXT4sT0ht6Rrs7oQuvTmqiJ3PZJTmOa6xr
qLabobL7aZ7QM5VjI/6U0pchL4y+/8/5IeR3H9bbAPuZd6ERKHLBLWIlkWORcEHX
Vy/st7AjevYYU7Up5AqHc1uhw6xgLj4iYG2tSE+rUjeF91yqaBDAr4454R+P6RG8
T/5VkeT8tUoW6REvUAQtgPg3mkrU1gnrnSQcu+1NICpkjQor3grPGHQMTZjZcUzi
XZ/sxTC9R9fcWf6k7Lvb2C2YOTEqWvX6fH418PPe/tn1yIdiaYqNY4cemH0Pbxtv
D1bYqwIDAQABo4HRMIHOMB0GA1UdDgQWBBSTPcT25ZY3lF32oQkr19R5X0DVJDCB
ngYDVR0jBIGWMIGTgBSTPcT25ZY3lF32oQkr19R5X0DVJKFwpG4wbDELMAkGA1UE
BhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdp
ZGdpdHMgUHR5IEx0ZDElMCMGCSqGSIb3DQEJARYWcmVjaXBpZW50MkBleGFtcGxl
LmNvbYIJAMfN9NnWc93xMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
AEhhEQKHNy5SZLDybJYh5NPzSBvf8DyTX79BHUQkVYFxC7jWOGhNDnkZGkVd0f7e
Qj88+WJf6eb7SROtE3ysXU0pi427T8IpEJK/5MCNgupaZ79ngAvtS+PFkL16LIHs
q1t+HuyJfgeaPL7g4LI7BZ8h6MKmxbWvjHzNSCipn6Mo1tEkyj6c60WgmLUBSWfq
mdZ0tMZlUpJXWWul9Cfz3bjcyImkonp92sojiMIxblWO+AHpRA585vCixXJakApe
cohK2wj5rBT2S1L81b+4hMvhYI4ggFKXEdx7QVC2K+doCAvOZ8BsE/1PCexJl+T6
dIqXZbPRmZ3dCJc3db6Rd1I=
-----END CERTIFICATE-----"""
RECIPIENT2_KEY = """-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAte2l7nj4eaK8SmnGSWDHFawcU3kUHpQa4KInAet6SM9xzkrX
T4sT0ht6Rrs7oQuvTmqiJ3PZJTmOa6xrqLabobL7aZ7QM5VjI/6U0pchL4y+/8/5
IeR3H9bbAPuZd6ERKHLBLWIlkWORcEHXVy/st7AjevYYU7Up5AqHc1uhw6xgLj4i
YG2tSE+rUjeF91yqaBDAr4454R+P6RG8T/5VkeT8tUoW6REvUAQtgPg3mkrU1gnr
nSQcu+1NICpkjQor3grPGHQMTZjZcUziXZ/sxTC9R9fcWf6k7Lvb2C2YOTEqWvX6
fH418PPe/tn1yIdiaYqNY4cemH0PbxtvD1bYqwIDAQABAoIBAQCgUfmMykvlHNSj
Gs1jMoaegZpAsQ1a2RdKZ3rKhVGwwiY/LdLJxPzFhfNeS3cDt4FYMD4PWFa+KBdq
qXoCJlHF0C0f6ZT+MCI6wQky/31+IqDaW9Lm3CdiBnveRNn8AxNUurOjo78JfV3Y
OB1lNtV7FhxeKmpLsfVlgSXWMaS/AjZhJVm2NXsLGzOn9teqc0r5AUkMUhtMa+BF
4MOvTBMN6Y6N1pb8tEfYYppqdel0z+yDiHBGLiK0ohQXLNt9jNsje7YDWNxaTpi+
J8yseBPAoj8xO2uZlfs0pp1R0RaDArNaLhaJCzoxxM8hjmABavXADefCkg8Njny6
V4f5NJIpAoGBAOZVgO1qdV1lPShbbjZ7iXLt4Y3OBUodpHbwvG6nzNKlYwAq7gJg
dVMz+4eyxvnMl+WotQTspx9FOBAPUJmYEEyMd20J2mB3VgJzbBU8q0RyI1rSp1+1
rdrZ+9820YKWapQsppI6dSG1YdMnefi37QW6TRCQb7UGHkCI3uqb6s+9AoGBAMoz
U4qg6R2hl4GLeUDwVWOZSGg4l4Ru2f5V0Wyq7e8N8fMBCPEbPqtaA4mmGhFaml3y
jf2HOQLb+kq553byeXj5xQW2AUuLmhMVA6bVbbQyRvGPWk0OnyoQeOwiakNfJ4cM
XONlnhhxmaziDjHFLUOxbVEiUmHCN/fHOEhkzzyHAoGAXOsFVcFqk0CyCUPJFEWF
z01TfmcsDxgiLrEy8tjSce+yHnEzDtZmvifsQnPGfSMh683BKLlnOeAsm53x/+Qu
PxMzYlNlOyEpG1gAtBiDZts8k/kC+ENMXUsupn9wyWNwn1Hb6IWYlv6a9hmoxMM9
5N+hExuRVM+hCJuugapgcWkCgYEAltogE6blBGRQYieqONaU/g+Xw5mlAPqqzGM+
MTJ8/QKMZd15zFelM7WeOg2gGR5dWIXjhShjUkzHuQRlOYrBMY42fYJCCRiHxIDD
Dq7gf7Wa1BE9L4wfFwTfElzCpqzREqIYktZ3Uok+O4sTR/JxSLWQdx2CFMkPdF4/
EVOUgPUCgYEAh+aavW92jkswqmJZvDtwiNq+vr6syURJcgODj4fz+Ji1PfpnGG1Q
VbD5vEy0oil4ErFV8tCJlSOIFn4CSL0sc9DGIqN9Rt2YIDRvFOtTPPMISNxuE6v7
vliD2IX8JBBQ1VKo+/hjjZYvwjGBTGJ4Tbx/npgvztg81LwPhX+e9zQ=
-----END RSA PRIVATE KEY-----"""
| cc0-1.0 |
schubergphilis/MCT-shared | ci/ci-setup-infra.py | 3 | 1306 | #!/usr/bin/env python
import sys
import click
from Cosmic.CI import CI
from Cosmic.NSX import NSX
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--cloudstack', '-c', help='Cloudstack deploy mode', default=False, is_flag=True, required=False)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
@click.option('--template', help='Use template', required=False)
def main(**kwargs):
    """Set up CI infrastructure: deploy the Cosmic DB and WAR, install
    system VM templates and KVM packages, and configure the NSX cluster.
    """
    isolation_mode = "vxlan"
    cloudstack_deploy_mode = kwargs.get('cloudstack')
    # Fall back to the default system VM template when none is given.
    template = kwargs.get('template') or '/data/templates/cosmic-systemvm.qcow2'
    ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
    ci.deploy_cosmic_db()
    ci.install_systemvm_templates(template=template)
    # NOTE(review): NSX(...) always returns an instance, so the ``if nsx:``
    # guards below look vestigial — presumably NSX was once optional/None.
    # Confirm before simplifying.
    nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
    if nsx:
        nsx.create_cluster()
    ci.configure_tomcat_to_load_jacoco_agent()
    ci.deploy_cosmic_war()
    ci.install_kvm_packages()
    ci.configure_agent_to_load_jacoco_agent()
    if nsx:
        nsx.configure_kvm_host()
    if nsx:
        # STT isolation is used when deploying in Cloudstack mode.
        if cloudstack_deploy_mode:
            isolation_mode = "stt"
        nsx.setup_cosmic(isolation_mode=isolation_mode)


if __name__ == '__main__':
    sys.exit(main())
| apache-2.0 |
bjorand/django-allauth | allauth/socialaccount/providers/vk/tests.py | 3 | 1215 | from __future__ import absolute_import
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import VKProvider
class VKTests(OAuth2TestsMixin, TestCase):
    """OAuth2 login tests for the VK provider, driven by canned HTTP responses."""
    provider_id = VKProvider.id

    def get_mocked_response(self, verified_email=True):
        # Canned VK ``users.get`` API payload (the user record is wrapped in
        # a one-element "response" list, as VK's API does).
        return MockedResponse(200, """
{"response": [{"last_name": "Penners", "university_name": "", "photo": "http://vk.com/images/camera_c.gif", "sex": 2, "photo_medium": "http://vk.com/images/camera_b.gif", "relation": "0", "timezone": 1, "photo_big": "http://vk.com/images/camera_a.gif", "uid": 219004864, "universities": [], "city": "1430", "first_name": "Raymond", "faculty_name": "", "online": 1, "counters": {"videos": 0, "online_friends": 0, "notes": 0, "audios": 0, "photos": 0, "followers": 0, "groups": 0, "user_videos": 0, "albums": 0, "friends": 0}, "home_phone": "", "faculty": 0, "nickname": "", "screen_name": "id219004864", "has_mobile": 1, "country": "139", "university": 0, "graduation": 0, "activity": "", "last_seen": {"time": 1377805189}}]}
""")

    def get_login_response_json(self, with_refresh_token=True):
        # VK token endpoint reply; VK does not issue refresh tokens.
        return '{"user_id": 219004864, "access_token":"testac"}'
| mit |
AutorestCI/azure-sdk-for-python | azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/__init__.py | 2 | 2821 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .key_pair import KeyPair
from .api_keys import ApiKeys
from .quota import Quota
from .per_project_quota import PerProjectQuota
from .account_quota import AccountQuota
from .account import Account
from .domain import Domain
from .image_tag import ImageTag
from .prediction_tag import PredictionTag
from .image import Image
from .image_create_result import ImageCreateResult
from .image_create_summary import ImageCreateSummary
from .image_file_create_entry import ImageFileCreateEntry
from .image_file_create_batch import ImageFileCreateBatch
from .image_url_create_entry import ImageUrlCreateEntry
from .image_url_create_batch import ImageUrlCreateBatch
from .image_id_create_entry import ImageIdCreateEntry
from .image_id_create_batch import ImageIdCreateBatch
from .image_tag_create_entry import ImageTagCreateEntry
from .image_tag_create_batch import ImageTagCreateBatch
from .image_tag_create_summary import ImageTagCreateSummary
from .prediction_query_tag import PredictionQueryTag
from .prediction_query_token import PredictionQueryToken
from .prediction import Prediction
from .prediction_query import PredictionQuery
from .image_url import ImageUrl
from .image_tag_prediction import ImageTagPrediction
from .image_prediction_result import ImagePredictionResult
from .project_settings import ProjectSettings
from .project import Project
from .iteration import Iteration
from .tag_performance import TagPerformance
from .iteration_performance import IterationPerformance
from .export import Export
from .tag import Tag
from .tag_list import TagList
# Public re-export list for the training models package (mirrors the
# generated imports above).
__all__ = [
    'KeyPair',
    'ApiKeys',
    'Quota',
    'PerProjectQuota',
    'AccountQuota',
    'Account',
    'Domain',
    'ImageTag',
    'PredictionTag',
    'Image',
    'ImageCreateResult',
    'ImageCreateSummary',
    'ImageFileCreateEntry',
    'ImageFileCreateBatch',
    'ImageUrlCreateEntry',
    'ImageUrlCreateBatch',
    'ImageIdCreateEntry',
    'ImageIdCreateBatch',
    'ImageTagCreateEntry',
    'ImageTagCreateBatch',
    'ImageTagCreateSummary',
    'PredictionQueryTag',
    'PredictionQueryToken',
    'Prediction',
    'PredictionQuery',
    'ImageUrl',
    'ImageTagPrediction',
    'ImagePredictionResult',
    'ProjectSettings',
    'Project',
    'Iteration',
    'TagPerformance',
    'IterationPerformance',
    'Export',
    'Tag',
    'TagList',
]
| mit |
betoesquivel/PLYpractice | env/lib/python2.7/site-packages/pip/operations/freeze.py | 84 | 3860 | from __future__ import absolute_import
import logging
import re
import pip
from pip.compat import stdlib_pkgs
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
logger = logging.getLogger(__name__)
# packages to exclude from freeze output
freeze_excludes = stdlib_pkgs + ['setuptools', 'pip', 'distribute']
def freeze(
        requirement=None,
        find_links=None, local_only=None, user_only=None, skip_regex=None,
        find_tags=False,
        default_vcs=None,
        isolated=False):
    """Generator yielding the lines of ``pip freeze`` output.

    If *requirement* names a requirements file, its ordering and comments
    are preserved and each of its entries is emitted from the installed
    set; everything installed but not listed is appended at the end under
    a marker comment.
    """
    find_links = find_links or []
    skip_match = None

    if skip_regex:
        skip_match = re.compile(skip_regex)

    # Collect dependency links declared by installed distributions and by
    # any '#egg=' find-links the caller passed in.
    dependency_links = []

    for dist in pkg_resources.working_set:
        if dist.has_metadata('dependency_links.txt'):
            dependency_links.extend(
                dist.get_metadata_lines('dependency_links.txt')
            )
    for link in find_links:
        if '#egg=' in link:
            dependency_links.append(link)
    for link in find_links:
        yield '-f %s' % link
    # Map of project name -> FrozenRequirement for everything installed.
    installations = {}
    for dist in get_installed_distributions(local_only=local_only,
                                            skip=freeze_excludes,
                                            user_only=user_only):
        req = pip.FrozenRequirement.from_dist(
            dist,
            dependency_links,
            find_tags=find_tags,
        )
        installations[req.name] = req

    if requirement:
        with open(requirement) as req_file:
            for line in req_file:
                # Pass through blanks, comments, skip-regex matches and
                # option lines unchanged.
                if (not line.strip()
                        or line.strip().startswith('#')
                        or (skip_match and skip_match.search(line))
                        or line.startswith((
                            '-r', '--requirement',
                            '-Z', '--always-unzip',
                            '-f', '--find-links',
                            '-i', '--index-url',
                            '--extra-index-url'))):
                    yield line.rstrip()
                    continue

                if line.startswith('-e') or line.startswith('--editable'):
                    # Strip the editable flag and parse what remains.
                    if line.startswith('-e'):
                        line = line[2:].strip()
                    else:
                        line = line[len('--editable'):].strip().lstrip('=')
                    line_req = InstallRequirement.from_editable(
                        line,
                        default_vcs=default_vcs,
                        isolated=isolated,
                    )
                else:
                    line_req = InstallRequirement.from_line(
                        line,
                        isolated=isolated,
                    )

                if not line_req.name:
                    logger.info(
                        "Skipping line because it's not clear what it "
                        "would install: %s",
                        line.strip(),
                    )
                    logger.info(
                        "  (add #egg=PackageName to the URL to avoid"
                        " this warning)"
                    )
                elif line_req.name not in installations:
                    logger.warning(
                        "Requirement file contains %s, but that package is"
                        " not installed",
                        line.strip(),
                    )
                else:
                    # Emit the pinned installed version; delete it so the
                    # final loop only yields packages not in the file.
                    yield str(installations[line_req.name]).rstrip()
                    del installations[line_req.name]

        yield(
            '## The following requirements were added by '
            'pip freeze:'
        )
    for installation in sorted(
            installations.values(), key=lambda x: x.name.lower()):
        yield str(installation).rstrip()
| mit |
eaas-framework/virtualbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/Vtf.py | 11 | 7215 | ## @file
# process VTF generation
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from GenFdsGlobalVariable import GenFdsGlobalVariable
import os
from CommonDataClass.FdfClass import VtfClassObject
T_CHAR_LF = '\n'
## generate VTF
#
#
class Vtf (VtfClassObject):
    """Generates a VTF (Volume Top File) plus the BSF .inf file that
    drives the external GenVtf tool."""

    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        VtfClassObject.__init__(self)

    ## GenVtf() method
    #
    #   Generate VTF
    #
    #   @param  self            The object pointer
    #   @param  FdAddressDict   dictionary contains FV name and its base address
    #   @retval Dict            FV and corresponding VTF file name
    #
    def GenVtf(self, FdAddressDict):
        self.GenBsfInf()
        # (The unused 'OutputFile' local of the original was removed; the
        # per-FV output names come from GenOutputArg below.)
        BaseAddArg = self.GetBaseAddressArg(FdAddressDict)
        OutputArg, VtfRawDict = self.GenOutputArg()

        Cmd = (
            'GenVtf',
        ) + OutputArg + (
            '-f', self.BsfInfName,
        ) + BaseAddArg

        GenFdsGlobalVariable.CallExternalTool(Cmd, "GenFv -Vtf Failed!")
        GenFdsGlobalVariable.SharpCounter = 0

        return VtfRawDict

    ## GenBsfInf() method
    #
    #   Generate the .inf file consumed by GenVtf.
    #
    #   @param  self        The object pointer
    #
    def GenBsfInf(self):
        FvList = self.GetFvList()
        self.BsfInfName = os.path.join(GenFdsGlobalVariable.FvDir, self.UiName + '.inf')
        # Use a context manager so the handle is closed even if a write
        # fails (the original code leaked the file object on exceptions).
        with open(self.BsfInfName, 'w+') as BsfInf:
            if self.ResetBin is not None:
                BsfInf.writelines("[OPTIONS]" + T_CHAR_LF)
                BsfInf.writelines("IA32_RST_BIN" + " = " +
                                  GenFdsGlobalVariable.MacroExtend(
                                      GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.ResetBin)) +
                                  T_CHAR_LF)
                BsfInf.writelines(T_CHAR_LF)

            BsfInf.writelines("[COMPONENTS]" + T_CHAR_LF)
            for ComponentObj in self.ComponentStatementList:
                BsfInf.writelines("COMP_NAME" + " = " + ComponentObj.CompName + T_CHAR_LF)

                # COMP_LOC is 'N' for no location, an explicit file position,
                # or 'F'/'S' for the first/second FV of the layout.
                if ComponentObj.CompLoc.upper() == 'NONE':
                    BsfInf.writelines("COMP_LOC" + " = " + 'N' + T_CHAR_LF)
                elif ComponentObj.FilePos is not None:
                    BsfInf.writelines("COMP_LOC" + " = " + ComponentObj.FilePos + T_CHAR_LF)
                else:
                    Index = FvList.index(ComponentObj.CompLoc.upper())
                    if Index == 0:
                        BsfInf.writelines("COMP_LOC" + " = " + 'F' + T_CHAR_LF)
                    elif Index == 1:
                        BsfInf.writelines("COMP_LOC" + " = " + 'S' + T_CHAR_LF)

                BsfInf.writelines("COMP_TYPE" + " = " + ComponentObj.CompType + T_CHAR_LF)
                BsfInf.writelines("COMP_VER" + " = " + ComponentObj.CompVer + T_CHAR_LF)
                BsfInf.writelines("COMP_CS" + " = " + ComponentObj.CompCs + T_CHAR_LF)

                # '-' means "no binary/symbol file"; anything else is a
                # workspace-relative path that must be macro-expanded.
                BinPath = ComponentObj.CompBin
                if BinPath != '-':
                    BinPath = GenFdsGlobalVariable.MacroExtend(
                        GenFdsGlobalVariable.ReplaceWorkspaceMacro(BinPath))
                BsfInf.writelines("COMP_BIN" + " = " + BinPath + T_CHAR_LF)

                SymPath = ComponentObj.CompSym
                if SymPath != '-':
                    SymPath = GenFdsGlobalVariable.MacroExtend(
                        GenFdsGlobalVariable.ReplaceWorkspaceMacro(SymPath))
                BsfInf.writelines("COMP_SYM" + " = " + SymPath + T_CHAR_LF)

                BsfInf.writelines("COMP_SIZE" + " = " + ComponentObj.CompSize + T_CHAR_LF)
                BsfInf.writelines(T_CHAR_LF)

    ## GetFvList() method
    #
    #   Get FV list referenced by VTF components
    #
    #   @param  self        The object pointer
    #
    def GetFvList(self):
        FvList = []
        for component in self.ComponentStatementList:
            Loc = component.CompLoc.upper()
            if Loc != 'NONE' and Loc not in FvList:
                FvList.append(Loc)
        return FvList

    ## GetBaseAddressArg() method
    #
    #   Get base address arguments for GenVtf
    #
    #   @param  self        The object pointer
    #
    def GetBaseAddressArg(self, FdAddressDict):
        FvList = self.GetFvList()
        CmdStr = tuple()
        for i in FvList:
            # NOTE(review): .get() returns None for a missing FV, which would
            # raise on unpacking — presumably every listed FV is guaranteed
            # to be present in FdAddressDict; confirm against callers.
            (BaseAddress, Size) = FdAddressDict.get(i)
            CmdStr += (
                '-r', '0x%x' % BaseAddress,
                '-s', '0x%x' % Size,
            )
        return CmdStr

    ## GenOutputArg() method
    #
    #   Get output arguments for GenVtf
    #
    #   @param  self        The object pointer
    #
    def GenOutputArg(self):
        FvVtfDict = {}
        OutputFileName = ''
        FvList = self.GetFvList()
        Index = 0
        Arg = tuple()
        for FvObj in FvList:
            Index = Index + 1
            OutputFileName = 'Vtf%d.raw' % Index
            OutputFileName = os.path.join(GenFdsGlobalVariable.FvDir, OutputFileName)
            Arg += ('-o', OutputFileName)
            FvVtfDict[FvObj.upper()] = OutputFileName
        return Arg, FvVtfDict
| gpl-2.0 |
wangjun/pyload | module/plugins/hoster/PornhubCom.py | 3 | 2495 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from module.plugins.Hoster import Hoster
class PornhubCom(Hoster):
    __name__ = "PornhubCom"
    __type__ = "hoster"
    __pattern__ = r'http://[\w\.]*?pornhub\.com/view_video\.php\?viewkey=[\w\d]+'
    __version__ = "0.5"
    __description__ = """Pornhub.com Download Hoster"""
    __author_name__ = ("jeix")
    __author_mail__ = ("jeix@hasnomail.de")

    def process(self, pyfile):
        """Entry point: fetch the page, resolve the real file URL, download."""
        self.download_html()
        if not self.file_exists():
            self.offline()

        pyfile.name = self.get_file_name()
        self.download(self.get_file_url())

    def download_html(self):
        # Cache the video page's HTML for the other helpers.
        url = self.pyfile.url
        self.html = self.load(url)

    def get_file_url(self):
        """ returns the absolute downloadable filepath
        """
        if self.html is None:
            self.download_html()

        url = "http://www.pornhub.com//gateway.php"
        video_id = self.pyfile.url.split('=')[-1]
        # thanks to jD team for this one v
        # Hand-built binary request body for the Flash gateway; the byte
        # layout (and the trailing key) must match the player exactly.
        post_data = "\x00\x03\x00\x00\x00\x01\x00\x0c\x70\x6c\x61\x79\x65\x72\x43\x6f\x6e\x66\x69\x67\x00\x02\x2f\x31\x00\x00\x00\x44\x0a\x00\x00\x00\x03\x02\x00"
        post_data += chr(len(video_id))
        post_data += video_id
        post_data += "\x02\x00\x02\x2d\x31\x02\x00\x20"
        post_data += "add299463d4410c6d1b1c418868225f7"

        content = self.req.load(url, post=str(post_data))

        # Replace non-printable bytes with '#' so the regex below can
        # operate on the binary reply as text.
        new_content = ""
        for x in content:
            if ord(x) < 32 or ord(x) > 176:
                new_content += '#'
            else:
                new_content += x
        content = new_content

        file_url = re.search(r'flv_url.*(http.*?)##post_roll', content).group(1)

        return file_url

    def get_file_name(self):
        """Derive the download file name from the page title (fallback: <h1>)."""
        if self.html is None:
            self.download_html()

        match = re.search(r'<title[^>]+>([^<]+) - ', self.html)
        if match:
            name = match.group(1)
        else:
            # Fallback: the second <h1> holds the title when several exist.
            matches = re.findall('<h1>(.*?)</h1>', self.html)
            if len(matches) > 1:
                name = matches[1]
            else:
                name = matches[0]

        return name + '.flv'

    def file_exists(self):
        """ returns True or False
        """
        if self.html is None:
            self.download_html()

        if re.search(r'This video is no longer in our database or is in conversion', self.html) is not None:
            return False
        else:
            return True
| gpl-3.0 |
iulian787/spack | var/spack/repos/builtin/packages/pnmpi/package.py | 5 | 1762 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pnmpi(CMakePackage):
    """PnMPI is a dynamic MPI tool infrastructure that builds on top of
    the standardized PMPI interface. """

    homepage = "https://github.com/LLNL/PnMPI"
    url = "https://github.com/LLNL/PnMPI/releases/download/v1.7/PnMPI-v1.7-full.tar.gz"

    version('1.7', sha256='523228bdc220ae417d6812c0766bba698a240d71c69981cb0cb2b09a75ef4a9e')

    variant('fortran', default=False,
            description='Configure PnMPI with Fortran support')
    variant('tests', default=False,
            description='Build test cases and enable "test" makefile target')

    depends_on('cmake', type='build')
    depends_on('argp-standalone', when='platform=darwin')
    depends_on('binutils')
    depends_on('help2man')
    depends_on('doxygen')
    depends_on('mpi')

    @run_before('cmake')
    def check_fortran(self):
        # Fail before the CMake stage: a missing Fortran compiler would
        # otherwise only surface as a late configure error.
        if self.spec.satisfies('+fortran') and not (self.compiler.f77 or self.compiler.fc):
            raise InstallError('pnmpi+fortran requires Fortran compiler '
                               'but no Fortran compiler found!')

    def cmake_args(self):
        """Translate the two boolean variants into CMake -D switches."""
        def as_bool(variant):
            return 'ON' if self.spec.satisfies(variant) else 'OFF'

        return [
            '-DENABLE_FORTRAN:BOOL={0}'.format(as_bool('+fortran')),
            '-DENABLE_TESTING:BOOL={0}'.format(as_bool('+tests')),
        ]
| lgpl-2.1 |
sigma-random/androguard | androguard/decompiler/dad/util.py | 9 | 5858 | # This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logger = logging.getLogger('dad.util')
# Primitive Dalvik type descriptors -> Java type names.
TYPE_DESCRIPTOR = {
    'V': 'void',
    'Z': 'boolean',
    'B': 'byte',
    'S': 'short',
    'C': 'char',
    'I': 'int',
    'J': 'long',
    'F': 'float',
    'D': 'double',
}

# access_flags bit -> Java keyword, for classes, fields and methods
# respectively (the valid bits differ per kind).
ACCESS_FLAGS_CLASSES = {
    0x1: 'public',
    0x2: 'private',
    0x4: 'protected',
    0x8: 'static',
    0x10: 'final',
    0x200: 'interface',
    0x400: 'abstract',
    0x1000: 'synthetic',
    0x2000: 'annotation',
    0x4000: 'enum',
}

ACCESS_FLAGS_FIELDS = {
    0x1: 'public',
    0x2: 'private',
    0x4: 'protected',
    0x8: 'static',
    0x10: 'final',
    0x40: 'volatile',
    0x80: 'transient',
    0x1000: 'synthetic',
    0x4000: 'enum',
}

ACCESS_FLAGS_METHODS = {
    0x1: 'public',
    0x2: 'private',
    0x4: 'protected',
    0x8: 'static',
    0x10: 'final',
    0x20: 'synchronized',
    0x40: 'bridge',
    0x80: 'varargs',
    0x100: 'native',
    0x400: 'abstract',
    0x800: 'strictfp',
    0x1000: 'synthetic',
    0x10000: 'constructor',
    0x20000: 'declared_synchronized',
}

# Order in which access modifiers are conventionally written in Java source.
ACCESS_ORDER = [0x1, 0x4, 0x2, 0x400, 0x8, 0x10,
                0x80, 0x40, 0x20, 0x100, 0x800,
                0x200, 0x1000, 0x2000, 0x4000,
                0x10000, 0x20000]

# Number of Dalvik registers a value of the given type occupies; long (J)
# and double (D) are wide, every other type takes a single register.
TYPE_LEN = {
    'J': 2,
    'D': 2,
}
def get_access_class(access):
    """Return the class access-flag names set in *access*, in canonical order."""
    return [ACCESS_FLAGS_CLASSES[flag] for flag in ACCESS_ORDER if flag & access]
def get_access_method(access):
    """Return the method access-flag names set in *access*, in canonical order."""
    return [ACCESS_FLAGS_METHODS[flag] for flag in ACCESS_ORDER if flag & access]
def get_access_field(access):
    """Return the field access-flag names set in *access*, in canonical order."""
    return [ACCESS_FLAGS_FIELDS[flag] for flag in ACCESS_ORDER if flag & access]
def build_path(graph, node1, node2, path=None):
    """Collect every node between *node1* and *node2*, node1 excluded.

    Walks predecessor edges backwards from *node2*, accumulating visited
    nodes in *path*; a loop starting from *node1* will appear in the result.
    """
    path = [] if path is None else path
    if node1 is node2:
        return path
    path.append(node2)
    for predecessor in graph.all_preds(node2):
        if predecessor not in path:
            build_path(graph, node1, predecessor, path)
    return path
def common_dom(idom, cur, pred):
    """Return the common dominator of *cur* and *pred*.

    ``idom`` maps each node to its immediate dominator; nodes carry a
    ``num`` ordering attribute.  If either argument is falsy, the other
    (or the falsy value itself) is returned unchanged.
    """
    if not (cur and pred):
        return cur or pred
    # Intersection walk: climb the dominator tree from whichever side has
    # the smaller/larger number until both pointers meet.
    while cur is not pred:
        while cur.num < pred.num:
            pred = idom[pred]
        while cur.num > pred.num:
            cur = idom[cur]
    return cur
def merge_inner(clsdict):
    '''
    Merge the inner class(es) of a class:
    e.g class A { ... } class A$foo{ ... } class A$bar{ ... }
       ==> class A { class foo{...} class bar{...} ... }
    '''
    samelist = False
    done = {}
    while not samelist:
        samelist = True
        # Snapshot the keys before iterating: entries are deleted from
        # clsdict inside the loop, and iterating a live dict view while
        # mutating it raises RuntimeError on Python 3 (dict.keys() returned
        # a list copy on Python 2, which this code relied on).
        classlist = list(clsdict)
        for classname in classlist:
            parts_name = classname.rsplit('$', 1)
            if len(parts_name) > 1:
                mainclass, innerclass = parts_name
                innerclass = innerclass[:-1]  # remove ';' of the name
                mainclass += ';'
                if mainclass in clsdict:
                    # Parent still present: attach the inner class directly.
                    clsdict[mainclass].add_subclass(innerclass,
                                                    clsdict[classname])
                    clsdict[classname].name = innerclass
                    done[classname] = clsdict[classname]
                    del clsdict[classname]
                    samelist = False
                elif mainclass in done:
                    # Parent was itself already merged away; attach the
                    # inner class to wherever the parent ended up.
                    cls = done[mainclass]
                    cls.add_subclass(innerclass, clsdict[classname])
                    clsdict[classname].name = innerclass
                    done[classname] = done[mainclass]
                    del clsdict[classname]
                    samelist = False
def get_type_size(param):
    '''
    Return the number of registers needed by the type @param
    (wide types J/long and D/double take two, everything else one).
    '''
    return TYPE_LEN.get(param, 1)
def get_type(atype, size=None):
    '''
    Retrieve the java type of a descriptor (e.g : I -> int,
    Ljava/lang/String; -> String, [I -> int[]).
    '''
    res = TYPE_DESCRIPTOR.get(atype)
    if res is None:
        if atype[0] == 'L':
            # Reference type: strip 'L' ... ';' and use dotted notation.
            res = atype[1:-1].replace('/', '.')
            # Strip the exact 'java.lang.' prefix.  The previous code used
            # str.lstrip('java/lang/'), which removes leading *characters*
            # from that set rather than the prefix string, and so mangled
            # names such as Ljava/lang/annotation/Annotation; into
            # 'otation.Annotation'.
            if res.startswith('java.lang.'):
                res = res[len('java.lang.'):]
        elif atype[0] == '[':
            # Array type: recurse on the element type.
            if size is None:
                res = '%s[]' % get_type(atype[1:])
            else:
                res = '%s[%s]' % (get_type(atype[1:]), size)
        else:
            res = atype
            logger.debug('Unknown descriptor: "%s".', atype)
    return res
def get_params_type(descriptor):
    '''
    Return the list of parameter types of a method descriptor whose
    parameters are space-separated (e.g. '(I C)V' -> ['I', 'C']).
    '''
    # str.split() already returns [] for an empty parameter list, so the
    # original "if params: [p for p in params] else []" dance is redundant.
    return descriptor.split(')')[0][1:].split()
def create_png(cls_name, meth_name, graph, dir_name='graphs2'):
m_name = ''.join(x for x in meth_name if x.isalnum())
name = ''.join((cls_name.split('/')[-1][:-1], '#', m_name))
graph.draw(name, dir_name)
| apache-2.0 |
hyperized/ansible | test/units/modules/network/f5/test_bigip_qkview.py | 38 | 5656 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_qkview import Parameters
from library.modules.bigip_qkview import ModuleManager
from library.modules.bigip_qkview import MadmLocationManager
from library.modules.bigip_qkview import BulkLocationManager
from library.modules.bigip_qkview import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_qkview import Parameters
from ansible.modules.network.f5.bigip_qkview import ModuleManager
from ansible.modules.network.f5.bigip_qkview import MadmLocationManager
from ansible.modules.network.f5.bigip_qkview import BulkLocationManager
from ansible.modules.network.f5.bigip_qkview import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a fixture file from ``fixture_path`` and memoize the result.

    JSON content is returned parsed; anything that fails to parse as
    JSON is returned as the raw file text. Repeated calls for the same
    file are served from the module-level ``fixture_data`` cache.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as handle:
        content = handle.read()
    try:
        content = json.loads(content)
    except Exception:
        # Not JSON -- keep the raw text as-is.
        pass
    fixture_data[path] = content
    return content
class TestParameters(unittest.TestCase):
    # Verifies that the bigip_qkview Parameters adapter translates raw
    # module arguments into the qkview CLI option strings asserted below.

    def test_module_parameters(self):
        args = dict(
            filename='foo.qkview',
            asm_request_log=False,
            max_file_size=1024,
            complete_information=True,
            exclude_core=True,
            force=False,
            exclude=['audit', 'secure'],
            dest='/tmp/foo.qkview'
        )
        p = Parameters(params=args)
        assert p.filename == 'foo.qkview'
        # asm_request_log=False is expected to produce no option at all.
        assert p.asm_request_log is None
        assert p.max_file_size == '-s 1024'
        assert p.complete_information == '-c'
        assert p.exclude_core == '-C'
        assert p.force is False
        # NOTE(review): this was probably meant to be len(p.exclude) == 2;
        # it passes either way because exclude_core is the 2-character
        # string '-C' -- confirm the original intent.
        assert len(p.exclude_core) == 2
        assert 'audit' in p.exclude
        assert 'secure' in p.exclude
        assert p.dest == '/tmp/foo.qkview'

    def test_module_asm_parameter(self):
        args = dict(
            asm_request_log=True,
        )
        p = Parameters(params=args)
        # True is rendered as the "-o asm-request-log" qkview option.
        assert p.asm_request_log == '-o asm-request-log'
class TestMadmLocationManager(unittest.TestCase):
    # Exercises ModuleManager when the device version is < 14, which is
    # forced below so that the MadmLocationManager variant is used.

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_qkview_default_options(self, *args):
        set_module_args(dict(
            dest='/tmp/foo.qkview',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Override methods in the specific type of manager: stub out every
        # call that would touch a real device or the local filesystem.
        tm = MadmLocationManager(module=module, params=module.params)
        tm.exists = Mock(return_value=False)
        tm.execute_on_device = Mock(return_value=True)
        tm._move_qkview_to_download = Mock(return_value=True)
        tm._download_file = Mock(return_value=True)
        tm._delete_qkview = Mock(return_value=True)
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.is_version_less_than_14 = Mock(return_value=True)
        mm.get_manager = Mock(return_value=tm)
        with patch('os.path.exists') as mo:
            mo.return_value = True
            results = mm.exec_module()
        # Generating a qkview must not report a configuration change.
        assert results['changed'] is False
class TestBulkLocationManager(unittest.TestCase):
    # Same scenario as TestMadmLocationManager, but with the device
    # version forced to >= 14 so the BulkLocationManager variant is used.

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_qkview_default_options(self, *args):
        set_module_args(dict(
            dest='/tmp/foo.qkview',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Override methods in the specific type of manager: stub out every
        # call that would touch a real device or the local filesystem.
        tm = BulkLocationManager(module=module, params=module.params)
        tm.exists = Mock(return_value=False)
        tm.execute_on_device = Mock(return_value=True)
        tm._move_qkview_to_download = Mock(return_value=True)
        tm._download_file = Mock(return_value=True)
        tm._delete_qkview = Mock(return_value=True)
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.is_version_less_than_14 = Mock(return_value=False)
        mm.get_manager = Mock(return_value=tm)
        with patch('os.path.exists') as mo:
            mo.return_value = True
            results = mm.exec_module()
        # Generating a qkview must not report a configuration change.
        assert results['changed'] is False
| gpl-3.0 |
thnee/ansible | lib/ansible/modules/network/cloudengine/ce_vrf_interface.py | 13 | 15784 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_vrf_interface
version_added: "2.4"
short_description: Manages interface specific VPN configuration on HUAWEI CloudEngine switches.
description:
- Manages interface specific VPN configuration of HUAWEI CloudEngine switches.
author: Zhijin Zhou (@QijunPan)
notes:
- Ensure that a VPN instance has been created and the IPv4 address family has been enabled for the VPN instance.
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
vrf:
description:
- VPN instance, the length of vrf name is 1 ~ 31, i.e. "test", but can not be C(_public_).
required: true
vpn_interface:
description:
- An interface that can binding VPN instance, i.e. 40GE1/0/22, Vlanif10.
Must be fully qualified interface name.
Interface types, such as 10GE, 40GE, 100GE, LoopBack, MEth, Tunnel, Vlanif....
required: true
state:
description:
- Manage the state of the resource.
required: false
choices: ['present','absent']
default: present
'''
EXAMPLES = '''
- name: VRF interface test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure a VPN instance for the interface"
ce_vrf_interface:
vpn_interface: 40GE1/0/2
vrf: test
state: present
provider: "{{ cli }}"
- name: "Disable the association between a VPN instance and an interface"
ce_vrf_interface:
vpn_interface: 40GE1/0/2
vrf: test
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {
"state": "present",
"vpn_interface": "40GE2/0/17",
"vrf": "jss"
}
existing:
description: k/v pairs of existing attributes on the interface
returned: verbose mode
type: dict
sample: {
"vpn_interface": "40GE2/0/17",
"vrf": null
}
end_state:
description: k/v pairs of end attributes on the interface
returned: verbose mode
type: dict
sample: {
"vpn_interface": "40GE2/0/17",
"vrf": "jss"
}
updates:
description: command list sent to the device
returned: always
type: list
sample: [
"ip binding vpn-instance jss",
]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config, set_nc_config
CE_NC_GET_VRF = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_GET_VRF_INTERFACE = """
<filter type="subtree">
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName></vrfName>
<l3vpnIfs>
<l3vpnIf>
<ifName></ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</filter>
"""
CE_NC_MERGE_VRF_INTERFACE = """
<config>
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<l3vpnIfs>
<l3vpnIf operation="merge">
<ifName>%s</ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</config>
"""
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<isL2SwitchPort></isL2SwitchPort>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_DEL_INTF_VPN = """
<config>
<l3vpn xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<l3vpncomm>
<l3vpnInstances>
<l3vpnInstance>
<vrfName>%s</vrfName>
<l3vpnIfs>
<l3vpnIf operation="delete">
<ifName>%s</ifName>
</l3vpnIf>
</l3vpnIfs>
</l3vpnInstance>
</l3vpnInstances>
</l3vpncomm>
</l3vpn>
</config>
"""
def get_interface_type(interface):
    """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
    if interface is None:
        return None
    # Ordered (prefix, canonical type) pairs, scanned first-match-first.
    # The order reproduces the original if/elif cascade exactly.
    prefix_table = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )
    name = interface.upper()
    for prefix, iftype in prefix_table:
        if name.startswith(prefix):
            return iftype
    # Unknown interface prefix.
    return None
class VrfInterface(object):
    """Manage the binding between a VPN instance (VRF) and an interface.

    Reads current state from the device over NETCONF, compares it with
    the requested state, applies a merge/delete RPC if needed, and
    reports proposed/existing/end_state plus equivalent CLI commands.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()
        # vpn instance info
        self.vrf = self.module.params['vrf']
        self.vpn_interface = self.module.params['vpn_interface']
        # Normalize the interface name (upper-case, no blanks) so it can
        # be compared against <ifName> values from the device.
        self.vpn_interface = self.vpn_interface.upper().replace(' ', '')
        self.state = self.module.params['state']
        # Cached facts about the interface, filled by get_intf_conf_info().
        self.intf_info = dict()
        self.intf_info['isL2SwitchPort'] = None
        self.intf_info['vrfName'] = None
        self.conf_exist = False
        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def init_module(self):
        """init_module"""
        # NOTE(review): both vrf and vpn_interface are declared
        # required=True in the argument spec, so this required_one_of
        # constraint appears redundant -- confirm before removing.
        required_one_of = [("vrf", "vpn_interface")]
        self.module = AnsibleModule(
            argument_spec=self.spec, required_one_of=required_one_of, supports_check_mode=True)

    def check_response(self, xml_str, xml_name):
        """Check if response message is already succeed."""
        # A successful NETCONF edit-config reply contains <ok/>; anything
        # else is treated as a failure and aborts the module.
        if "<ok/>" not in xml_str:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)

    def get_update_cmd(self):
        """ get updated command"""
        # Build the CLI-equivalent commands for reporting only; nothing
        # is appended when the device already matches the request.
        if self.conf_exist:
            return
        if self.state == 'absent':
            self.updates_cmd.append(
                "undo ip binding vpn-instance %s" % self.vrf)
            return
        if self.vrf != self.intf_info['vrfName']:
            self.updates_cmd.append("ip binding vpn-instance %s" % self.vrf)
        return

    def check_params(self):
        """Check all input params"""
        # The VPN instance must exist on the device before binding.
        if not self.is_vrf_exist():
            self.module.fail_json(
                msg='Error: The VPN instance is not existed.')
        # Removing a binding that is not present is an error.
        if self.state == 'absent':
            if self.vrf != self.intf_info['vrfName']:
                self.module.fail_json(
                    msg='Error: The VPN instance is not bound to the interface.')
        # L2 switchports cannot carry an L3 VPN binding.
        if self.intf_info['isL2SwitchPort'] == 'true':
            self.module.fail_json(
                msg='Error: L2Switch Port can not binding a VPN instance.')
        # interface type check
        if self.vpn_interface:
            intf_type = get_interface_type(self.vpn_interface)
            if not intf_type:
                self.module.fail_json(
                    msg='Error: interface name of %s'
                    ' is error.' % self.vpn_interface)
        # vrf check
        if self.vrf == '_public_':
            self.module.fail_json(
                msg='Error: The vrf name _public_ is reserved.')
        if len(self.vrf) < 1 or len(self.vrf) > 31:
            self.module.fail_json(
                msg='Error: The vrf name length must be between 1 and 31.')

    def get_interface_vpn_name(self, vpninfo, vpn_name):
        """ get vpn instance name"""
        # Record vpn_name if this <l3vpnIfs> subtree lists our interface
        # (compared case-insensitively).
        l3vpn_if = vpninfo.findall("l3vpnIf")
        for l3vpn_ifinfo in l3vpn_if:
            for ele in l3vpn_ifinfo:
                if ele.tag in ['ifName']:
                    if ele.text.lower() == self.vpn_interface.lower():
                        self.intf_info['vrfName'] = vpn_name

    def get_interface_vpn(self):
        """ get the VPN instance associated with the interface"""
        xml_str = CE_NC_GET_VRF_INTERFACE
        con_obj = get_nc_config(self.module, xml_str)
        # An empty <data/> reply means no VPN instances are configured.
        if "<data/>" in con_obj:
            return
        # Strip newlines and XML namespaces so plain (un-namespaced)
        # ElementTree path expressions can be used below.
        xml_str = con_obj.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        # get global vrf interface info
        root = ElementTree.fromstring(xml_str)
        vpns = root.findall(
            "l3vpn/l3vpncomm/l3vpnInstances/l3vpnInstance")
        if vpns:
            for vpnele in vpns:
                vpn_name = None
                for vpninfo in vpnele:
                    # <vrfName> precedes <l3vpnIfs> in the reply, so
                    # vpn_name is set before the interface scan runs.
                    if vpninfo.tag == 'vrfName':
                        vpn_name = vpninfo.text
                    if vpninfo.tag == 'l3vpnIfs':
                        self.get_interface_vpn_name(vpninfo, vpn_name)
        return

    def is_vrf_exist(self):
        """ judge whether the VPN instance is existed"""
        conf_str = CE_NC_GET_VRF % self.vrf
        con_obj = get_nc_config(self.module, conf_str)
        # Empty <data/> reply -> the queried VRF does not exist.
        if "<data/>" in con_obj:
            return False
        return True

    def get_intf_conf_info(self):
        """ get related configuration of the interface"""
        conf_str = CE_NC_GET_INTF % self.vpn_interface
        con_obj = get_nc_config(self.module, conf_str)
        if "<data/>" in con_obj:
            return
        # get interface base info
        xml_str = con_obj.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        root = ElementTree.fromstring(xml_str)
        interface = root.find("ifm/interfaces/interface")
        # NOTE(review): ElementTree element truthiness tests "has child
        # elements", not "was found"; `interface is not None` is the
        # documented idiom. Behavior is the same here because a matched
        # <interface> carries children -- confirm before changing.
        if interface:
            for eles in interface:
                if eles.tag in ["isL2SwitchPort"]:
                    self.intf_info[eles.tag] = eles.text
        self.get_interface_vpn()
        return

    def get_existing(self):
        """get existing config"""
        self.existing = dict(vrf=self.intf_info['vrfName'],
                             vpn_interface=self.vpn_interface)

    def get_proposed(self):
        """get_proposed"""
        self.proposed = dict(vrf=self.vrf,
                             vpn_interface=self.vpn_interface,
                             state=self.state)

    def get_end_state(self):
        """get_end_state"""
        # Re-query the device so end_state reflects post-change reality.
        self.intf_info['vrfName'] = None
        self.get_intf_conf_info()
        self.end_state = dict(vrf=self.intf_info['vrfName'],
                              vpn_interface=self.vpn_interface)

    def show_result(self):
        """ show result"""
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)

    def judge_if_config_exist(self):
        """ judge whether configuration has existed"""
        if self.state == 'absent':
            return False
        # 'state' exists only in proposed, never in existing, so it is
        # always part of the delta; if it is the *only* difference the
        # vrf/interface pair already matches the device.
        delta = set(self.proposed.items()).difference(
            self.existing.items())
        delta = dict(delta)
        if len(delta) == 1 and delta['state']:
            return True
        return False

    def config_interface_vrf(self):
        """ configure VPN instance of the interface"""
        # Merge (bind) when absent-from-device and state=present;
        # delete (unbind) when state=absent. check_response() aborts
        # the module on any non-<ok/> reply.
        if not self.conf_exist and self.state == 'present':
            xml_str = CE_NC_MERGE_VRF_INTERFACE % (
                self.vrf, self.vpn_interface)
            ret_xml = set_nc_config(self.module, xml_str)
            self.check_response(ret_xml, "VRF_INTERFACE_CONFIG")
            self.changed = True
        elif self.state == 'absent':
            xml_str = CE_NC_DEL_INTF_VPN % (self.vrf, self.vpn_interface)
            ret_xml = set_nc_config(self.module, xml_str)
            self.check_response(ret_xml, "DEL_VRF_INTERFACE_CONFIG")
            self.changed = True

    def work(self):
        """execute task"""
        # Pipeline: gather facts -> validate -> diff -> apply -> report.
        # check_params() relies on intf_info filled by get_intf_conf_info().
        self.get_intf_conf_info()
        self.check_params()
        self.get_existing()
        self.get_proposed()
        self.conf_exist = self.judge_if_config_exist()
        self.config_interface_vrf()
        self.get_update_cmd()
        self.get_end_state()
        self.show_result()
def main():
    """Module entry point: build the argument spec and run the task."""
    spec = {
        'vrf': {'required': True, 'type': 'str'},
        'vpn_interface': {'required': True, 'type': 'str'},
        'state': {'choices': ['absent', 'present'],
                  'default': 'present',
                  'required': False},
    }
    # Fold in the common CloudEngine connection arguments.
    spec.update(ce_argument_spec)
    VrfInterface(spec).work()
if __name__ == '__main__':
main()
| gpl-3.0 |
alexus37/AugmentedRealityChess | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GL/SUNX/constant_data.py | 9 | 1094 | '''OpenGL extension SUNX.constant_data
This module customises the behaviour of the
OpenGL.raw.GL.SUNX.constant_data to provide a more
Python-friendly API
Overview (from the spec)
This extension allows the pixel data specified by the
application to be used internally without making a second copy.
This extension affects how the pixel data in client memory is
interpreted and therefore affects DrawPixels, Bitmap,
PolygonStipple, TexImage1D, TexImage2D, TexImage3DEXT,
ColorTableSGI.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SUNX/constant_data.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SUNX.constant_data import *
from OpenGL.raw.GL.SUNX.constant_data import _EXTENSION_NAME
def glInitConstantDataSUNX():
    '''Return boolean indicating whether this extension is available'''
    # NOTE(review): this local import re-binds the module-level
    # 'extensions' name imported at the top of the file; it is
    # redundant but harmless.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | mit |
wangqiang8511/troposphere | examples/RDS_VPC.py | 30 | 3676 | # Converted from RDS_VPC.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
from troposphere import GetAtt, Join, Output, Parameter, Ref, Template
from troposphere.ec2 import SecurityGroup
from troposphere.rds import DBInstance, DBSubnetGroup
# Build the CloudFormation template object that the statements below
# populate with parameters, resources and outputs.
t = Template()

t.add_description(
    "AWS CloudFormation Sample Template VPC_RDS_DB_Instance: Sample template "
    "showing how to create an RDS DBInstance in an existing Virtual Private "
    "Cloud (VPC). **WARNING** This template creates an Amazon Relational "
    "Database Service database instance. You will be billed for the AWS "
    "resources used if you create a stack from this template.")

# --- Input parameters ----------------------------------------------------
vpcid = t.add_parameter(Parameter(
    "VpcId",
    Type="String",
    Description="VpcId of your existing Virtual Private Cloud (VPC)"
))

subnet = t.add_parameter(Parameter(
    "Subnets",
    Type="CommaDelimitedList",
    Description=(
        "The list of SubnetIds, for at least two Availability Zones in the "
        "region in your Virtual Private Cloud (VPC)")
))

dbname = t.add_parameter(Parameter(
    "DBName",
    Default="MyDatabase",
    Description="The database name",
    Type="String",
    MinLength="1",
    MaxLength="64",
    AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
    ConstraintDescription=("must begin with a letter and contain only"
                           " alphanumeric characters.")
))

dbuser = t.add_parameter(Parameter(
    "DBUser",
    NoEcho=True,
    Description="The database admin account username",
    Type="String",
    MinLength="1",
    MaxLength="16",
    AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
    ConstraintDescription=("must begin with a letter and contain only"
                           " alphanumeric characters.")
))

dbpassword = t.add_parameter(Parameter(
    "DBPassword",
    NoEcho=True,
    Description="The database admin account password",
    Type="String",
    MinLength="1",
    MaxLength="41",
    AllowedPattern="[a-zA-Z0-9]*",
    ConstraintDescription="must contain only alphanumeric characters."
))

dbclass = t.add_parameter(Parameter(
    "DBClass",
    Default="db.m1.small",
    Description="Database instance class",
    Type="String",
    AllowedValues=[
        "db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge",
        "db.m2.2xlarge", "db.m2.4xlarge"],
    ConstraintDescription="must select a valid database instance type.",
))

dballocatedstorage = t.add_parameter(Parameter(
    "DBAllocatedStorage",
    Default="5",
    Description="The size of the database (Gb)",
    Type="Number",
    MinValue="5",
    MaxValue="1024",
    ConstraintDescription="must be between 5 and 1024Gb.",
))

# --- Resources -----------------------------------------------------------
# Subnet group spanning the supplied subnets (RDS requires subnets in at
# least two Availability Zones).
mydbsubnetgroup = t.add_resource(DBSubnetGroup(
    "MyDBSubnetGroup",
    DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
    SubnetIds=Ref(subnet),
))

myvpcsecuritygroup = t.add_resource(SecurityGroup(
    "myVPCSecurityGroup",
    GroupDescription="Security group for RDS DB Instance.",
    VpcId=Ref(vpcid)
))

mydb = t.add_resource(DBInstance(
    "MyDB",
    DBName=Ref(dbname),
    AllocatedStorage=Ref(dballocatedstorage),
    DBInstanceClass=Ref(dbclass),
    Engine="MySQL",
    EngineVersion="5.5",
    MasterUsername=Ref(dbuser),
    MasterUserPassword=Ref(dbpassword),
    DBSubnetGroupName=Ref(mydbsubnetgroup),
    VPCSecurityGroups=[Ref(myvpcsecuritygroup)],
))

# --- Outputs -------------------------------------------------------------
t.add_output(Output(
    "JDBCConnectionString",
    Description="JDBC connection string for database",
    Value=Join("", [
        "jdbc:mysql://",
        GetAtt("MyDB", "Endpoint.Address"),
        # Fix: a ':' must separate host and port in a JDBC URL
        # (jdbc:mysql://host:port/db); the original omitted it.
        ":",
        GetAtt("MyDB", "Endpoint.Port"),
        "/",
        Ref(dbname)
    ])
))

print(t.to_json())
| bsd-2-clause |
joisig/grit-i18n | grit/tool/build_unittest.py | 16 | 13283 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the 'grit build' tool.
'''
import codecs
import os
import sys
import tempfile
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit import util
from grit.tool import build
class BuildUnittest(unittest.TestCase):
  # Integration-style tests for 'grit build': each test runs RcBuilder
  # against a testdata .grd file into a fresh temp directory and inspects
  # the generated outputs (dep files, headers, rc files, pak files).

  def testFindTranslationsWithSubstitutions(self):
    # This is a regression test; we had a bug where GRIT would fail to find
    # messages with substitutions e.g. "Hello [IDS_USER]" where IDS_USER is
    # another <message>.
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/substitute.grd')
        self.verbose = False
        self.extra_verbose = False
    builder.Run(DummyOpts(), ['-o', output_dir])

  def testGenerateDepFile(self):
    # A .d file should be written listing the .grd's inputs (the .xmb) as
    # dependencies of the first output (resource.h).
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/substitute.grd')
        self.verbose = False
        self.extra_verbose = False
    expected_dep_file = os.path.join(output_dir, 'substitute.grd.d')
    builder.Run(DummyOpts(), ['-o', output_dir,
                              '--depdir', output_dir,
                              '--depfile', expected_dep_file])
    self.failUnless(os.path.isfile(expected_dep_file))
    with open(expected_dep_file) as f:
      line = f.readline()
      (dep_output_file, deps_string) = line.split(': ')
      deps = deps_string.split(' ')
    self.failUnlessEqual("resource.h", dep_output_file)
    self.failUnlessEqual(1, len(deps))
    self.failUnlessEqual(deps[0],
        util.PathFromRoot('grit/testdata/substitute.xmb'))

  def testGenerateDepFileWithResourceIds(self):
    # With an external first-ids file (-f), that file must also appear in
    # the generated dependency list.
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/substitute_no_ids.grd')
        self.verbose = False
        self.extra_verbose = False
    expected_dep_file = os.path.join(output_dir, 'substitute_no_ids.grd.d')
    builder.Run(DummyOpts(),
        ['-f', util.PathFromRoot('grit/testdata/resource_ids'),
         '-o', output_dir,
         '--depdir', output_dir,
         '--depfile', expected_dep_file])
    self.failUnless(os.path.isfile(expected_dep_file))
    with open(expected_dep_file) as f:
      line = f.readline()
      (dep_output_file, deps_string) = line.split(': ')
      deps = deps_string.split(' ')
    self.failUnlessEqual("resource.h", dep_output_file)
    self.failUnlessEqual(2, len(deps))
    self.failUnlessEqual(deps[0],
        util.PathFromRoot('grit/testdata/substitute.xmb'))
    self.failUnlessEqual(deps[1],
        util.PathFromRoot('grit/testdata/resource_ids'))

  def testAssertOutputs(self):
    # -a asserts the full expected output list; Run returns 2 when the
    # asserted list is incomplete and 0 when it matches.
    output_dir = tempfile.mkdtemp()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/substitute.grd')
        self.verbose = False
        self.extra_verbose = False
    # Incomplete output file list should fail.
    builder_fail = build.RcBuilder()
    self.failUnlessEqual(2,
        builder_fail.Run(DummyOpts(), [
            '-o', output_dir,
            '-a', os.path.abspath(
                os.path.join(output_dir, 'en_generated_resources.rc'))]))
    # Complete output file list should succeed.
    builder_ok = build.RcBuilder()
    self.failUnlessEqual(0,
        builder_ok.Run(DummyOpts(), [
            '-o', output_dir,
            '-a', os.path.abspath(
                os.path.join(output_dir, 'en_generated_resources.rc')),
            '-a', os.path.abspath(
                os.path.join(output_dir, 'sv_generated_resources.rc')),
            '-a', os.path.abspath(
                os.path.join(output_dir, 'resource.h'))]))

  def _verifyWhitelistedOutput(self,
                               filename,
                               whitelisted_ids,
                               non_whitelisted_ids,
                               encoding='utf8'):
    # Helper: scan an output file and assert that every whitelisted ID
    # appears (in order) and no non-whitelisted ID leaks in.
    self.failUnless(os.path.exists(filename))
    whitelisted_ids_found = []
    non_whitelisted_ids_found = []
    with codecs.open(filename, encoding=encoding) as f:
      for line in f.readlines():
        for whitelisted_id in whitelisted_ids:
          if whitelisted_id in line:
            whitelisted_ids_found.append(whitelisted_id)
        for non_whitelisted_id in non_whitelisted_ids:
          if non_whitelisted_id in line:
            non_whitelisted_ids_found.append(non_whitelisted_id)
    self.longMessage = True
    self.assertEqual(whitelisted_ids,
                     whitelisted_ids_found,
                     '\nin file {}'.format(os.path.basename(filename)))
    non_whitelisted_msg = ('Non-Whitelisted IDs {} found in {}'
        .format(non_whitelisted_ids_found, os.path.basename(filename)))
    self.assertFalse(non_whitelisted_ids_found, non_whitelisted_msg)

  def testWhitelistStrings(self):
    # -w whitelist: only whitelisted messages may reach the header and the
    # (UTF-16) .rc output.
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/whitelist_strings.grd')
        self.verbose = False
        self.extra_verbose = False
    whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
    builder.Run(DummyOpts(), ['-o', output_dir,
                              '-w', whitelist_file])
    header = os.path.join(output_dir, 'whitelist_test_resources.h')
    rc = os.path.join(output_dir, 'en_whitelist_test_strings.rc')
    whitelisted_ids = ['IDS_MESSAGE_WHITELISTED']
    non_whitelisted_ids = ['IDS_MESSAGE_NOT_WHITELISTED']
    self._verifyWhitelistedOutput(
        header,
        whitelisted_ids,
        non_whitelisted_ids,
    )
    self._verifyWhitelistedOutput(
        rc,
        whitelisted_ids,
        non_whitelisted_ids,
        encoding='utf16'
    )

  def testWhitelistResources(self):
    # Same as above but for structures/includes; also checks that the
    # resource map header and .pak file are produced at all.
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/whitelist_resources.grd')
        self.verbose = False
        self.extra_verbose = False
    whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
    builder.Run(DummyOpts(), ['-o', output_dir,
                              '-w', whitelist_file])
    header = os.path.join(output_dir, 'whitelist_test_resources.h')
    map_cc = os.path.join(output_dir, 'whitelist_test_resources_map.cc')
    map_h = os.path.join(output_dir, 'whitelist_test_resources_map.h')
    pak = os.path.join(output_dir, 'whitelist_test_resources.pak')
    # Ensure the resource map header and .pak files exist, but don't verify
    # their content.
    self.failUnless(os.path.exists(map_h))
    self.failUnless(os.path.exists(pak))
    whitelisted_ids = [
        'IDR_STRUCTURE_WHITELISTED',
        'IDR_STRUCTURE_IN_TRUE_IF_WHITELISTED',
        'IDR_INCLUDE_WHITELISTED',
    ]
    non_whitelisted_ids = [
        'IDR_STRUCTURE_NOT_WHITELISTED',
        'IDR_STRUCTURE_IN_TRUE_IF_NOT_WHITELISTED',
        'IDR_STRUCTURE_IN_FALSE_IF_WHITELISTED',
        'IDR_STRUCTURE_IN_FALSE_IF_NOT_WHITELISTED',
        'IDR_INCLUDE_NOT_WHITELISTED',
    ]
    for output_file in (header, map_cc):
      self._verifyWhitelistedOutput(
          output_file,
          whitelisted_ids,
          non_whitelisted_ids,
      )

  def testOutputAllResourceDefinesTrue(self):
    # --output-all-resource-defines: every define appears in the outputs
    # even when not whitelisted.
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/whitelist_resources.grd')
        self.verbose = False
        self.extra_verbose = False
    whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
    builder.Run(DummyOpts(), ['-o', output_dir,
                              '-w', whitelist_file,
                              '--output-all-resource-defines',])
    header = os.path.join(output_dir, 'whitelist_test_resources.h')
    map_cc = os.path.join(output_dir, 'whitelist_test_resources_map.cc')
    whitelisted_ids = [
        'IDR_STRUCTURE_WHITELISTED',
        'IDR_STRUCTURE_NOT_WHITELISTED',
        'IDR_STRUCTURE_IN_TRUE_IF_WHITELISTED',
        'IDR_STRUCTURE_IN_TRUE_IF_NOT_WHITELISTED',
        'IDR_STRUCTURE_IN_FALSE_IF_WHITELISTED',
        'IDR_STRUCTURE_IN_FALSE_IF_NOT_WHITELISTED',
        'IDR_INCLUDE_WHITELISTED',
        'IDR_INCLUDE_NOT_WHITELISTED',
    ]
    non_whitelisted_ids = []
    for output_file in (header, map_cc):
      self._verifyWhitelistedOutput(
          output_file,
          whitelisted_ids,
          non_whitelisted_ids,
      )

  def testOutputAllResourceDefinesFalse(self):
    # --no-output-all-resource-defines: non-whitelisted defines must be
    # stripped from the outputs.
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/whitelist_resources.grd')
        self.verbose = False
        self.extra_verbose = False
    whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
    builder.Run(DummyOpts(), ['-o', output_dir,
                              '-w', whitelist_file,
                              '--no-output-all-resource-defines',])
    header = os.path.join(output_dir, 'whitelist_test_resources.h')
    map_cc = os.path.join(output_dir, 'whitelist_test_resources_map.cc')
    whitelisted_ids = [
        'IDR_STRUCTURE_WHITELISTED',
        'IDR_STRUCTURE_IN_TRUE_IF_WHITELISTED',
        'IDR_INCLUDE_WHITELISTED',
    ]
    non_whitelisted_ids = [
        'IDR_STRUCTURE_NOT_WHITELISTED',
        'IDR_STRUCTURE_IN_TRUE_IF_NOT_WHITELISTED',
        'IDR_STRUCTURE_IN_FALSE_IF_WHITELISTED',
        'IDR_STRUCTURE_IN_FALSE_IF_NOT_WHITELISTED',
        'IDR_INCLUDE_NOT_WHITELISTED',
    ]
    for output_file in (header, map_cc):
      self._verifyWhitelistedOutput(
          output_file,
          whitelisted_ids,
          non_whitelisted_ids,
      )

  def testWriteOnlyNew(self):
    # --write-only-new 1 must leave an up-to-date file untouched (mtime
    # preserved), while 0 always rewrites it.
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/substitute.grd')
        self.verbose = False
        self.extra_verbose = False
    UNCHANGED = 10
    header = os.path.join(output_dir, 'resource.h')
    builder.Run(DummyOpts(), ['-o', output_dir])
    self.failUnless(os.path.exists(header))
    # NOTE(review): first_mtime is captured but never asserted on.
    first_mtime = os.stat(header).st_mtime
    os.utime(header, (UNCHANGED, UNCHANGED))
    builder.Run(DummyOpts(), ['-o', output_dir, '--write-only-new', '0'])
    self.failUnless(os.path.exists(header))
    second_mtime = os.stat(header).st_mtime
    os.utime(header, (UNCHANGED, UNCHANGED))
    builder.Run(DummyOpts(), ['-o', output_dir, '--write-only-new', '1'])
    self.failUnless(os.path.exists(header))
    third_mtime = os.stat(header).st_mtime
    self.assertTrue(abs(second_mtime - UNCHANGED) > 5)
    self.assertTrue(abs(third_mtime - UNCHANGED) < 5)

  def testGenerateDepFileWithDependOnStamp(self):
    # --depend-on-stamp: the .d file's target is a stamp file that gets
    # touched on every build, even when no output changed.
    output_dir = tempfile.mkdtemp()
    builder = build.RcBuilder()
    class DummyOpts(object):
      def __init__(self):
        self.input = util.PathFromRoot('grit/testdata/substitute.grd')
        self.verbose = False
        self.extra_verbose = False
    expected_dep_file_name = 'substitute.grd.d'
    expected_stamp_file_name = expected_dep_file_name + '.stamp'
    expected_dep_file = os.path.join(output_dir, expected_dep_file_name)
    expected_stamp_file = os.path.join(output_dir, expected_stamp_file_name)
    if os.path.isfile(expected_stamp_file):
      os.remove(expected_stamp_file)
    builder.Run(DummyOpts(), ['-o', output_dir,
                              '--depdir', output_dir,
                              '--depfile', expected_dep_file,
                              '--depend-on-stamp'])
    self.failUnless(os.path.isfile(expected_stamp_file))
    first_mtime = os.stat(expected_stamp_file).st_mtime
    # Reset mtime to very old.
    OLDTIME = 10
    os.utime(expected_stamp_file, (OLDTIME, OLDTIME))
    builder.Run(DummyOpts(), ['-o', output_dir,
                              '--depdir', output_dir,
                              '--depfile', expected_dep_file,
                              '--depend-on-stamp'])
    self.failUnless(os.path.isfile(expected_stamp_file))
    second_mtime = os.stat(expected_stamp_file).st_mtime
    # Some OS have a 2s stat resolution window, so can't do a direct comparison.
    self.assertTrue((second_mtime - OLDTIME) > 5)
    self.assertTrue(abs(second_mtime - first_mtime) < 5)
    self.failUnless(os.path.isfile(expected_dep_file))
    with open(expected_dep_file) as f:
      line = f.readline()
      (dep_output_file, deps_string) = line.split(': ')
      deps = deps_string.split(' ')
    self.failUnlessEqual(expected_stamp_file_name, dep_output_file)
    self.failUnlessEqual(1, len(deps))
    self.failUnlessEqual(deps[0],
        util.PathFromRoot('grit/testdata/substitute.xmb'))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
sjug/origin | vendor/github.com/heketi/heketi/extras/tools/comparison.py | 8 | 10218 | #!/usr/bin/env python
#
# Copyright (c) 2018 The heketi Authors
#
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), or the GNU General Public License, version 2 (GPLv2), in all
# cases as published by the Free Software Foundation.
#
import argparse
import json
import sys
import yaml
# Help text for the argparse parser (description / epilog).
DESC = """
Compare outputs of gluster and/or heketi and/or openshift/k8s.
Prints lists of volumes where sources differ.
"""

EXAMPLE = """
Example:
$ python3 comparison.py
--gluster-info gluster-volume-info.txt
--heketi-json heketi-db.json
--pv-yaml openshift-pv-yaml.yaml
"""

# flag constants
# Keys recording which data source reported an item; also used as the
# "expected sources" check-lists passed to compare()/compare_bricks().
IN_GLUSTER = 'gluster'
IN_HEKETI = 'heketi'
IN_PVS = 'pvs'
IS_BLOCK = 'BV'  # output flag marking block volumes
class CliError(ValueError):
    """Invalid command-line option combination; converted to parser.error() in main()."""
    pass
def main():
    """Parse command-line options and run the volume or brick comparison.

    CliError raised by the examine_* helpers (bad option combinations) is
    converted into an argparse error, which prints usage and exits non-zero.
    """
    parser = argparse.ArgumentParser(description=DESC, epilog=EXAMPLE)
    parser.add_argument(
        '--gluster-info', '-g',
        help='Path to a file containing gluster volume info')
    parser.add_argument(
        '--heketi-json', '-j',
        help='Path to a file containing Heketi db json export')
    parser.add_argument(
        '--pv-yaml', '-y',
        help='Path to a file containing PV yaml data')
    parser.add_argument(
        '--skip-ok', '-K', action='store_true',
        help='Exclude matching items from output')
    parser.add_argument(
        '--pending', action='store_true',
        help='Show heketi pending status (best effort)')
    parser.add_argument(
        '--no-header', '-H', action='store_true',
        help='Do not print column header')
    parser.add_argument(
        '--ignore', '-I', action='append',
        # Typo fix: help text previously read "Exlude".
        help='Exclude given volume name (multiple allowed)')
    parser.add_argument(
        '--match-storage-class', '-S', action='append',
        help='Match one or more storage class names')
    parser.add_argument(
        '--skip-block', action='store_true',
        help='Exclude block volumes from output')
    parser.add_argument(
        '--bricks', action='store_true',
        help='Compare bricks rather than volumes')
    cli = parser.parse_args()
    try:
        if cli.bricks:
            return examine_bricks(cli)
        return examine_volumes(cli)
    except CliError as err:
        parser.error(str(err))
def examine_volumes(cli):
    """Build the per-volume summary from the requested sources and print it.

    Raises CliError when no input source was given on the command line.
    """
    sources = []
    gvinfo = heketi = pvdata = None
    if cli.gluster_info:
        sources.append(IN_GLUSTER)
        gvinfo = parse_gvinfo(cli.gluster_info)
    if cli.heketi_json:
        sources.append(IN_HEKETI)
        heketi = parse_heketi(cli.heketi_json)
    if cli.pv_yaml:
        sources.append(IN_PVS)
        pvdata = parse_oshift(cli.pv_yaml)
    if not sources:
        raise CliError(
            "Must provide: --gluster-info OR --heketi-json OR --pv-yaml")
    summary = compile_summary(cli, gvinfo, heketi, pvdata)
    # Drop volumes the user explicitly asked to ignore, noting each one.
    for name in (cli.ignore or []):
        if summary.pop(name, None):
            sys.stderr.write('ignoring: {}\n'.format(name))
    compare(summary, sources, cli.skip_ok,
            header=(not cli.no_header),
            show_pending=cli.pending,
            skip_block=cli.skip_block)
def examine_bricks(cli):
    """Build the per-brick summary from gluster/heketi data and print it.

    Raises CliError when neither input source was given.

    Fix: forward ``--no-header`` to compare_bricks(); previously the option
    was silently ignored in brick mode while examine_volumes honored it.
    """
    check = []
    gvinfo = heketi = None
    if cli.gluster_info:
        check.append(IN_GLUSTER)
        gvinfo = parse_gvinfo(cli.gluster_info)
    if cli.heketi_json:
        check.append(IN_HEKETI)
        heketi = parse_heketi(cli.heketi_json)
    if not check:
        raise CliError(
            "Must provide: --gluster-info and --heketi-json")
    summary = compile_brick_summary(cli, gvinfo, heketi)
    compare_bricks(summary, check,
                   header=(not cli.no_header),
                   skip_ok=cli.skip_ok)
def parse_heketi(h_json):
    """Load and return a heketi db JSON export from the file at *h_json*."""
    with open(h_json) as src:
        return json.load(src)
def parse_oshift(yf):
    """Load and return openshift/k8s PV data from the YAML file at *yf*."""
    with open(yf) as src:
        return yaml.safe_load(src)
def parse_gvlist(gvl):
    """Read a file of volume names, one per line, into {name: []}."""
    with open(gvl) as src:
        return {line.strip(): [] for line in src}
def parse_gvinfo(gvi):
    """Parse `gluster volume info` output into {volume_name: [brick, ...]}.

    Raises ValueError if a brick line appears before any volume header.
    """
    vols = {}
    current = None
    with open(gvi) as src:
        for raw in src:
            text = raw.strip()
            if text.startswith("Volume Name:"):
                current = text.split(":", 1)[-1].strip()
                vols[current] = []
            elif text.startswith('Brick') and text != "Bricks:":
                # "BrickN: host:/path" -> "host:/path"
                if current is None:
                    raise ValueError("Got Brick before volume: %s" % text)
                vols[current].append(text.split(":", 1)[-1].strip())
    return vols
def compile_heketi(summary, heketi):
    """Record heketi file volumes and block volumes in *summary* by name.

    Each entry carries the heketi id, plus 'heketi-pending' when the db shows
    a pending operation and 'heketi-bhv' for block-hosting volumes.
    """
    for vol_id, vol in heketi['volumeentries'].items():
        entry = {'id': vol_id, IN_HEKETI: True}
        if vol['Pending']['Id']:
            entry['heketi-pending'] = True
        if vol['Info'].get('block'):
            entry['heketi-bhv'] = True
        summary[vol['Info']['name']] = entry
    for bv_id, bv in heketi['blockvolumeentries'].items():
        entry = {IN_HEKETI: True, 'block': True, 'id': bv_id}
        if bv['Pending']['Id']:
            entry['heketi-pending'] = True
        summary[bv['Info']['name']] = entry
def compile_heketi_bricks(summary, heketi):
    """Record every heketi brick in *summary*, keyed by 'host:path'."""
    nodes = heketi['nodeentries']
    volumes = heketi['volumeentries']
    for brick in heketi['brickentries'].values():
        info = brick['Info']
        # Resolve the node id to its first storage hostname.
        host = nodes[info['node']]['Info']['hostnames']['storage'][0]
        key = '{}:{}'.format(host, info['path'])
        entry = summary.setdefault(key, {})
        entry[IN_HEKETI] = True
        entry['heketi_volume'] = volumes[info['volume']]['Info']['name']
def compile_gvinfo(summary, gvinfo):
    """Flag every gluster-reported volume name in *summary*."""
    for name in gvinfo:
        summary.setdefault(name, {})[IN_GLUSTER] = True
def compile_gvinfo_bricks(summary, gvinfo):
    """Record every gluster brick in *summary*, keyed by 'host:path'."""
    for vol_name, bricks in gvinfo.items():
        for brick in bricks:
            entry = summary.setdefault(brick, {})
            entry[IN_GLUSTER] = True
            entry['gluster_volume'] = vol_name
def compile_pvdata(summary, pvdata, matchsc):
    """Record gluster-backed PVs from openshift/k8s data in *summary*.

    File volumes are identified by spec.glusterfs.path; block volumes by the
    glusterBlockShare metadata annotation. When *matchsc* is non-empty, PVs
    from other storage classes are skipped with a note on stderr.

    Fix: resolve the volume name before the storage-class filter. The old
    code logged g["path"] inside the filter, which raised KeyError for block
    volumes (their glusterfs spec is empty).
    """
    for elem in pvdata['items']:
        g = elem.get('spec', {}).get('glusterfs', {})
        ma = elem.get('metadata', {}).get('annotations', {})
        if not g and 'glusterBlockShare' not in ma:
            continue  # not a gluster-backed PV
        if 'path' in g:
            vn = g['path']
            block = False
        elif 'glusterBlockShare' in ma:
            vn = ma['glusterBlockShare']
            block = True
        else:
            raise KeyError('path (volume name) not found in PV data')
        sc = elem.get('spec', {}).get('storageClassName', '')
        if matchsc and sc not in matchsc:
            sys.stderr.write(
                'ignoring: {} from storage class "{}"\n'.format(vn, sc))
            continue
        dest = summary.setdefault(vn, {})
        dest[IN_PVS] = True
        if block:
            dest['block'] = True
def compile_summary(cli, gvinfo, heketi, pvdata):
    """Merge all provided volume sources into one {volume_name: state} dict.

    Heketi is compiled first so its 'id'/pending fields seed each entry;
    gluster and PV data then add presence flags to existing entries.
    """
    summary = {}
    if heketi:
        compile_heketi(summary, heketi)
    if gvinfo:
        compile_gvinfo(summary, gvinfo)
    if pvdata:
        compile_pvdata(summary, pvdata, matchsc=cli.match_storage_class)
    return summary
def compile_brick_summary(cli, gvinfo, heketi):
    """Merge gluster and heketi brick data into one {brick_path: state} dict."""
    summary = {}
    if gvinfo:
        compile_gvinfo_bricks(summary, gvinfo)
    if heketi:
        compile_heketi_bricks(summary, heketi)
    return summary
def _check_item(vname, vstate, check):
    """Return (ok, flags) for one volume.

    ok is True when every expected source in *check* saw the volume; flags
    lists the sources that did see it, prefixed with IS_BLOCK for block
    volumes.
    """
    expected = set(check)
    flags = []
    if vstate.get('block'):
        flags.append(IS_BLOCK)
        # block volumes will never be found in gluster info
        expected.discard(IN_GLUSTER)
    present = set(src for src in expected if vstate.get(src))
    flags.extend(sorted(present))
    return present == expected, flags
def compare(summary, check, skip_ok=False, header=True, show_pending=False,
            skip_block=False):
    """Print one comparison row per volume in *summary*.

    Each row shows the volume name, either 'ok' or the comma-joined list of
    sources that did see it, and the heketi id (optionally annotated with
    /pending and /block-hosting).

    Fix: test membership with the IS_BLOCK constant instead of the duplicated
    'BV' literal, keeping the flag definition in one place.
    """
    if header:
        _print = Printer(['Volume-Name', 'Match', 'Volume-ID'])
    else:
        _print = Printer([])
    for vn, vs in summary.items():
        ok, flags = _check_item(vn, vs, check)
        if ok and skip_ok:
            continue
        if IS_BLOCK in flags and skip_block:
            continue
        heketi_info = vs.get('id', '')
        if show_pending and vs.get('heketi-pending'):
            heketi_info += '/pending'
        if vs.get('heketi-bhv'):
            heketi_info += '/block-hosting'
        sts = 'ok' if ok else ','.join(flags)
        _print.line(vn, sts, heketi_info)
def _check_brick(bpath, bstate, check):
tocheck = set(check)
flags = []
volumes = []
m = set(c for c in tocheck if bstate.get(c))
flags.extend(sorted(m))
gv = bstate.get('gluster_volume')
hv = bstate.get('heketi_volume')
ok = False
if m == tocheck and gv == hv:
ok = True
volumes = ['match={}'.format(gv)]
else:
if gv:
volumes.append('gluster={}'.format(gv))
if hv:
volumes.append('heketi={}'.format(hv))
return ok, flags, volumes
def compare_bricks(summary, check, header=True, skip_ok=False):
    """Print one comparison row per brick in *summary*."""
    columns = ['Brick-Path', 'Match', 'Volumes'] if header else []
    _print = Printer(columns)
    for path, state in summary.items():
        ok, flags, volumes = _check_brick(path, state, check)
        if ok and skip_ok:
            continue
        status = 'ok' if ok else ','.join(flags)
        _print.line(path, status, ','.join(volumes))
class Printer(object):
    """Utility class for printing columns w/ headers.

    The header row (if any) is emitted lazily, just before the first data
    row, so its labels can be padded to the first row's column widths.
    """

    def __init__(self, header):
        self.header = header or []
        self._did_header = False

    def line(self, *columns):
        """Print one space-separated row, preceded by the header once."""
        if self.header and not self._did_header:
            self._print_header(columns)
            self._did_header = True
        print(' '.join(columns))

    def _print_header(self, columns):
        # Pad each label to the width of the matching first-row column so
        # the header lines up with the data beneath it.
        padded = [
            '{}{}'.format(label, ' ' * max(0, len(columns[idx]) - len(label)))
            for idx, label in enumerate(self.header)
        ]
        print(' '.join(padded))
if __name__ == '__main__':
    # Allow running the comparison directly as a script.
    main()
| apache-2.0 |
Orav/kbengine | kbe/res/scripts/common/Lib/test/test_netrc.py | 2 | 4737 | import netrc, os, unittest, sys, textwrap
from test import support

# Scratch file path shared by the tests; make_nrc() registers a cleanup for it.
temp_filename = support.TESTFN
class NetrcTestCase(unittest.TestCase):
    """Tests for the stdlib netrc parser: entries, macros, comments, security."""

    def make_nrc(self, test_data):
        """Write *test_data* (dedented) to a scratch file and parse it with netrc."""
        test_data = textwrap.dedent(test_data)
        mode = 'w'
        if sys.platform != 'cygwin':
            mode += 't'
        with open(temp_filename, mode) as fp:
            fp.write(test_data)
        self.addCleanup(os.unlink, temp_filename)
        return netrc.netrc(temp_filename)

    def test_default(self):
        """Named machine entries and the 'default' entry are both parsed."""
        nrc = self.make_nrc("""\
machine host1.domain.com login log1 password pass1 account acct1
default login log2 password pass2
""")
        self.assertEqual(nrc.hosts['host1.domain.com'],
                         ('log1', 'acct1', 'pass1'))
        self.assertEqual(nrc.hosts['default'], ('log2', None, 'pass2'))

    def test_macros(self):
        """macdef blocks (terminated by a blank line) populate nrc.macros."""
        nrc = self.make_nrc("""\
macdef macro1
line1
line2

macdef macro2
line3
line4

""")
        self.assertEqual(nrc.macros, {'macro1': ['line1\n', 'line2\n'],
                                      'macro2': ['line3\n', 'line4\n']})

    def _test_passwords(self, nrc, passwd):
        # Shared helper: parse *nrc* and check the stored password equals *passwd*.
        nrc = self.make_nrc(nrc)
        self.assertEqual(nrc.hosts['host.domain.com'], ('log', 'acct', passwd))

    def test_password_with_leading_hash(self):
        self._test_passwords("""\
machine host.domain.com login log password #pass account acct
""", '#pass')

    def test_password_with_trailing_hash(self):
        self._test_passwords("""\
machine host.domain.com login log password pass# account acct
""", 'pass#')

    def test_password_with_internal_hash(self):
        self._test_passwords("""\
machine host.domain.com login log password pa#ss account acct
""", 'pa#ss')

    def _test_comment(self, nrc, passwd='pass'):
        # Shared helper: both machines must parse despite the comment in *nrc*.
        nrc = self.make_nrc(nrc)
        self.assertEqual(nrc.hosts['foo.domain.com'], ('bar', None, passwd))
        self.assertEqual(nrc.hosts['bar.domain.com'], ('foo', None, 'pass'))

    def test_comment_before_machine_line(self):
        self._test_comment("""\
# comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")

    def test_comment_before_machine_line_no_space(self):
        self._test_comment("""\
#comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")

    def test_comment_before_machine_line_hash_only(self):
        self._test_comment("""\
#
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")

    def test_comment_at_end_of_machine_line(self):
        self._test_comment("""\
machine foo.domain.com login bar password pass # comment
machine bar.domain.com login foo password pass
""")

    def test_comment_at_end_of_machine_line_no_space(self):
        self._test_comment("""\
machine foo.domain.com login bar password pass #comment
machine bar.domain.com login foo password pass
""")

    def test_comment_at_end_of_machine_line_pass_has_hash(self):
        self._test_comment("""\
machine foo.domain.com login bar password #pass #comment
machine bar.domain.com login foo password pass
""", '#pass')

    @unittest.skipUnless(os.name == 'posix', 'POSIX only test')
    def test_security(self):
        """netrc must reject a world-readable file that contains passwords."""
        # This test is incomplete since we are normally not run as root and
        # therefore can't test the file ownership being wrong.
        d = support.TESTFN
        os.mkdir(d)
        self.addCleanup(support.rmtree, d)
        fn = os.path.join(d, '.netrc')
        with open(fn, 'wt') as f:
            f.write("""\
machine foo.domain.com login bar password pass
default login foo password pass
""")
        with support.EnvironmentVarGuard() as environ:
            environ.set('HOME', d)
            os.chmod(fn, 0o600)
            nrc = netrc.netrc()
            self.assertEqual(nrc.hosts['foo.domain.com'],
                             ('bar', None, 'pass'))
            # Loosening permissions must make the implicit ~/.netrc load fail.
            os.chmod(fn, 0o622)
            self.assertRaises(netrc.NetrcParseError, netrc.netrc)
def test_main():
    """Entry point used by regrtest and direct execution."""
    support.run_unittest(NetrcTestCase)

if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.