repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
samedder/azure-cli | src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/tests/test_service_principals.py | 4 | 4792 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import tempfile
import unittest
import mock
from azure.cli.core.util import CLIError
from azure.cli.command_modules.acs.custom import (
_validate_service_principal,
load_acs_service_principal,
store_acs_service_principal,
_build_service_principal)
class AcsServicePrincipalTest(unittest.TestCase):
    """Unit tests for ACS service-principal credential storage and validation.

    Covers round-tripping service-principal credentials through a JSON
    config file, plus the validation/creation helpers that talk to the
    graph client (mocked here).
    """

    def test_load_non_existent_service_principal(self):
        # Loading from a missing config file should return None, not raise.
        principal = load_acs_service_principal('some-id', config_path='non-existent-file.json')
        self.assertIsNone(principal)

    def test_round_trip_one_subscription(self):
        # delete=False so the helpers can re-open the file by name
        # (re-opening an open NamedTemporaryFile fails on Windows).
        store_file = tempfile.NamedTemporaryFile(delete=False)
        store_file.close()
        service_principal = '12345'
        sub_id = '67890'
        client_secret = 'foobar'
        store_acs_service_principal(
            sub_id, client_secret, service_principal, config_path=store_file.name)
        obj = load_acs_service_principal(sub_id, config_path=store_file.name)
        self.assertIsNotNone(obj)
        self.assertEqual(obj.get('service_principal'), service_principal)
        self.assertEqual(obj.get('client_secret'), client_secret)
        os.remove(store_file.name)

    def test_round_trip_multi_subscription(self):
        store_file = tempfile.NamedTemporaryFile(delete=False)
        store_file.close()
        # Each entry is (subscription_id, client_secret, service_principal).
        principals = [
            ('12345', '67890', 'foobar'),
            ('abcde', 'fghij', 'foobaz'),
        ]
        # Store them all
        for principal in principals:
            store_acs_service_principal(
                principal[0], principal[1], principal[2], config_path=store_file.name)
        # Make sure it worked
        for principal in principals:
            obj = load_acs_service_principal(principal[0], config_path=store_file.name)
            self.assertIsNotNone(obj, 'expected non-None for {}'.format(principal[0]))
            self.assertEqual(obj.get('service_principal'), principal[2])
            self.assertEqual(obj.get('client_secret'), principal[1])
        # Change one entry and verify the overwrite is returned on reload.
        new_principal = 'foo'
        new_secret = 'bar'
        store_acs_service_principal(
            principals[0][0], new_secret, new_principal, config_path=store_file.name)
        obj = load_acs_service_principal(principals[0][0], config_path=store_file.name)
        self.assertIsNotNone(obj, 'expected non-None for {}'.format(principals[0][0]))
        self.assertEqual(obj.get('service_principal'), new_principal)
        self.assertEqual(obj.get('client_secret'), new_secret)
        os.remove(store_file.name)

    def test_validate_service_principal_ok(self):
        # A client whose list() call succeeds (even with no results) validates.
        client = mock.MagicMock()
        client.service_principals = mock.Mock()
        client.service_principals.list.return_value = []
        _validate_service_principal(client, '27497b5e-7ea6-4ff2-a883-b3db4e08d937')
        self.assertTrue(client.service_principals.list.called)
        expected_calls = [
            mock.call(
                filter="servicePrincipalNames/any(c:c eq '27497b5e-7ea6-4ff2-a883-b3db4e08d937')"),
        ]
        client.service_principals.list.assert_has_calls(expected_calls)

    def test_validate_service_principal_fail(self):
        # Errors raised by the graph client must surface to the user as CLIError.
        client = mock.MagicMock()
        client.service_principals = mock.Mock()
        client.service_principals.list.side_effect = KeyError('foo')
        with self.assertRaises(CLIError):
            _validate_service_principal(client, '27497b5e-7ea6-4ff2-a883-b3db4e08d937')

    def test_build_service_principal(self):
        app_id = '27497b5e-7ea6-4ff2-a883-b3db4e08d937'
        client = mock.MagicMock()
        client.service_principals = mock.Mock()
        client.applications = mock.Mock()
        client.applications.create.return_value.app_id = app_id
        client.applications.list.return_value = []
        name = "foo"
        url = "http://contuso.com"
        secret = "notASecret"
        _build_service_principal(client, name, url, secret)
        # Build flow: create the application, poll for it by appId, then
        # create the service principal against the new application.
        self.assertTrue(client.applications.create.called)
        self.assertTrue(client.applications.list.called)
        self.assertTrue(client.service_principals.create.called)
        expected_calls = [
            mock.call(
                filter="appId eq '{}'".format(app_id))
        ]
        client.applications.list.assert_has_calls(expected_calls)
        # TODO better matcher here
        client.applications.create.assert_called_with(mock.ANY)
# Allow running this test module directly: python test_service_principals.py
if __name__ == '__main__':
    unittest.main()
| mit |
slawler/slawler.github.io | nwm_pages/plts/usgs_for_nwm_datagrabber.py | 1 | 5379 | # -*- coding: utf-8 - Python 3.5.1 *-
"""
Description: Grab Time Series data From USGS Web Service
Input(s) : USGS Gages, Parameters
Output(s) : .rdb time series files
slawler@dewberry.com
Created on Tue Apr 19 15:08:33 2016
"""
# Import libraries
import pandas as pd
import requests
import json
from datetime import datetime, timedelta
from collections import OrderedDict
import matplotlib.pyplot as plt
from matplotlib import pylab
from matplotlib.dates import DayLocator, HourLocator, DateFormatter
from matplotlib.font_manager import FontProperties
def _fetch_usgs_series(gage, first, last, parameter, dformat, url):
    """Issue one waterservices request; return (full_json, first_series, params).

    Raises (propagates) a requests error, or IndexError/KeyError when the
    gage has no series for the requested parameter code, letting the
    caller fall back to another parameter.
    """
    params = OrderedDict([('format', dformat), ('sites', gage), ('startDT', first),
                          ('endDT', last), ('parameterCD', parameter)])
    r = requests.get(url, params=params)
    print("\nRetrieved Data for USGS Gage: ", gage)
    data = r.content.decode()
    d = json.loads(data)
    mydict = dict(d['value']['timeSeries'][0])
    return d, mydict, params


def GetTimeSeries(gage, start, stop):
    """Download a USGS instantaneous-values time series for one gage.

    Tries stage ("00065") first and falls back to discharge ("00060") when
    the stage request fails or returns no series.  Returns a DataFrame
    indexed by UTC-adjusted timestamps with one column, named "Stage" or
    "StreamFlow" according to which parameter was retrieved.
    """
    parameter = ["00065", "00060"]                # Try stage first, then flow
    dformat = "json"                              # Data format
    url = 'http://waterservices.usgs.gov/nwis/iv' # USGS API
    # Format datetime objects for the USGS API.
    first = datetime.date(start).strftime('%Y-%m-%d')
    last = datetime.date(stop).strftime('%Y-%m-%d')
    # Fetch stage, falling back to flow.  The original duplicated the whole
    # request body inside a bare `except:`; the fallback here still triggers
    # on any failure, but the second attempt's exception propagates.
    try:
        d, mydict, params = _fetch_usgs_series(gage, first, last, parameter[0], dformat, url)
    except Exception:  # no stage series available -- fall back to discharge
        d, mydict, params = _fetch_usgs_series(gage, first, last, parameter[1], dformat, url)
    if params['parameterCD'] == '00060':
        obser = "StreamFlow"
    else:
        obser = "Stage"
    # Station name, for operator feedback.
    SiteName = mydict['sourceInfo']['siteName']
    print('\n', SiteName)
    # Select only the observation records from the JSON payload.
    tseries = d['value']['timeSeries'][0]['values'][0]['value'][:]
    # Build a DataFrame; parse timestamps and shift them to UTC.
    df = pd.DataFrame.from_dict(tseries)
    df.index = pd.to_datetime(df['dateTime'], format='%Y-%m-%d{}%H:%M:%S'.format('T'))
    # NOTE(review): assumes timestamps like '2016-04-19T15:00:00.000-04:00'
    # with a single-digit offset hour -- confirm for gages outside UTC-1..-9.
    df['UTC Offset'] = df['dateTime'].apply(lambda x: x.split('-')[3][1])
    df['UTC Offset'] = df['UTC Offset'].apply(lambda x: pd.to_timedelta('{} hours'.format(x)))
    df.index = df.index - df['UTC Offset']
    df.value = pd.to_numeric(df.value)
    # Drop bookkeeping columns and rename the observation column.
    # (axis keyword: positional `axis` was removed in pandas 2.0.)
    df = df.drop('dateTime', axis=1)
    df.drop('qualifiers', axis=1, inplace=True)
    df.drop('UTC Offset', axis=1, inplace=True)
    df = df.rename(columns={'value': obser})
    return df
# Enter Desired Data Download Period
y0, m0, d0 = 2004, 10, 6  # Start date (year, month, day)
y1, m1, d1 = 2017, 1, 1   # End date (year, month, day)
# Create Datetime Objects
start = datetime(y0, m0, d0, 0)
stop = datetime(y1, m1, d1, 0)

# Download the three Anacostia gages and find when each one peaked.
gage = "01651750"  # 'Anacostia, DS Tidal Gage Max'
df_ANAD2 = GetTimeSeries(gage, start, stop)
max_anad = df_ANAD2.idxmax()[0]
gage = "01649500"  # 'Anacostia, NE Branch'
df_RVDM2 = GetTimeSeries(gage, start, stop)
max_rvdm = df_RVDM2.idxmax()[0]
gage = "01651000"  # 'Anacostia, NW Branch'
df_ACOM2 = GetTimeSeries(gage, start, stop)
max_acom = df_ACOM2.idxmax()[0]

#---Set Plotting Window & Station Max ID
# NOTE(review): the three assignment groups below overwrite each other;
# only the last one (NE Branch) takes effect.  Comment out the unwanted
# groups to plot a different station's window.
curr_plot = 'Anacostia, DS Tidal Gage Max'
pltfrom = max_anad - timedelta(days=2)
pltto = max_anad + timedelta(days=2)
curr_plot = 'Anacostia, NW Branch'
pltfrom = max_acom - timedelta(days=2)
pltto = max_acom + timedelta(days=2)
plt.interactive(False)
curr_plot = 'Anacostia, NE Branch'
pltfrom = max_rvdm - timedelta(days=2)
pltto = max_rvdm + timedelta(days=2)

#--------PLOT
fig, ax = plt.subplots(figsize=(14, 8))
#--Plot medium_range NWM
# Slice each series to the +/- 2 day window around the chosen peak.
x0 = df_ANAD2[pltfrom:pltto].index
y0 = df_ANAD2[pltfrom:pltto]['Stage']
x1 = df_RVDM2[pltfrom:pltto].index
y1 = df_RVDM2[pltfrom:pltto]['Stage']
x2 = df_ACOM2[pltfrom:pltto].index
y2 = df_ACOM2[pltfrom:pltto]['Stage']
ax.scatter(x0, y0, color='black', label='Anacostia, DS Tidal Gage')
ax.plot(x1, y1, color='r', label='Anacostia, NE Branch')
ax.plot(x2, y2, color='b', label='Anacostia, NW Branch')
ax.set_xlim(pltfrom, pltto)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, scatterpoints=1)
ax.legend(loc='best', fontsize='small')
#--Write in Labels
plt.ylabel('Stage (ft)')
plt.xlabel(pltto.year)
plt.title('Local Max: {}'.format(curr_plot))
#plot_name = os.path.join(root_dir, 'Levee_Seg_{}.png'.format(segment))
plt.grid(True)
# Tick every 12 hours, labeled like "03PM / Fri / Oct07".
plt.gca().xaxis.set_major_formatter(DateFormatter('%I%p\n%a\n%b%d'))
plt.gca().xaxis.set_major_locator(HourLocator(byhour=range(24), interval=12))
plt.savefig(curr_plot + '.png', dpi=600)
| mit |
stanlyxiang/incubator-hawq | tools/bin/pythonSrc/pychecker-0.8.18/pychecker/Warning.py | 7 | 1437 | #!/usr/bin/env python
# Copyright (c) 2001, MetaSlash Inc. All rights reserved.
# Portions Copyright (c) 2005, Google, Inc. All rights reserved.
"""
Warning class to hold info about each warning.
"""
class Warning :
    """Holds the location (file, line) and payload of one checker warning."""

    def __init__(self, file, line, err) :
        """Build a warning; ``file``/``line`` may also be function or code objects."""
        # 'file' may be a function wrapper or a code object; reduce to a filename.
        if hasattr(file, "function") :
            file = file.function.func_code.co_filename
        elif hasattr(file, "co_filename") :
            file = file.co_filename
        elif hasattr(line, "co_filename") :
            file = line.co_filename
        # Normalize a leading './' away so reports are stable.
        if file[:2] == './' :
            file = file[2:]
        self.file = file
        # 'line' may be a code object; reduce to its first line number.
        if hasattr(line, "co_firstlineno") :
            line = line.co_firstlineno
        # Fixed: identity comparison instead of `== None` (PEP 8; also avoids
        # invoking rich/cmp comparison machinery on arbitrary objects).
        if line is None :
            line = 1
        self.line = line
        self.err = err
        self.level = err.level

    def __cmp__(self, warn) :
        """Order warnings by file, then line, then message (Python 2 protocol)."""
        # Fixed: `warn == None` re-entered __cmp__ via the comparison protocol.
        if warn is None :
            return 1
        if not self.file and not self.line:
            return 1
        if self.file != warn.file :
            return cmp(self.file, warn.file)
        if self.line != warn.line :
            return cmp(self.line, warn.line)
        return cmp(self.err, warn.err)

    def format(self) :
        """Return the warning as 'file:line: message' (or just the message)."""
        if not self.file and not self.line:
            return str(self.err)
        return "%s:%d: %s" % (self.file, self.line, self.err)

    def output(self, stream) :
        """Write the formatted warning plus a newline to ``stream``."""
        stream.write(self.format() + "\n")
| apache-2.0 |
ricotabor/opendrop | opendrop/app/common/image_acquisition/image_acquisition.py | 2 | 3492 | # Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
# (i.e. you cannot make commercial derivatives).
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from typing import Optional
from gi.repository import Gtk, GObject
from injector import inject
from opendrop.app.common.services.acquisition import AcquirerType, ImageAcquisitionService, ImageAcquirer
from opendrop.appfw import Presenter, TemplateChild, component
@component(
    template_path='./image_acquisition.ui',
)
class ImageAcquisitionPresenter(Presenter):
    """Presenter for the image-acquisition settings pane.

    Keeps a combo box of acquirer types synchronised (both ways) with the
    ImageAcquisitionService, and exposes the current acquirer as a
    read-only GObject property.
    """

    combo_box = TemplateChild('combo_box')  # type: TemplateChild[Gtk.ComboBoxText]

    @inject
    def __init__(self, acquisition_service: ImageAcquisitionService) -> None:
        self.acquisition_service = acquisition_service
        # Connections are created in after_view_init(); kept so destroy()
        # can disconnect them again.
        self.event_connections = ()

    def after_view_init(self) -> None:
        # Populate the combo box and wire up two-way synchronisation.
        self.populate_combobox()
        self.event_connections = (
            self.acquisition_service.bn_acquirer.on_changed.connect(self.acquisition_service_acquirer_changed),
        )
        self.combo_box.connect('notify::active-id', self.combo_box_active_id_changed)
        # Initialise the combo box from the service's current state.
        self.acquisition_service_acquirer_changed()

    def combo_box_active_id_changed(self, *_) -> None:
        # View -> model: the user picked an entry (or cleared the selection).
        active_id = self.combo_box.props.active_id
        if active_id is not None:
            self.acquisition_service.use_acquirer_type(AcquirerType[active_id])
        else:
            self.acquisition_service.use_acquirer_type(None)

    def acquisition_service_acquirer_changed(self, *_) -> None:
        # Model -> view: reflect the service's acquirer type in the combo box.
        acquirer_type = self.acquisition_service.get_acquirer_type()
        if acquirer_type is not None:
            self.combo_box.props.active_id = acquirer_type.name
        else:
            self.combo_box.props.active_id = None
        self.notify('acquirer')

    @GObject.Property(flags=GObject.ParamFlags.READABLE|GObject.ParamFlags.EXPLICIT_NOTIFY)
    def acquirer(self) -> Optional[ImageAcquirer]:
        """The currently selected acquirer (read-only GObject property)."""
        return self.acquisition_service.bn_acquirer.get()

    def populate_combobox(self) -> None:
        # One entry per known acquirer type; the id doubles as the enum key.
        for typ in AcquirerType:
            self.combo_box.append(id=typ.name, text=typ.display_name)

    def destroy(self, *_) -> None:
        # Tear down service subscriptions when the widget goes away.
        for conn in self.event_connections:
            conn.disconnect()
| gpl-2.0 |
juanmont/one | .vscode/extensions/tht13.rst-vscode-2.0.0/src/python/docutils/utils/math/tex2mathml_extern.py | 16 | 5634 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# :Id: $Id: tex2mathml_extern.py 7861 2015-04-10 23:48:51Z milde $
# :Copyright: © 2015 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
# Wrappers for TeX->MathML conversion by external tools
# =====================================================
import subprocess
# Minimal LaTeX document wrapping one math snippet (the %s slot) so the
# external converters can compile it stand-alone.
document_template = r"""\documentclass{article}
\usepackage{amsmath}
\begin{document}
%s
\end{document}
"""
def latexml(math_code, reporter=None):
    """Convert LaTeX math code to MathML with LaTeXML_

    Returns the extracted ``<math>...</math>`` element; raises SyntaxError
    when LaTeXML marks the output with an ltx_ERROR class.

    .. _LaTeXML: http://dlmf.nist.gov/LaTeXML/
    """
    p = subprocess.Popen(['latexml',
                          '-', # read from stdin
                          # '--preload=amsmath',
                          '--inputencoding=utf8',
                          ],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         close_fds=True)
    p.stdin.write((document_template % math_code).encode('utf8'))
    p.stdin.close()
    latexml_code = p.stdout.read()
    latexml_err = p.stderr.read().decode('utf8')
    # Bug fix: parenthesized condition.  The original
    # `reporter and found_error or not output` called reporter.error()
    # with reporter=None whenever latexml produced no output.
    if reporter and (latexml_err.find('Error') >= 0 or not latexml_code):
        reporter.error(latexml_err)

    # Second stage: post-process the intermediate XML into XHTML+MathML.
    post_p = subprocess.Popen(['latexmlpost',
                               '-',
                               '--nonumbersections',
                               '--format=xhtml',
                               # '--linelength=78', # experimental
                               '--'
                               ],
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              close_fds=True)
    post_p.stdin.write(latexml_code)
    post_p.stdin.close()
    result = post_p.stdout.read().decode('utf8')
    post_p_err = post_p.stderr.read().decode('utf8')
    # Same precedence fix as above.
    if reporter and (post_p_err.find('Error') >= 0 or not result):
        reporter.error(post_p_err)
    # extract MathML code:
    start, end = result.find('<math'), result.find('</math>') + 7
    result = result[start:end]
    if 'class="ltx_ERROR' in result:
        raise SyntaxError(result)
    return result
def ttm(math_code, reporter=None):
    """Convert LaTeX math code to MathML with TtM_

    Returns the extracted ``<math>...</math>`` element; raises SyntaxError
    on "Unknown" constructs reported by the converter.

    .. _TtM: http://hutchinson.belmont.ma.us/tth/mml/
    """
    p = subprocess.Popen(['ttm',
                          # '-i', # italic font for equations. Default roman.
                          '-u', # unicode character encoding. (Default iso-8859-1).
                          '-r', # output raw MathML (no preamble or postlude)
                          ],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         close_fds=True)
    p.stdin.write((document_template % math_code).encode('utf8'))
    p.stdin.close()
    result = p.stdout.read()
    err = p.stderr.read().decode('utf8')
    if err.find('**** Unknown') >= 0:
        msg = '\n'.join([line for line in err.splitlines()
                         if line.startswith('****')])
        raise SyntaxError('\nMessage from external converter TtM:\n'+ msg)
    # Bug fix: parenthesized condition.  The original
    # `reporter and found_error or not result` called reporter.error()
    # with reporter=None whenever ttm produced no output.
    if reporter and (err.find('**** Error') >= 0 or not result):
        reporter.error(err)
    # Extract just the <math>...</math> element from the raw output.
    start, end = result.find('<math'), result.find('</math>') + 7
    result = result[start:end]
    return result
def blahtexml(math_code, inline=True, reporter=None):
    """Convert LaTeX math code to MathML with blahtexml_

    ``inline=False`` produces display-style math.  Raises SyntaxError with
    the converter's message when blahtexml reports an <error> element.

    .. _blahtexml: http://gva.noekeon.org/blahtexml/
    """
    options = ['--mathml',
               '--indented',
               '--spacing', 'moderate',
               '--mathml-encoding', 'raw',
               '--other-encoding', 'raw',
               '--doctype-xhtml+mathml',
               '--annotate-TeX',
               ]
    if inline:
        mathmode_arg = ''
    else:
        # Displayed (block) math: mark the <math> element and tell blahtexml.
        mathmode_arg = 'mode="display"'
        options.append('--displaymath')
    p = subprocess.Popen(['blahtexml']+options,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         close_fds=True)
    p.stdin.write(math_code.encode('utf8'))
    p.stdin.close()
    result = p.stdout.read().decode('utf8')
    err = p.stderr.read().decode('utf8')
    # Bug fix: removed stray Python-2 debug statement `print err`, which was
    # development leftover output (and a SyntaxError under Python 3).
    if result.find('<error>') >= 0:
        raise SyntaxError('\nMessage from external converter blahtexml:\n'
                          +result[result.find('<message>')+9:result.find('</message>')])
    if reporter and (err.find('**** Error') >= 0 or not result):
        reporter.error(err)
    # Re-wrap the converter's <markup> payload in a proper <math> element.
    start, end = result.find('<markup>') + 9, result.find('</markup>')
    result = ('<math xmlns="http://www.w3.org/1998/Math/MathML"%s>\n'
              '%s</math>\n') % (mathmode_arg, result[start:end])
    return result
# self-test
if __name__ == "__main__":
    # NOTE: Python 2 only -- the ur'' literal and print statements below
    # are SyntaxErrors under Python 3.
    example = ur'\frac{\partial \sin^2(\alpha)}{\partial \vec r} \varpi \, \text{Grüße}'
    # print latexml(example).encode('utf8')
    # print ttm(example)#.encode('utf8')
    print blahtexml(example).encode('utf8')
| apache-2.0 |
johnkerl/bridge-walk-count | logcorrectfit.py | 1 | 4842 | #!/usr/bin/python -Wall
# ================================================================
# John Kerl
# kerl.john.r@gmail.com
# 2010-02-02
# ================================================================
from __future__ import division
import sys, re
import tabutil_m, stats_m
from math import log, exp
# ----------------------------------------------------------------
def inv2x2(matrix):
    """Invert a 2x2 matrix given as nested lists.

    Assumes the determinant is nonzero; raises ZeroDivisionError otherwise.
    """
    top, bottom = matrix
    a, b = top
    c, d = bottom
    det = a * d - b * c
    return [
        [d / det, -b / det],
        [-c / det, a / det],
    ]
# ----------------------------------------------------------------
# [log a] [ n sum log N_i ]-1 [sum log ci - log mu sum N_i ]
# [ ] = [ ] [ ]
# [ c ] [sum log N_i sum log^2 N_i] [sum logci logNi - logmu sum NilogNi]
def find_a_and_b(Ns, cs, mu):
    """Least-squares fit of the model c(N) = a * mu**N * N**b.

    Solves the 2x2 normal equations

        [log a]   [ n            sum log N_i ]^-1 [sum log c_i - log mu sum N_i          ]
        [  b  ] = [ sum log N_i  sum log^2 N_i]   [sum log c_i log N_i - log mu sum N_i log N_i]

    and returns [a, b].
    """
    n = len(Ns)
    log_Ns = [log(N) for N in Ns]
    log_cs = [log(c) for c in cs]

    # Sufficient statistics for the normal equations.
    sum_Ni = sum(Ns)
    sum_log_Ni = sum(log_Ns)
    sum_log2Ni = sum(lN * lN for lN in log_Ns)
    sum_Ni_log_Ni = sum(lN * N for N, lN in zip(Ns, log_Ns))
    sum_log_ci = sum(log_cs)
    sum_log_ci_log_Ni = sum(lN * lc for lN, lc in zip(log_Ns, log_cs))

    # Invert the 2x2 normal matrix in place (inlined 2x2 inverse).
    m00, m01 = n, sum_log_Ni
    m10, m11 = sum_log_Ni, sum_log2Ni
    det = m00 * m11 - m01 * m10
    i00, i01 = m11 / det, -m01 / det
    i10, i11 = -m10 / det, m00 / det

    # Right-hand side, then solve for log a and b.
    r0 = sum_log_ci - log(mu) * sum_Ni
    r1 = sum_log_ci_log_Ni - log(mu) * sum_Ni_log_Ni
    log_a = i00 * r0 + i01 * r1
    b = i10 * r0 + i11 * r1
    return [exp(log_a), b]
# ----------------------------------------------------------------
def scale(Ns, cs, theta):
    """For one theta, fit the counts c(N) with two models and print a table.

    Ns -- walk lengths; cs -- corresponding counts; theta -- label only.
    Uses the module-level ``mu`` (connective constant) for the third fit.
    Python 2 (print statements).
    """
    log_Ns = map(log, Ns)
    log_cs = map(log, cs)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # First fit:
    #   c(N) = a b^N
    #
    #   log c = log a + N log b
    # Linear fit:
    #   x = N
    #   y = log c
    #   slope = log b
    #   yint = log a
    #
    #   a = exp(yint)
    #   b = exp(slope)
    [slope, yint, csm, csb] = stats_m.linear_regression(Ns, log_cs)
    a1 = exp(yint)
    b1 = exp(slope)
    print '# theta = %.4f a1 = %11.7f b1 = %11.7f' % (theta, a1, b1)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Second fit:
    #
    #   c(N) = a N^b
    #   log c = log a + b log N
    # Linear fit:
    #   x = log N
    #   y = log c
    #   slope = b
    #   yint = log a
    #
    #   a = exp(yint)
    #   b = slope
    [slope, yint, csm, csb] = stats_m.linear_regression(log_Ns, log_cs)
    a2 = exp(yint)
    b2 = slope
    #print '# a2 = %11.7f b2 = %11.7f' % (a2, b2)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Third fit:
    #
    #   c(N) = a mu^N N^b
    #
    # Note: the b estimate is very sensitive to minute changes in mu.
    [a3, b3] = find_a_and_b(Ns, cs, mu)
    print '# theta = %.4f a3 = %11.7f b3 = %11.7f' % (theta, a3, b3)
    print '# mu = %11.7f' % (mu)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Display the original data along with the fits
    #print '#N exact_c appx1c appx2c appx3c'
    #print '#- ------------- ------------- ------------- --------------'
    print '#N exact_c a b^N a mu^N N^b'
    print '#- ----------------------- ----------------------- ------------------------'
    #for N in range(1, 25):
    for N in range(1, 45):
        # c = a b^N
        approx1_c = a1 * b1**N
        # c = a N^b
        #approx2_c = a2 * N**b2
        # c = a mu^N N^b
        approx3_c = a3 * mu**N * N**b3
        print '%2d' % (N),
        # Print the exact count when we have it for this N, else a placeholder.
        if N in Ns:
            print '%21.0f' % (cs[Ns.index(N)]),
        else:
            print '%21s' % ('_'),
        #print '%11.4e %11.4e %11.4e' % (approx1_c, approx2_c, approx3_c)
        #print '%14.0f %14.0f %14.0f' % (approx1_c, approx2_c, approx3_c)
        #print '%11.4e %11.4e' % (approx1_c, approx3_c)
        print '%21.0f %21.0f' % (approx1_c, approx3_c)
# ----------------------------------------------------------------
# Sample input data:
# #UPSAW
# #N c(N)
# #-- ---------
# #11 6199
# #12 16225
# #13 42811
# #14 112285
# #15 296051
# 16 777411
# 17 2049025
# 18 5384855
# 19 14190509
# 20 37313977
# 21 98324565
# 22 258654441
# 23 681552747

# Connective constant mu for the third fit; may be overridden by argv[2].
mu = 2.61987
input_file_name = 's'
if len(sys.argv) == 2:
    input_file_name = sys.argv[1]
if len(sys.argv) == 3:
    input_file_name = sys.argv[1]
    mu = float(sys.argv[2])

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Acquire the data.
columns = tabutil_m.float_columns_from_file(input_file_name)
rows = tabutil_m.float_rows_from_file(input_file_name)
labels = tabutil_m.labels_from_file(input_file_name)
thetas = columns[0]
# Column labels look like "N=16"; strip the prefix to recover the lengths.
N_strings = labels[1:]
Ns = []
for N_string in N_strings:
    N = int(re.sub('N=', '', N_string))
    Ns.append(N)
# Fit and print one table per theta row (Python 2 print statements).
num_thetas = len(thetas)
for i in range(0, num_thetas):
    theta = thetas[i]
    counts_for_theta = rows[i][1:]
    print 'theta = %.4f' % (theta)
    scale(Ns, counts_for_theta, theta)
    print
| bsd-2-clause |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/tests/regressiontests/conditional_processing/models.py | 57 | 5691 | # -*- coding:utf-8 -*-
from datetime import datetime, timedelta
from calendar import timegm
from django.test import TestCase
from django.utils.http import parse_etags, quote_etag
# Fixtures shared with the views under test (/condition/...): the canonical
# body, its Last-Modified/ETag validators, and deliberately stale variants.
FULL_RESPONSE = 'Test conditional get response'
LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47)
LAST_MODIFIED_STR = 'Sun, 21 Oct 2007 23:21:47 GMT'
EXPIRED_LAST_MODIFIED_STR = 'Sat, 20 Oct 2007 23:21:47 GMT'
ETAG = 'b4246ffc4f62314ca13147c9d4f76974'
EXPIRED_ETAG = '7fae4cd4b0f81e7d2914700043aa8ed6'
class ConditionalGet(TestCase):
    """Tests for conditional-request handling (ETag / Last-Modified headers)."""

    def assertFullResponse(self, response, check_last_modified=True, check_etag=True):
        # A 200 carrying the full body and (optionally) the validator headers.
        self.assertEquals(response.status_code, 200)
        self.assertEquals(response.content, FULL_RESPONSE)
        if check_last_modified:
            self.assertEquals(response['Last-Modified'], LAST_MODIFIED_STR)
        if check_etag:
            self.assertEquals(response['ETag'], '"%s"' % ETAG)

    def assertNotModified(self, response):
        # A 304 response must carry an empty body.
        self.assertEquals(response.status_code, 304)
        self.assertEquals(response.content, '')

    def testWithoutConditions(self):
        response = self.client.get('/condition/')
        self.assertFullResponse(response)

    def testIfModifiedSince(self):
        # Fresh If-Modified-Since -> 304; stale -> full response.
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
        response = self.client.get('/condition/')
        self.assertFullResponse(response)

    def testIfNoneMatch(self):
        # Matching If-None-Match -> 304; non-matching -> full response.
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
        # Several etags in If-None-Match is a bit exotic but why not?
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s", "%s"' % (ETAG, EXPIRED_ETAG)
        response = self.client.get('/condition/')
        self.assertNotModified(response)

    def testIfMatch(self):
        # If-Match guards writes: matching etag -> 200, stale etag -> 412.
        self.client.defaults['HTTP_IF_MATCH'] = '"%s"' % ETAG
        response = self.client.put('/condition/etag/', {'data': ''})
        self.assertEquals(response.status_code, 200)
        self.client.defaults['HTTP_IF_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.put('/condition/etag/', {'data': ''})
        self.assertEquals(response.status_code, 412)

    def testBothHeaders(self):
        # Both validators fresh -> 304; either one stale -> full response.
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.get('/condition/')
        self.assertFullResponse(response)

    def testSingleCondition1(self):
        # A view exposing only Last-Modified honours it; the etag-only view
        # must ignore If-Modified-Since and return the full response.
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        response = self.client.get('/condition/last_modified/')
        self.assertNotModified(response)
        response = self.client.get('/condition/etag/')
        self.assertFullResponse(response, check_last_modified=False)

    def testSingleCondition2(self):
        # Mirror of testSingleCondition1 for If-None-Match.
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/etag/')
        self.assertNotModified(response)
        response = self.client.get('/condition/last_modified/')
        self.assertFullResponse(response, check_etag=False)

    def testSingleCondition3(self):
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
        response = self.client.get('/condition/last_modified/')
        self.assertFullResponse(response, check_etag=False)

    def testSingleCondition4(self):
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.get('/condition/etag/')
        self.assertFullResponse(response, check_last_modified=False)

    def testSingleCondition5(self):
        # Same behaviour for the decorator-ordering variants of the views.
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        response = self.client.get('/condition/last_modified2/')
        self.assertNotModified(response)
        response = self.client.get('/condition/etag2/')
        self.assertFullResponse(response, check_last_modified=False)

    def testSingleCondition6(self):
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/etag2/')
        self.assertNotModified(response)
        response = self.client.get('/condition/last_modified2/')
        self.assertFullResponse(response, check_etag=False)

    def testInvalidETag(self):
        # A syntactically broken If-None-Match must be ignored, not crash.
        self.client.defaults['HTTP_IF_NONE_MATCH'] = r'"\"'
        response = self.client.get('/condition/etag/')
        self.assertFullResponse(response, check_last_modified=False)
class ETagProcesing(TestCase):
    """Tests for the etag parsing/quoting helpers.

    NOTE(review): class name is missing an 's' ("ETagProcessing"); left
    as-is because renaming could break external references to this test.
    """

    def testParsing(self):
        # Handles empty, plain, escaped-quote, escaped-backslash and weak etags.
        etags = parse_etags(r'"", "etag", "e\"t\"ag", "e\\tag", W/"weak"')
        self.assertEquals(etags, ['', 'etag', 'e"t"ag', r'e\tag', 'weak'])

    def testQuoting(self):
        # Backslashes and quotes must be escaped inside the quoted form.
        quoted_etag = quote_etag(r'e\t"ag')
        self.assertEquals(quoted_etag, r'"e\\t\"ag"')
| apache-2.0 |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/chunk.py | 386 | 5372 | """Simple class to read IFF chunks.
An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
Format)) has the following structure:
+----------------+
| ID (4 bytes) |
+----------------+
| size (4 bytes) |
+----------------+
| data |
| ... |
+----------------+
The ID is a 4-byte string which identifies the type of chunk.
The size field (a 32-bit value, encoded using big-endian byte order)
gives the size of the whole chunk, including the 8-byte header.
Usually an IFF-type file consists of one or more chunks. The proposed
usage of the Chunk class defined here is to instantiate an instance at
the start of each chunk and read from the instance until it reaches
the end, after which a new instance can be instantiated. At the end
of the file, creating a new instance will fail with an EOFError
exception.
Usage:
while True:
try:
chunk = Chunk(file)
except EOFError:
break
chunktype = chunk.getname()
while True:
data = chunk.read(nbytes)
if not data:
pass
# do something with data
The interface is file-like. The implemented methods are:
read, close, seek, tell, isatty.
Extra methods are: skip() (called by close, skips to the end of the chunk),
getname() (returns the name (ID) of the chunk)
The __init__ method has one required argument, a file-like object
(including a chunk instance), and one optional argument, a flag which
specifies whether or not chunks are aligned on 2-byte boundaries. The
default is 1, i.e. aligned.
"""
class Chunk:
    """File-like reader for one IFF chunk (Python 2; see module docstring)."""

    def __init__(self, file, align=True, bigendian=True, inclheader=False):
        # struct is only needed here, so import locally.
        import struct
        self.closed = False
        self.align = align      # whether to align to word (2-byte) boundaries
        if bigendian:
            strflag = '>'
        else:
            strflag = '<'
        self.file = file
        # Header: 4-byte chunk ID followed by a 4-byte unsigned size.
        self.chunkname = file.read(4)
        if len(self.chunkname) < 4:
            raise EOFError
        try:
            self.chunksize = struct.unpack(strflag+'L', file.read(4))[0]
        except struct.error:
            raise EOFError
        if inclheader:
            self.chunksize = self.chunksize - 8 # subtract header
        self.size_read = 0
        # Probe whether the underlying file supports seeking; fall back to
        # sequential reads in skip() if it does not.
        try:
            self.offset = self.file.tell()
        except (AttributeError, IOError):
            self.seekable = False
        else:
            self.seekable = True

    def getname(self):
        """Return the name (ID) of the current chunk."""
        return self.chunkname

    def getsize(self):
        """Return the size of the current chunk."""
        return self.chunksize

    def close(self):
        # Closing skips to the end of the chunk so the underlying file is
        # positioned at the start of the next chunk.
        if not self.closed:
            self.skip()
            self.closed = True

    def isatty(self):
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        return False

    def seek(self, pos, whence=0):
        """Seek to specified position into the chunk.

        Default position is 0 (start of chunk).
        If the file is not seekable, this will result in an error.
        """
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if not self.seekable:
            raise IOError, "cannot seek"
        # Translate whence-relative positions to chunk-relative ones.
        if whence == 1:
            pos = pos + self.size_read
        elif whence == 2:
            pos = pos + self.chunksize
        if pos < 0 or pos > self.chunksize:
            raise RuntimeError
        self.file.seek(self.offset + pos, 0)
        self.size_read = pos

    def tell(self):
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        return self.size_read

    def read(self, size=-1):
        """Read at most size bytes from the chunk.

        If size is omitted or negative, read until the end
        of the chunk.
        """
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if self.size_read >= self.chunksize:
            return ''
        # Clamp the request to what remains in this chunk.
        if size < 0:
            size = self.chunksize - self.size_read
        if size > self.chunksize - self.size_read:
            size = self.chunksize - self.size_read
        data = self.file.read(size)
        self.size_read = self.size_read + len(data)
        # Odd-sized aligned chunks are followed by one pad byte; consume it
        # once the chunk body has been fully read.
        if self.size_read == self.chunksize and \
           self.align and \
           (self.chunksize & 1):
            dummy = self.file.read(1)
            self.size_read = self.size_read + len(dummy)
        return data

    def skip(self):
        """Skip the rest of the chunk.

        If you are not interested in the contents of the chunk,
        this method should be called so that the file points to
        the start of the next chunk.
        """
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if self.seekable:
            try:
                n = self.chunksize - self.size_read
                # maybe fix alignment
                if self.align and (self.chunksize & 1):
                    n = n + 1
                self.file.seek(n, 1)
                self.size_read = self.size_read + n
                return
            except IOError:
                pass
        # Non-seekable fallback: read (and discard) the remainder in chunks.
        while self.size_read < self.chunksize:
            n = min(8192, self.chunksize - self.size_read)
            dummy = self.read(n)
            if not dummy:
                raise EOFError
| apache-2.0 |
NSLS-II-XPD/ipython_ophyd | profile_collection_germ/startup/11-temperature-controller.py | 1 | 4034 | from ophyd import PVPositioner, EpicsSignal, EpicsSignalRO, Device
from ophyd.signal import AttributeSignal
from ophyd.mixins import EpicsSignalPositioner
from ophyd import Component as C
from ophyd import Component as Cpt
from ophyd.device import DeviceStatus
class CS700TemperatureController(PVPositioner):
    """PVPositioner wrapping a CS700 cryostream's EPICS PVs.

    NOTE(review): PV suffixes ('T-I', 'T-SP', ...) assumed to match the
    IOC's database -- confirm against the controller's EPICS records.
    """

    # Current temperature readout (read-only).
    readback = C(EpicsSignalRO, 'T-I')
    # Temperature setpoint (writable).
    setpoint = C(EpicsSignal, 'T-SP')
    # Busy flag used as the move-completion ("done") indicator.
    done = C(EpicsSignalRO, 'Cmd-Busy')
    # Command PV used to stop a move in progress.
    stop_signal = C(EpicsSignal, 'Cmd-Cmd')

    def set(self, *args, timeout=None, **kwargs):
        # Forward with timeout=None by default so moves are not subject
        # to the base class's default timeout.
        return super().set(*args, timeout=timeout, **kwargs)

    def trigger(self):
        # There is nothing to do. Just report that we are done.
        # Note: This really should not necessary to do --
        # future changes to PVPositioner may obviate this code.
        status = DeviceStatus(self)
        status._finished()
        return status
# To allow for sample temperature equilibration time, increase
# the `settle_time` parameter (units: seconds).
cs700 = CS700TemperatureController('XF:28IDC-ES:1{Env:01}', name='cs700',
                                   settle_time=0)
# The busy PV reads 0 when idle, so a move is complete when done == 0.
# NOTE(review): inferred from usage; confirm against the IOC.
cs700.done_value = 0
# Record only setpoint and readback in scan documents.
cs700.read_attrs = ['setpoint', 'readback']
# Friendlier field names for downstream documents.
cs700.readback.name = 'temperature'
cs700.setpoint.name = 'temperature_setpoint'
class Eurotherm(EpicsSignalPositioner):
    """Eurotherm furnace controller; set() defeats the base class timeouts."""

    def set(self, *args, **kwargs):
        # Override the base class's hard-coded timeouts with an
        # effectively infinite one (1e6 seconds).
        return super().set(*args, timeout=1000000, **kwargs)
eurotherm = Eurotherm('XF:28IDC-ES:1{Env:04}T-I',
write_pv='XF:28IDC-ES:1{Env:04}T-SP',
tolerance= 3, name='eurotherm')
class CryoStat(Device):
    """Lakeshore-style cryostat with a software dead-band move.

    set() writes the setpoint, turns the controller's read scan on, and
    completes the returned status once the temperature readback comes
    within ``dead_band`` of the target.
    """

    # readback
    T = Cpt(EpicsSignalRO, ':IN1')
    # setpoint
    setpoint = Cpt(EpicsSignal, read_pv=":OUT1:SP_RBV",
                   write_pv=":OUT1:SP",
                   add_prefix=('suffix', 'read_pv', 'write_pv'))
    # heater power level
    heater = Cpt(EpicsSignal, ':HTR1')

    # configuration
    dead_band = Cpt(AttributeSignal, attr='_dead_band')
    heater_range = Cpt(EpicsSignal, ':HTR1:Range', string=True)
    scan = Cpt(EpicsSignal, ':read.SCAN', string=True)
    mode = Cpt(EpicsSignal, ':OUT1:Mode', string=True)
    cntrl = Cpt(EpicsSignal, ':OUT1:Cntrl', string=True)

    # trigger signal
    trig = Cpt(EpicsSignal, ':read.PROC')

    def trigger(self):
        # Poke the record to process once; the put is synchronous, so the
        # status can be reported done immediately.
        self.trig.put(1, wait=True)
        return DeviceStatus(self, done=True, success=True)

    def __init__(self, *args, dead_band, read_attrs=None,
                 configuration_attrs=None, **kwargs):
        if read_attrs is None:
            read_attrs = ['T', 'setpoint']
        if configuration_attrs is None:
            configuration_attrs = ['heater_range', 'dead_band',
                                   'mode', 'cntrl']
        super().__init__(*args, read_attrs=read_attrs,
                         configuration_attrs=configuration_attrs,
                         **kwargs)
        # Target temperature of the in-flight set(), or None when idle.
        self._target = None
        self._dead_band = dead_band
        # Status object of the in-flight set(), or None when idle.
        self._sts = None

    def _sts_mon(self, value, **kwargs):
        # Monitor callback on T: finish the move once the readback is
        # within the dead band of the target (or the move was cancelled).
        # BUG FIX: was `np.abs`, but numpy is never imported in this file;
        # the builtin abs() is equivalent for scalar readback values.
        if (self._target is None or
                abs(self._target - value) < self._dead_band):
            self.T.clear_sub(self._sts_mon)
            self.scan.put('Passive', wait=True)
            if self._sts is not None:
                self._sts._finished()
            self._sts = None
            self._target = None

    def set(self, val):
        """Begin a move to temperature ``val``; return a DeviceStatus."""
        self._target = val
        self.setpoint.put(val, wait=True)
        sts = self._sts = DeviceStatus(self)
        # Poll the readback at 5 Hz while the move is in progress.
        self.scan.put('.2 second')
        self.T.subscribe(self._sts_mon)
        return sts

    def stop(self, *, success=False):
        """Abort the current move, pinning the setpoint at the readback."""
        self.setpoint.put(self.T.get())
        if self._sts is not None:
            self._sts._finished(success=success)
        self._sts = None
        self._target = None
        self.scan.put('Passive', wait=True)
cryostat = CryoStat('XF:28IDC_ES1:LS335:{CryoStat}', name='cryostat', dead_band=1)
| bsd-2-clause |
dharamgollapudi/jaikuengine | common/util.py | 29 | 9112 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hmac
import logging
import math
import random
import re
import sys
import time
import urllib
from django import http
from django.conf import settings
from django.utils import safestring
from common import clean
from common import exception
try:
import uuid
_generate_uuid = lambda: uuid.uuid4().hex
except ImportError:
logging.info("No uuid module, using fake")
_generate_uuid = lambda: str(random.randint(10000000, 20000000))
try:
import hashlib
# NOTE: hmac changed its behavior regarding unicode strings in 2.6,
# even though App Engine is still on 2.5 this is necessary for
# testing with 2.6
if sys.version_info[:2] >= (2, 6):
_hash = lambda k, m: hmac.new(str(k), m, hashlib.sha1).hexdigest()
else:
_hash = lambda k, m: hmac.new(k, m, hashlib.sha1).hexdigest()
sha1 = lambda k: hashlib.sha1(k).hexdigest()
except ImportError:
import sha
logging.info("No hashlib module, using sha1")
_hash = lambda k, m: sha.new(k + m).hexdigest()
sha1 = lambda k: sha.new(k).hexdigest()
VALID_METHODS = ('GET', 'HEAD', 'POST', 'PUT', 'DELETE')
DEFAULT_AVATAR_PATH = 'avatar_default'
def add_caching_headers(response, headers):
    """Copy the caching headers in `headers` onto `response`.

    The response is left untouched when it already carries a
    Cache-control header, or when it is not a plain 200 (redirects and
    errors manage their own caching).  Returns the response.
    """
    # already has caching headers set
    if response.has_header('Cache-control'):
        return response

    # this is a redirect or an error
    if response.status_code != 200:
        return response

    # BUG FIX: .iteritems() is Python-2-only; .items() works on 2 and 3.
    for k, v in headers.items():
        response[k] = v
    return response
CACHE_NEVER_HEADERS = {'Cache-control': 'no-cache, must-revalidate',
'Pragma': 'no-cache',
'Expires': 'Fri, 01 Jan 1990 00:00:00 GMT',
}
a_bit_less_than_one_year_from_when_this_file_was_loaded = (
datetime.datetime.now() + datetime.timedelta(days=364)
).strftime('%a, %d %b %Y %H:%M:%S GMT')
CACHE_FOREVER_HEADERS = {
'Expires': a_bit_less_than_one_year_from_when_this_file_was_loaded,
'Cache-control': 'public, max-age=%d' % (86400*364)
}
def HttpRssResponse(content, request):
    # Wrap pre-rendered RSS markup with the proper content type.
    response = http.HttpResponse(content)
    response['Content-type'] = 'application/rss+xml; charset=utf-8'
    return response
def HttpAtomResponse(content, request):
    # Wrap pre-rendered Atom markup with the proper content type.
    response = http.HttpResponse(content)
    response['Content-type'] = 'application/atom+xml; charset=utf-8'
    return response
def HttpJsonResponse(content, request):
    """ Returns a JSON response

    If there's a callback parameter in the URL ("?callback=function"), the
    returned JSON will be wrapped like below:

    callback_function(json_output);

    Remember that brackets in the callback must be URL encoded:
    "hello.world%5B5%5D".
    """
    content_type = 'application/json'

    if (request and request.method == 'GET'
        and 'callback' in request.GET):
        callback = clean_jsonp_callback(request.GET['callback'])
        if callback:
            # JSONP responses are executable script, not plain JSON.
            content_type = 'text/javascript'
            content = "%s(%s);" % (callback, content)

    response = http.HttpResponse(content)
    response['Content-type'] = "%s; charset=utf-8" % content_type
    return response
def clean_jsonp_callback(callback):
    """Return a sanitized JSONP callback function name, or None.

    Callback function names can contain alphanumeric characters as well
    as periods (.) and brackets ([ and ]), allowing names like "one.two[3]".
    Every other character is stripped; if nothing remains, None is
    returned so the caller falls back to plain JSON.
    """
    # Raw string avoids invalid-escape-sequence warnings for \. \[ \]
    # on Python 3; the pattern itself is unchanged.
    callback = re.sub(r'[^a-zA-Z0-9\._\[\]]', '', callback)
    if callback:
        return callback
    return None
def hash_password(nick, password):
    # NOTE(review): `nick` is accepted but unused -- presumably kept so
    # the signature matches hash_password_intermediate; confirm before
    # changing.  Unsalted SHA-1 of the password.
    return sha1(password)
def hash_password_intermediate(nick, password):
    # Salted, nick-bound HMAC-SHA1 used for intermediate password hashes.
    return _hash(hash_salt() + nick, password)
def domain(request):
    """Return the scheme://host[:port] prefix for this request."""
    meta = request.META
    result = "%s://%s" % (meta['wsgi.url_scheme'], meta['SERVER_NAME'])
    if meta['SERVER_PORT'] != '80':
        result = "%s:%s" % (result, meta['SERVER_PORT'])
    return result
def here(request):
    # Absolute URL of the current request path (query string excluded).
    base = domain(request)
    url = base + request.META['PATH_INFO']
    return url
def hash_salt():
    # Site-wide secret used to salt all HMAC computations below.
    return settings.SECRET_KEY;
def hash_generic(value):
    # HMAC-SHA1 of `value` keyed with the site salt; the value is coerced
    # to a byte string first so unicode input hashes consistently.
    value = clean.encoding.smart_str(value)
    return _hash(hash_salt(), value)
def generate_uuid():
    # Random 32-char hex id (falls back to random ints if uuid is missing;
    # see the try/except at module import time).
    return _generate_uuid()
def generate_password():
    """Create a password for the user (to change)."""
    # First 8 hex characters of a salted hash of a fresh uuid.
    return hash_generic(generate_uuid())[:8]
def create_nonce(user, action, offset=0):
    """Build a short, time-windowed nonce bound to `user` and `action`.

    The validity window is 12 hours (43200 s); `offset` selects a
    neighboring window so a just-expired nonce can still be checked.
    """
    if not user:
        nick = ""
    else:
        try:
            nick = user.nick
        except AttributeError:
            if settings.MANAGE_PY:
                # extra case to make testing easier
                nick = clean.nick(user)
            else:
                raise
    # Index of the current 12-hour window since the epoch.
    i = math.ceil(time.time() / 43200)
    i += offset
    nonce = hash_generic(str(i) + action + nick)
    # Middle slice of the digest: 10 hex characters.
    return nonce[-12:-2]
def safe(f):
    # Decorator for template filters: marks the filter's result as safe
    # HTML so the template engine does not escape it.
    def _wrapper(value, arg=None):
        rv = f(value, arg)
        return safestring.mark_safe(rv)
    #_wrapper.func_name = f.func_name
    _wrapper.__name__ = f.__name__
    return _wrapper
def get_redirect_to(request, default=None):
    # Prefer an explicit ?redirect_to=... parameter; otherwise fall back
    # to the current path (or the supplied default).
    redirect_to = request.REQUEST.get('redirect_to', default)
    if redirect_to is None:
        # TODO make this domain aware
        redirect_to = request.META.get('PATH_INFO')
    return redirect_to
def RedirectFlash(url, message):
    # Redirect to `url` carrying a one-shot flash message; the signed
    # nonce in `_flash` lets the receiving view verify its authenticity.
    url = qsa(url,
              params={'flash': message,
                      '_flash': create_nonce(None, message)
                      }
              )
    return http.HttpResponseRedirect(url)
def RedirectError(message):
    # Redirect to the site-wide error page with a signed error message.
    url = qsa('http://%s/error' % settings.DOMAIN,
              params={'error': message,
                      '_error': create_nonce(None, message)
                      }
              )
    return http.HttpResponseRedirect(url)
def query_dict_to_keywords(query_dict):
    # Convert a QueryDict into a plain dict with str keys so it can be
    # splatted into a function call as **kwargs.
    if settings.DEBUG:
        # support for profiling, pretend profiling stuff doesn't exist
        return dict([(str(k), v) for k, v in query_dict.items() if not k.startswith('_prof')])
    return dict([(str(k), v) for k, v in query_dict.items()])
def href_to_queryparam_dict(href):
    """Parse the query string of `href` into a {key: value} dict.

    Keys and values are URL-unquoted.  BUG FIX: a bare flag parameter
    with no '=' now maps to the empty string instead of raising
    IndexError.  Repeated keys keep the last occurrence, as before.
    """
    try:  # Python 2
        from urllib import unquote
    except ImportError:  # Python 3
        from urllib.parse import unquote

    ret = {}
    qparamstr_parts = href.split('?')
    if len(qparamstr_parts) > 1:
        qparamstr = qparamstr_parts[1]
        for qparam in qparamstr.split('&'):
            # partition() always yields a value ('' when '=' is absent),
            # unlike the original split()[1] which crashed on flags.
            key, _sep, value = qparam.partition('=')
            ret[unquote(key)] = unquote(value)
    return ret
def email_domain(s):
    """Returns the domain part of an email address."""
    # rpartition keeps everything after the last '@' and yields the whole
    # string unchanged when there is no '@' at all.
    return s.rpartition('@')[2]
def is_remote(s):
    # XXX termie: this should look up something in a list of local domains
    # True when the identifier's domain is not this instance's own domain.
    return s.split('@')[-1] != settings.NS_DOMAIN
def is_channel_nick(nick):
    # Channels are identified by a leading '#'; slicing handles the
    # empty string without an IndexError.
    return nick[:1] == "#"
def get_user_from_topic(s):
    """Extracts the username from a topic or Stream object.

    Topics look like: 'stream/bar@example.com/comments'

    Returns:
      A string, the username, or None if the topic name didn't appear to
      contain a valid userid.
    """
    o = None
    # Check whether we got a topic name or a Stream instance
    # NOTE(review): `unicode` only exists on Python 2; the check
    # short-circuits for str input, but a Stream instance on Python 3
    # would hit a NameError here -- confirm the runtime is Python 2.
    if not (isinstance(s, str) or isinstance(s, unicode)):
        s = s.key().name()
    # Renamed from `list`, which shadowed the builtin of the same name.
    parts = s.split('/')
    try:
        email = parts[1]
        if '@' in email:
            o = email
    except IndexError:  # No '/' in s.
        pass
    return o
def qsa(url, params):
    """Append `params` to `url` as a query string.

    Uses '?' when the url has no query string yet, '&' otherwise.
    """
    # TODO termie make better
    # Portable import: urlencode moved to urllib.parse on Python 3.
    try:  # Python 2
        from urllib import urlencode
    except ImportError:  # Python 3
        from urllib.parse import urlencode
    sep = "?"
    if sep in url:
        sep = "&"
    url = url + sep + urlencode(params)
    return url
def datetime_to_timestamp(dt):
    # NOTE(review): mktime() interprets the tuple in *local* time even
    # though utctimetuple() is UTC, so the result is shifted by the
    # server's UTC offset -- appears safe only because callers compare
    # these values against each other; confirm before reuse.
    return time.mktime(dt.utctimetuple())
def page_offset(request):
    """attempts to normalize timestamps into datetimes for offsets"""
    offset = request.GET.get('offset', None)
    if offset:
        try:
            offset = datetime.datetime.fromtimestamp(float(offset))
        except (TypeError, ValueError):
            # Malformed offsets are ignored rather than erroring the page.
            offset = None
    # Second element: whether a usable offset was provided.
    return offset, (offset and True or False)
def page_offset_nick(request):
    # Nick-based pagination: the offset is an opaque nick string.
    offset = request.GET.get('offset', None)
    return offset, (offset and True or False)
def page_entries(request, entries, per_page):
    # Expects up to per_page + 1 entries; when the extra sentinel entry
    # is present, drop it and hand back the timestamp of the
    # second-to-last entry as the next page's offset.
    if len(entries) > per_page > 0:
        more = datetime_to_timestamp(entries[-2].created_at)
        return entries[:-1], more
    return entries, None
def page_actors(request, actors, per_page):
    """ attempts to break a result into pages

    if the number of actors is greater than per_page hand over the nick
    of the second-to-last actor to use as an offset.

    the length of actors should never be more than per_page + 1
    """
    # Guard clause: nothing to trim when the result fits on one page.
    if len(actors) <= per_page:
        return actors, None
    return actors[:-1], actors[-2].nick
def display_nick(nick):
    # TODO(teemu): combine nick functionality from models.py with this
    # Everything before the first '@' (the whole string when absent).
    return nick.partition("@")[0]
def url_nick(nick):
    """Return the short nick used in URLs; channels lose their '#'."""
    short = nick.split("@")[0]
    # str.startswith replaces the needless re.match('^#', ...) regex.
    if short.startswith('#'):
        return short[1:]
    return short
def BREAKPOINT():
    # Drop into pdb wired to the *real* stdin/stdout, bypassing any
    # redirection the serving environment may have installed.
    import pdb
    p = pdb.Pdb(None, sys.__stdin__, sys.__stdout__)
    p.set_trace()
| apache-2.0 |
nazo/ansible | lib/ansible/plugins/action/vyos_config.py | 131 | 4194 | #
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.vyos import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
    """Action plugin for vyos_config: renders an optional src template,
    runs the module, and writes a local backup of the device config."""

    def run(self, tmp=None, task_vars=None):
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                # BUG FIX: BaseException.message does not exist on
                # Python 3; to_text(exc) yields the same message text.
                return dict(failed=True, msg=to_text(exc))

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])

            result['backup_path'] = filepath

        # strip out any keys that have two leading and two trailing
        # underscore characters
        # BUG FIX: iterate over a snapshot -- deleting from a dict while
        # iterating its live keys() view raises RuntimeError on Python 3.
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        # Templates and backups resolve relative to the role when the
        # task runs inside one, otherwise relative to the playbook dir.
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write `contents` to backup/<host>_config.<timestamp>, replacing
        any previous backups for the same host."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # BUG FIX: close the file deterministically instead of relying on
        # garbage collection of the unreferenced file object.
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Resolve and render the `src` template into the task args.

        Raises ValueError when the resolved source path does not exist.
        """
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        # BUG FIX: was urlsplit('src') -- the literal string -- so the
        # scheme of the actual src value was never inspected.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            # NOTE(review): this return value is discarded by run(); the
            # original behaved the same way.
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # BUG FIX: hasattr was called with "_block:" (stray colon), so
            # the dependency-chain search paths were never appended.
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
boooka/GeoPowerOff | venv/lib/python2.7/site-packages/grab/djangoui/grabstat/migrations/0007_auto__chg_field_task_elapsed_time.py | 2 | 1923 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: make Task.elapsed_time non-nullable."""

    def forwards(self, orm):

        # Changing field 'Task.elapsed_time'
        # Forwards: NOT NULL with default 0 for existing rows.
        db.alter_column(u'grabstat_task', 'elapsed_time', self.gf('django.db.models.fields.IntegerField')(default=0))

    def backwards(self, orm):

        # Changing field 'Task.elapsed_time'
        # Backwards: restore nullability.
        db.alter_column(u'grabstat_task', 'elapsed_time', self.gf('django.db.models.fields.IntegerField')(null=True))

    # Frozen ORM snapshot used by South at migration time; generated code,
    # do not edit by hand.
    models = {
        u'grabstat.task': {
            'Meta': {'object_name': 'Task'},
            'elapsed_time': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'error_traceback': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_ok': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'pid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'record_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'spider_stats': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'spider_timing': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'task_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
        }
    }
complete_apps = ['grabstat'] | apache-2.0 |
Michael-Z/shooter-player | Thirdparty/jsoncpp/devtools/tarball.py | 253 | 2071 | import os.path
import gzip
import tarfile
TARGZ_DEFAULT_COMPRESSION_LEVEL = 9

def make_tarball(tarball_path, sources, base_dir, prefix_dir=''):
    """Create a gzipped tarball at `tarball_path` from `sources`.

    Parameters:
        tarball_path: output path of the .tar.gz file
        sources: list of sources to include in the tarball, relative to the current directory
        base_dir: if a source file is in a sub-directory of base_dir, then base_dir is stripped
            from path in the tarball.
        prefix_dir: all files stored in the tarball be sub-directory of prefix_dir. Set to ''
            to make them child of root.
    """
    base_dir = os.path.normpath(os.path.abspath(base_dir))

    def archive_name(path):
        """Makes path relative to base_dir and prepends prefix_dir."""
        path = os.path.normpath(os.path.abspath(path))
        common_path = os.path.commonprefix((base_dir, path))
        archive_name = path[len(common_path):]
        if os.path.isabs(archive_name):
            archive_name = archive_name[1:]
        return os.path.join(prefix_dir, archive_name)

    compression = TARGZ_DEFAULT_COMPRESSION_LEVEL
    tar = tarfile.TarFile.gzopen(tarball_path, 'w', compresslevel=compression)
    try:
        for source in sources:
            source_path = source
            if os.path.isdir(source):
                # BUG FIX: os.path.walk() is Python-2-only (removed in 3);
                # os.walk() visits the same files on both versions.
                for dirpath, _dirnames, filenames in os.walk(source_path):
                    for fname in filenames:
                        path = os.path.join(dirpath, fname)
                        # Only regular files are archived, as before.
                        if os.path.isfile(path):
                            tar.add(path, archive_name(path))
            else:
                path_in_tar = archive_name(source_path)
                tar.add(source_path, path_in_tar)  # filename, arcname
    finally:
        tar.close()
def decompress(tarball_path, base_dir):
    """Decompress the gzipped tarball into directory base_dir."""
    # tarfile.open with mode 'r:gz' dispatches to the same gzip reader as
    # TarFile.gzopen, but via the documented front-door API.
    tar = tarfile.open(tarball_path, mode='r:gz')
    try:
        # NOTE: extractall trusts the archive's member paths.
        tar.extractall(base_dir)
    finally:
        tar.close()
| gpl-2.0 |
ITURO/ituro | ituro/accounts/forms.py | 2 | 1106 | from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth import get_user_model
from django import forms
from django.utils.translation import ugettext, ugettext_lazy as _
from captcha.fields import CaptchaField
from accounts.models import CustomUser
from accounts.mixins import RemoveUsernameFieldMixin
class CustomUserCreationForm(RemoveUsernameFieldMixin, UserCreationForm):
    # Admin "add user" form for the email-based CustomUser model; the
    # mixin drops the username field the stock form expects.
    class Meta:
        model = CustomUser
        fields = ("email",)
class CustomUserChangeForm(RemoveUsernameFieldMixin, UserChangeForm):
    # Admin "change user" form counterpart; exposes every model field.
    class Meta:
        model = CustomUser
        fields = "__all__"
class RegistrationForm(forms.ModelForm):
    """Public sign-up form with a CAPTCHA and hashed password handling."""

    captcha = CaptchaField()

    class Meta:
        model = CustomUser
        fields = ("email", "password", "name", "phone", "school")
        widgets = {
            "password": forms.PasswordInput,
        }

    def save(self, commit=False):
        # NOTE(review): default commit=False inverts ModelForm.save()'s
        # usual default of True -- presumably deliberate so views control
        # persistence; confirm callers expect this.
        user = super(RegistrationForm, self).save(commit=False)
        # Hash the raw password instead of storing it verbatim.
        user.set_password(self.cleaned_data["password"])
        if commit:
            user.save()
        return user
| mit |
allotria/intellij-community | python/helpers/pycharm/django_manage.py | 21 | 1655 | #!/usr/bin/env python
import os
import sys
from _jb_utils import FileChangesTracker, jb_escape_output
from fix_getpass import fixGetpass
from pycharm_run_utils import adjust_django_sys_path
try:
from runpy import run_module
except ImportError:
from runpy_compat import run_module
adjust_django_sys_path()
# The IDE appends the project base path as the last argv element.
base_path = sys.argv.pop()

# Django manage-module name injected by the IDE; falls back to the
# conventional 'manage' module.
manage_file = os.getenv('PYCHARM_DJANGO_MANAGE_MODULE')
track_files_pattern = os.environ.get('PYCHARM_TRACK_FILES_PATTERN', None)
if not manage_file:
    manage_file = 'manage'
class _PseudoTTY(object):
"""
Wraps stdin to return "true" for isatty() to fool
"""
def __init__(self, underlying):
self.__underlying = underlying
def __getattr__(self, name):
return getattr(self.__underlying, name)
def isatty(self):
return True
if __name__ == "__main__":
    fixGetpass()
    # The management command name follows the script name in argv.
    command = sys.argv[1]
    if command in ["syncdb", "createsuperuser"]:  # List of commands that need stdin to be cheated
        sys.stdin = _PseudoTTY(sys.stdin)

    def run_command():
        # Execute the manage module as if it were run as a script.
        run_module(manage_file, None, '__main__', True)

    if track_files_pattern:
        print("Tracking file by folder pattern: ", track_files_pattern)
        file_changes_tracker = FileChangesTracker(os.getcwd(), track_files_pattern.split(":"))
        run_command()
        # Report files affected/created by commands. This info is used on Java side.
        changed_files = list(file_changes_tracker.get_changed_files())
        if changed_files:
            print("\n" + jb_escape_output(",".join(changed_files)))
    else:
        print("File tracking disabled")
        run_command()
| apache-2.0 |
pshchelo/heat | heat/engine/resources/openstack/neutron/security_group.py | 5 | 9210 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
class SecurityGroup(neutron.NeutronResource):
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
NAME, DESCRIPTION, RULES,
) = (
'name', 'description', 'rules',
)
_RULE_KEYS = (
RULE_DIRECTION, RULE_ETHERTYPE, RULE_PORT_RANGE_MIN,
RULE_PORT_RANGE_MAX, RULE_PROTOCOL, RULE_REMOTE_MODE,
RULE_REMOTE_GROUP_ID, RULE_REMOTE_IP_PREFIX,
) = (
'direction', 'ethertype', 'port_range_min',
'port_range_max', 'protocol', 'remote_mode',
'remote_group_id', 'remote_ip_prefix',
)
_rule_schema = {
RULE_DIRECTION: properties.Schema(
properties.Schema.STRING,
_('The direction in which the security group rule is applied. '
'For a compute instance, an ingress security group rule '
'matches traffic that is incoming (ingress) for that '
'instance. An egress rule is applied to traffic leaving '
'the instance.'),
default='ingress',
constraints=[
constraints.AllowedValues(['ingress', 'egress']),
]
),
RULE_ETHERTYPE: properties.Schema(
properties.Schema.STRING,
_('Ethertype of the traffic.'),
default='IPv4',
constraints=[
constraints.AllowedValues(['IPv4', 'IPv6']),
]
),
RULE_PORT_RANGE_MIN: properties.Schema(
properties.Schema.INTEGER,
_('The minimum port number in the range that is matched by the '
'security group rule. If the protocol is TCP or UDP, this '
'value must be less than or equal to the value of the '
'port_range_max attribute. If the protocol is ICMP, this '
'value must be an ICMP type.'),
constraints=[
constraints.Range(0, 65535)
]
),
RULE_PORT_RANGE_MAX: properties.Schema(
properties.Schema.INTEGER,
_('The maximum port number in the range that is matched by the '
'security group rule. The port_range_min attribute constrains '
'the port_range_max attribute. If the protocol is ICMP, this '
'value must be an ICMP type.'),
constraints=[
constraints.Range(0, 65535)
]
),
RULE_PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('The protocol that is matched by the security group rule. '
'Valid values include tcp, udp, and icmp.')
),
RULE_REMOTE_MODE: properties.Schema(
properties.Schema.STRING,
_('Whether to specify a remote group or a remote IP prefix.'),
default='remote_ip_prefix',
constraints=[
constraints.AllowedValues(['remote_ip_prefix',
'remote_group_id']),
]
),
RULE_REMOTE_GROUP_ID: properties.Schema(
properties.Schema.STRING,
_('The remote group ID to be associated with this security group '
'rule. If no value is specified then this rule will use this '
'security group for the remote_group_id. The remote mode '
'parameter must be set to "remote_group_id".')
),
RULE_REMOTE_IP_PREFIX: properties.Schema(
properties.Schema.STRING,
_('The remote IP prefix (CIDR) to be associated with this '
'security group rule.'),
constraints=[
constraints.CustomConstraint('net_cidr')
]
),
}
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('A string specifying a symbolic name for the security group, '
'which is not required to be unique.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the security group.'),
update_allowed=True
),
RULES: properties.Schema(
properties.Schema.LIST,
_('List of security group rules.'),
default=[],
schema=properties.Schema(
properties.Schema.MAP,
schema=_rule_schema
),
update_allowed=True
),
}
default_egress_rules = [
{"direction": "egress", "ethertype": "IPv4"},
{"direction": "egress", "ethertype": "IPv6"}
]
    def validate(self):
        """Reject the reserved group name; Neutron's "default" group is special."""
        super(SecurityGroup, self).validate()
        if self.properties[self.NAME] == 'default':
            msg = _('Security groups cannot be assigned the name "default".')
            raise exception.StackValidationFailed(message=msg)
    def handle_create(self):
        # Create the group first, then its rules: the group id must exist
        # before rules can reference it.
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        rules = props.pop(self.RULES, [])

        sec = self.neutron().create_security_group(
            {'security_group': props})['security_group']
        self.resource_id_set(sec['id'])

        self._create_rules(rules)
def _format_rule(self, r):
rule = dict(r)
rule['security_group_id'] = self.resource_id
if 'remote_mode' in rule:
remote_mode = rule.get(self.RULE_REMOTE_MODE)
del(rule[self.RULE_REMOTE_MODE])
if remote_mode == self.RULE_REMOTE_GROUP_ID:
rule[self.RULE_REMOTE_IP_PREFIX] = None
if not rule.get(self.RULE_REMOTE_GROUP_ID):
# if remote group is not specified then make this
# a self-referencing rule
rule[self.RULE_REMOTE_GROUP_ID] = self.resource_id
else:
rule[self.RULE_REMOTE_GROUP_ID] = None
for key in (self.RULE_PORT_RANGE_MIN, self.RULE_PORT_RANGE_MAX):
if rule.get(key) is not None:
rule[key] = str(rule[key])
return rule
    def _create_rules(self, rules):
        """Create Neutron rules; the first egress rule seen replaces
        Neutron's default allow-all egress rules."""
        egress_deleted = False

        for i in rules:
            if i[self.RULE_DIRECTION] == 'egress' and not egress_deleted:
                # There is at least one egress rule, so delete the default
                # rules which allow all egress traffic
                egress_deleted = True

                def is_egress(rule):
                    return rule[self.RULE_DIRECTION] == 'egress'

                self._delete_rules(is_egress)

            rule = self._format_rule(i)

            try:
                self.neutron().create_security_group_rule(
                    {'security_group_rule': rule})
            except Exception as ex:
                # A pre-existing identical rule (conflict) is acceptable.
                if not self.client_plugin().is_conflict(ex):
                    raise
    def _delete_rules(self, to_delete=None):
        """Delete this group's rules; `to_delete` optionally filters which
        rules are removed (None means all of them)."""
        try:
            sec = self.neutron().show_security_group(
                self.resource_id)['security_group']
        except Exception as ex:
            # Group already gone -- nothing to delete.
            self.client_plugin().ignore_not_found(ex)
        else:
            for rule in sec['security_group_rules']:
                if to_delete is None or to_delete(rule):
                    try:
                        self.neutron().delete_security_group_rule(rule['id'])
                    except Exception as ex:
                        self.client_plugin().ignore_not_found(ex)
    def handle_delete(self):
        # Nothing was ever created (e.g. create failed before the group
        # id was assigned).
        if self.resource_id is None:
            return

        self._delete_rules()
        try:
            self.neutron().delete_security_group(self.resource_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the group's properties, then rebuild the rule set."""
        props = self.prepare_update_properties(json_snippet)
        rules = props.pop(self.RULES, [])

        self.neutron().update_security_group(
            self.resource_id, {'security_group': props})

        # handle rules changes by:
        # * deleting all rules
        # * restoring the default egress rules
        # * creating new rules
        self._delete_rules()
        self._create_rules(self.default_egress_rules)
        if rules:
            self._create_rules(rules)
def resource_mapping():
    # Registers this plugin's resource type with the Heat engine.
    return {
        'OS::Neutron::SecurityGroup': SecurityGroup,
    }
| apache-2.0 |
2014cdbg4/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_program.py | 738 | 10833 | import io
import os
import sys
import unittest
class Test_TestProgram(unittest.TestCase):
    """Tests for unittest.TestProgram (white-box in places)."""

    def test_discovery_from_dotted_path(self):
        # discover() with a dotted package name must resolve to the
        # package directory and forward to _find_tests.
        loader = unittest.TestLoader()

        tests = [self]
        expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))

        self.wasRun = False
        def _find_tests(start_dir, pattern):
            self.wasRun = True
            self.assertEqual(start_dir, expectedPath)
            return tests
        loader._find_tests = _find_tests
        suite = loader.discover('unittest.test')
        self.assertTrue(self.wasRun)
        self.assertEqual(suite._tests, tests)

    # Horrible white box test
    def testNoExit(self):
        result = object()
        test = object()

        class FakeRunner(object):
            def run(self, test):
                self.test = test
                return result

        runner = FakeRunner()

        # Stub parseArgs and inject a fake test, restoring both afterwards.
        oldParseArgs = unittest.TestProgram.parseArgs
        def restoreParseArgs():
            unittest.TestProgram.parseArgs = oldParseArgs
        unittest.TestProgram.parseArgs = lambda *args: None
        self.addCleanup(restoreParseArgs)

        def removeTest():
            del unittest.TestProgram.test
        unittest.TestProgram.test = test
        self.addCleanup(removeTest)

        program = unittest.TestProgram(testRunner=runner, exit=False, verbosity=2)

        self.assertEqual(program.result, result)
        self.assertEqual(runner.test, test)
        self.assertEqual(program.verbosity, 2)

    class FooBar(unittest.TestCase):
        # Deliberately one passing and one failing test.
        def testPass(self):
            assert True
        def testFail(self):
            assert False

    class FooBarLoader(unittest.TestLoader):
        """Test loader that returns a suite containing FooBar."""
        def loadTestsFromModule(self, module):
            return self.suiteClass(
                [self.loadTestsFromTestCase(Test_TestProgram.FooBar)])

    def test_NonExit(self):
        # exit=False must not raise SystemExit and must expose .result.
        program = unittest.main(exit=False,
                                argv=["foobar"],
                                testRunner=unittest.TextTestRunner(stream=io.StringIO()),
                                testLoader=self.FooBarLoader())
        self.assertTrue(hasattr(program, 'result'))

    def test_Exit(self):
        # Explicit exit=True raises SystemExit after running.
        self.assertRaises(
            SystemExit,
            unittest.main,
            argv=["foobar"],
            testRunner=unittest.TextTestRunner(stream=io.StringIO()),
            exit=True,
            testLoader=self.FooBarLoader())

    def test_ExitAsDefault(self):
        # Exiting is the default behaviour of unittest.main().
        self.assertRaises(
            SystemExit,
            unittest.main,
            argv=["foobar"],
            testRunner=unittest.TextTestRunner(stream=io.StringIO()),
            testLoader=self.FooBarLoader())
class InitialisableProgram(unittest.TestProgram):
    # TestProgram stand-in whose __init__ does nothing, so each test can
    # set exactly the attributes it needs before calling runTests().
    exit = False
    result = None
    verbosity = 1
    defaultTest = None
    testRunner = None
    testLoader = unittest.defaultTestLoader
    module = '__main__'
    progName = 'test'
    test = 'test'

    def __init__(self, *args):
        pass
RESULT = object()
class FakeRunner(object):
    # Records (on the class) the kwargs it was constructed with and the
    # test it ran; raiseError simulates an old-style runner whose
    # constructor rejects the newer keyword arguments once.
    initArgs = None
    test = None
    raiseError = False

    def __init__(self, **kwargs):
        FakeRunner.initArgs = kwargs
        if FakeRunner.raiseError:
            FakeRunner.raiseError = False
            raise TypeError

    def run(self, test):
        FakeRunner.test = test
        return RESULT
class TestCommandLineArgs(unittest.TestCase):
    """Exercise TestProgram command-line parsing and runTests() glue,
    using the InitialisableProgram and FakeRunner stand-ins above."""
    def setUp(self):
        # Fresh stub program with test creation disabled, plus a clean
        # slate for FakeRunner's class-level recording attributes.
        self.program = InitialisableProgram()
        self.program.createTests = lambda: None
        FakeRunner.initArgs = None
        FakeRunner.test = None
        FakeRunner.raiseError = False
    def testVerbosity(self):
        """-q/--quiet sets verbosity to 0; -v/--verbose sets it to 2."""
        program = self.program
        for opt in '-q', '--quiet':
            program.verbosity = 1
            program.parseArgs([None, opt])
            self.assertEqual(program.verbosity, 0)
        for opt in '-v', '--verbose':
            program.verbosity = 1
            program.parseArgs([None, opt])
            self.assertEqual(program.verbosity, 2)
    def testBufferCatchFailfast(self):
        """Short and long option forms set their attribute, but do not
        overwrite a value that is already set."""
        program = self.program
        for arg, attr in (('buffer', 'buffer'), ('failfast', 'failfast'),
                          ('catch', 'catchbreak')):
            # NOTE(review): attr is 'catchbreak' here, never 'catch', so
            # this guard can never fire -- presumably 'arg' was intended.
            if attr == 'catch' and not hasInstallHandler:
                continue
            short_opt = '-%s' % arg[0]
            long_opt = '--%s' % arg
            for opt in short_opt, long_opt:
                setattr(program, attr, None)
                program.parseArgs([None, opt])
                self.assertTrue(getattr(program, attr))
            for opt in short_opt, long_opt:
                not_none = object()
                setattr(program, attr, not_none)
                program.parseArgs([None, opt])
                self.assertEqual(getattr(program, attr), not_none)
    def testWarning(self):
        """Test the warnings argument"""
        # see #10535
        class FakeTP(unittest.TestProgram):
            def parseArgs(self, *args, **kw): pass
            def runTests(self, *args, **kw): pass
        # Save and restore sys.warnoptions so this test is hermetic.
        warnoptions = sys.warnoptions[:]
        try:
            sys.warnoptions[:] = []
            # no warn options, no arg -> default
            self.assertEqual(FakeTP().warnings, 'default')
            # no warn options, w/ arg -> arg value
            self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
            sys.warnoptions[:] = ['somevalue']
            # warn options, no arg -> None
            # warn options, w/ arg -> arg value
            self.assertEqual(FakeTP().warnings, None)
            self.assertEqual(FakeTP(warnings='ignore').warnings, 'ignore')
        finally:
            sys.warnoptions[:] = warnoptions
    def testRunTestsRunnerClass(self):
        """runTests() instantiates a runner class with the program's
        verbosity/failfast/buffer/warnings settings."""
        program = self.program
        program.testRunner = FakeRunner
        program.verbosity = 'verbosity'
        program.failfast = 'failfast'
        program.buffer = 'buffer'
        program.warnings = 'warnings'
        program.runTests()
        self.assertEqual(FakeRunner.initArgs, {'verbosity': 'verbosity',
                                                'failfast': 'failfast',
                                                'buffer': 'buffer',
                                                'warnings': 'warnings'})
        self.assertEqual(FakeRunner.test, 'test')
        self.assertIs(program.result, RESULT)
    def testRunTestsRunnerInstance(self):
        """A runner *instance* is used as-is, without re-instantiation."""
        program = self.program
        program.testRunner = FakeRunner()
        FakeRunner.initArgs = None
        program.runTests()
        # A new FakeRunner should not have been instantiated
        self.assertIsNone(FakeRunner.initArgs)
        self.assertEqual(FakeRunner.test, 'test')
        self.assertIs(program.result, RESULT)
    def testRunTestsOldRunnerClass(self):
        """A runner class whose __init__ raises TypeError on the new
        keyword arguments is retried with no arguments."""
        program = self.program
        FakeRunner.raiseError = True
        program.testRunner = FakeRunner
        program.verbosity = 'verbosity'
        program.failfast = 'failfast'
        program.buffer = 'buffer'
        program.test = 'test'
        program.runTests()
        # If initialising raises a type error it should be retried
        # without the new keyword arguments
        self.assertEqual(FakeRunner.initArgs, {})
        self.assertEqual(FakeRunner.test, 'test')
        self.assertIs(program.result, RESULT)
    def testCatchBreakInstallsHandler(self):
        """catchbreak=True makes runTests() install the ^C handler."""
        module = sys.modules['unittest.main']
        original = module.installHandler
        def restore():
            module.installHandler = original
        self.addCleanup(restore)
        self.installed = False
        def fakeInstallHandler():
            self.installed = True
        module.installHandler = fakeInstallHandler
        program = self.program
        program.catchbreak = True
        program.testRunner = FakeRunner
        program.runTests()
        self.assertTrue(self.installed)
    def _patch_isfile(self, names, exists=True):
        # Replace os.path.isfile with a membership test against *names*
        # for the duration of the test.  (The *exists* parameter is unused.)
        def isfile(path):
            return path in names
        original = os.path.isfile
        os.path.isfile = isfile
        def restore():
            os.path.isfile = original
        self.addCleanup(restore)
    def testParseArgsFileNames(self):
        # running tests with filenames instead of module names
        program = self.program
        argv = ['progname', 'foo.py', 'bar.Py', 'baz.PY', 'wing.txt']
        self._patch_isfile(argv)
        program.createTests = lambda: None
        program.parseArgs(argv)
        # note that 'wing.txt' is not a Python file so the name should
        # *not* be converted to a module name
        expected = ['foo', 'bar', 'baz', 'wing.txt']
        self.assertEqual(program.testNames, expected)
    def testParseArgsFilePaths(self):
        """Relative paths (either separator style) become dotted names."""
        program = self.program
        argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
        self._patch_isfile(argv)
        program.createTests = lambda: None
        program.parseArgs(argv)
        expected = ['foo.bar.baz', 'green.red']
        self.assertEqual(program.testNames, expected)
    def testParseArgsNonExistentFiles(self):
        """Names that are not files on disk pass through unchanged."""
        program = self.program
        argv = ['progname', 'foo/bar/baz.py', 'green\\red.py']
        self._patch_isfile([])
        program.createTests = lambda: None
        program.parseArgs(argv)
        self.assertEqual(program.testNames, argv[1:])
    def testParseArgsAbsolutePathsThatCanBeConverted(self):
        """Absolute paths below the CWD are converted to dotted names."""
        cur_dir = os.getcwd()
        program = self.program
        def _join(name):
            return os.path.join(cur_dir, name)
        argv = ['progname', _join('foo/bar/baz.py'), _join('green\\red.py')]
        self._patch_isfile(argv)
        program.createTests = lambda: None
        program.parseArgs(argv)
        expected = ['foo.bar.baz', 'green.red']
        self.assertEqual(program.testNames, expected)
    def testParseArgsAbsolutePathsThatCannotBeConverted(self):
        """Absolute paths outside the CWD stay as plain path names."""
        program = self.program
        # even on Windows '/...' is considered absolute by os.path.abspath
        argv = ['progname', '/foo/bar/baz.py', '/green/red.py']
        self._patch_isfile(argv)
        program.createTests = lambda: None
        program.parseArgs(argv)
        self.assertEqual(program.testNames, argv[1:])
    # it may be better to use platform specific functions to normalise paths
    # rather than accepting '.PY' and '\' as file seprator on Linux / Mac
    # it would also be better to check that a filename is a valid module
    # identifier (we have a regex for this in loader.py)
    # for invalid filenames should we raise a useful error rather than
    # leaving the current error message (import of filename fails) in place?
# Run this module's tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
tpsatish95/Python-Workshop | Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/_bootlocale.py | 168 | 1301 | """A minimal subset of the locale module used at interpreter startup
(imported by the _io module), in order to reduce startup time.
Don't import directly from third-party code; use the `locale` module instead!
"""
import sys
import _locale
# Pick the leanest getpreferredencoding() implementation the platform
# allows; this module runs at interpreter startup, so importing the full
# locale module is avoided whenever possible.
if sys.platform.startswith("win"):
    def getpreferredencoding(do_setlocale=True):
        # On Windows the encoding comes from the default locale's code
        # page; CODESET/nl_langinfo are not available there.
        return _locale._getdefaultlocale()[1]
else:
    try:
        _locale.CODESET
    except AttributeError:
        def getpreferredencoding(do_setlocale=True):
            # This path for legacy systems needs the more complex
            # getdefaultlocale() function, import the full locale module.
            import locale
            return locale.getpreferredencoding(do_setlocale)
    else:
        def getpreferredencoding(do_setlocale=True):
            # At startup the locale has not been set yet, so callers must
            # pass do_setlocale=False.
            assert not do_setlocale
            result = _locale.nl_langinfo(_locale.CODESET)
            if not result and sys.platform == 'darwin':
                # nl_langinfo can return an empty string
                # when the setting has an invalid value.
                # Default to UTF-8 in that case because
                # UTF-8 is the default charset on OSX and
                # returning nothing will crash the
                # interpreter.
                result = 'UTF-8'
            return result
| apache-2.0 |
keithhendry/treadmill | treadmill/cli/admin/__init__.py | 3 | 2234 | """Implementation of treadmill-admin CLI plugin."""
import logging
import pkgutil

import click
import dns.exception  # pylint: disable=E0611
import dns.resolver  # ON_EXCEPTIONS references dns.resolver.NXDOMAIN/YXDOMAIN
import kazoo
import kazoo.exceptions
import ldap3

from treadmill import restclient
from treadmill import cli
from treadmill import context
__path__ = pkgutil.extend_path(__path__, __name__)
def _handle_no_such_ldap_obj(err):
    """Report an LDAPNoSuchObjectResult as a friendly "does not exist" error.

    The resource type is inferred from the OU component of the missing DN;
    unrecognised DNs are reported verbatim.
    """
    ou_to_type = (
        ('ou=cells', 'cell'),
        ('ou=allocations', 'allocation'),
        ('ou=apps', 'app'),
        ('ou=dns-servers', 'dns configuration'),
    )
    rsrc_type = 'resource [%s]' % err.dn
    for marker, friendly_name in ou_to_type:
        if marker in err.dn:
            rsrc_type = friendly_name
            break
    click.echo('Error: %s does not exist.' % rsrc_type, err=True)
# Shared decorator for admin commands: maps backend exceptions to
# user-facing error messages.  A message of None means the paired handler
# (or the exception itself) supplies its own text.
ON_EXCEPTIONS = cli.handle_exceptions([
    (ldap3.LDAPInsufficientAccessRightsResult, 'Error: access denied.'),
    (ldap3.LDAPBindError, 'Error: invalid credentials.'),
    (ldap3.LDAPNoSuchObjectResult, _handle_no_such_ldap_obj),
    (kazoo.exceptions.NoAuthError, 'Error: not authorized.'),
    (kazoo.exceptions.NoNodeError, 'Error: resource does not exist.'),
    (restclient.NotAuthorizedError, cli.handle_not_authorized),
    (restclient.MaxRequestRetriesError, None),
    (dns.exception.Timeout, 'Error: DNS server timeout.'),
    (dns.resolver.NXDOMAIN, 'Error: Could not resolve DNS record.'),
    (dns.resolver.YXDOMAIN, 'Error: DNS error.'),
    (context.ContextError, None),
])
def init():
    """Return top level command handler."""
    @click.group(cls=cli.make_multi_command(__name__))
    @click.option('--zookeeper', required=False,
                  envvar='TREADMILL_ZOOKEEPER',
                  callback=cli.handle_context_opt,
                  expose_value=False)
    @click.pass_context
    def run(ctx):
        """Admin commands."""
        cli.init_logger('admin.conf')
        # Default to WARN; switch to DEBUG when the top-level CLI stored
        # a debug flag in the click context.
        log_level = logging.WARN
        if ctx.obj.get('logging.debug'):
            log_level = logging.DEBUG
        logging.getLogger('treadmill').setLevel(log_level)
        logging.getLogger().setLevel(log_level)
    return run
| apache-2.0 |
knipknap/exscript | Exscript/protocols/osguesser.py | 3 | 4852 | #
# Copyright (C) 2010-2017 Samuel Abels
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from builtins import object
from Exscript.protocols.drivers import drivers
class OsGuesser(object):
    """Passively infers the remote operating system from network traffic.

    The guesser never touches the connection itself; it only watches the
    bytes that flow by, matches them against the per-driver detection
    hooks, and records each finding with a confidence level.  A protocol
    adapter may then query the collected information and act on it.
    """

    def __init__(self):
        # info maps key -> (confidence, value); 'os' starts out unknown.
        self.info = {}
        self.debug = False
        self.protocol_os_map = [d._check_protocol for d in drivers]
        self.auth_os_map = [d._check_head for d in drivers]
        self.os_map = [d._check_response for d in drivers]
        self.auth_buffer = ''
        self.set('os', 'unknown', 0)

    def reset(self, auth_buffer=''):
        self.__init__()
        self.auth_buffer = auth_buffer

    def set(self, key, value, confidence=100):
        """
        Defines the given value with the given confidence, unless the same
        value is already defined with a higher confidence level.
        """
        if value is None:
            return
        existing = self.info.get(key)
        if existing is not None and existing[0] >= confidence:
            return
        self.info[key] = (confidence, value)

    def set_from_match(self, key, regex_list, string):
        """
        Runs *string* through a list of matchers and records any hits.

        Each matcher is either a callable ``f(string) -> (value, confidence)``
        or a ``(regex, value, confidence)`` tuple whose value is recorded
        whenever the regex matches.  Values are stored via :meth:`set`,
        so only higher-confidence findings replace earlier ones.
        """
        for matcher in regex_list:
            if hasattr(matcher, '__call__'):
                self.set(key, *matcher(string))
            else:
                pattern, value, confidence = matcher
                if pattern.search(string):
                    self.set(key, value, confidence)

    def get(self, key, confidence=0):
        """
        Returns the stored value for *key* if its confidence is at least
        *confidence*; returns None otherwise.
        """
        stored = self.info.get(key)
        if stored is None:
            return None
        conf, value = stored
        return value if conf >= confidence else None

    def data_received(self, data, app_authentication_done):
        if app_authentication_done:
            # Authentication is over: use the normal "runtime" matchers,
            # but stop looking once we are at least 80 percent certain.
            if self.get('os', 80) in ('unknown', None):
                self.set_from_match('os', self.os_map, data)
            return
        # Still authenticating: accumulate the head of the session and
        # match it against both the login-banner and runtime patterns.
        self.auth_buffer += data
        if self.debug:
            print("DEBUG: Matching buffer:", repr(self.auth_buffer))
        self.set_from_match('os', self.auth_os_map, self.auth_buffer)
        self.set_from_match('os', self.os_map, self.auth_buffer)

    def protocol_info(self, data):
        if self.debug:
            print("DEBUG: Protocol info:", repr(data))
        self.set_from_match('os', self.protocol_os_map, data)
| mit |
luiscarlosgph/nas | env/lib/python2.7/site-packages/django/core/cache/backends/base.py | 104 | 8793 | "Base Cache class."
from __future__ import unicode_literals
import time
import warnings
from django.core.exceptions import ImproperlyConfigured, DjangoRuntimeWarning
from django.utils.module_loading import import_string
class InvalidCacheBackendError(ImproperlyConfigured):
    """The configured cache backend could not be imported or set up."""
    pass
class CacheKeyWarning(DjangoRuntimeWarning):
    """Warns about cache keys that would break on some backends (memcached)."""
    pass
# Stub class to ensure not passing in a `timeout` argument results in
# the default timeout
# (a unique sentinel, distinguishable from every real timeout value).
DEFAULT_TIMEOUT = object()
# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
def default_key_func(key, key_prefix, version):
    """
    Default cache-key builder.

    Joins the prefix, version and raw key with ':' separators.  Projects
    can supply an alternate function through the KEY_FUNCTION setting for
    custom key making behavior.
    """
    return ':'.join((str(key_prefix), str(version), str(key)))
def get_key_func(key_func):
    """
    Resolve which key function to use.

    ``None`` selects ``default_key_func``; a callable is returned as-is;
    anything else is treated as a dotted path and imported.
    """
    if key_func is None:
        return default_key_func
    if callable(key_func):
        return key_func
    return import_string(key_func)
class BaseCache(object):
    """Abstract base class for Django cache backends.

    Parses the settings common to all backends (timeout, max entries,
    cull frequency, key prefix/version/function) and defines the backend
    interface.  Concrete backends must implement add/get/set/delete/clear;
    the remaining methods have generic (possibly slow) default
    implementations built on top of those primitives.
    """
    def __init__(self, params):
        # Each numeric setting silently falls back to its default when
        # missing or unparseable, rather than raising.
        timeout = params.get('timeout', params.get('TIMEOUT', 300))
        if timeout is not None:
            try:
                timeout = int(timeout)
            except (ValueError, TypeError):
                timeout = 300
        self.default_timeout = timeout
        options = params.get('OPTIONS', {})
        max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
        self.key_prefix = params.get('KEY_PREFIX', '')
        self.version = params.get('VERSION', 1)
        self.key_func = get_key_func(params.get('KEY_FUNCTION', None))
    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """
        Returns the timeout value usable by this backend based upon the provided
        timeout.
        """
        # DEFAULT_TIMEOUT is a sentinel object; equality here is identity.
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout
        elif timeout == 0:
            # ticket 21147 - avoid time.time() related precision issues
            timeout = -1
        # None means "cache forever"; otherwise an absolute expiry time.
        return None if timeout is None else time.time() + timeout
    def make_key(self, key, version=None):
        """Constructs the key used by all other methods. By default it
        uses the key_func to generate a key (which, by default,
        prepends the `key_prefix' and 'version'). An different key
        function can be provided at the time of cache construction;
        alternatively, you can subclass the cache backend to provide
        custom key making behavior.
        """
        if version is None:
            version = self.version
        new_key = self.key_func(key, self.key_prefix, version)
        return new_key
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache if the key does not already exist. If
        timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        Returns True if the value was stored, False otherwise.
        """
        raise NotImplementedError('subclasses of BaseCache must provide an add() method')
    def get(self, key, default=None, version=None):
        """
        Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a get() method')
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a set() method')
    def delete(self, key, version=None):
        """
        Delete a key from the cache, failing silently.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a delete() method')
    def get_many(self, keys, version=None):
        """
        Fetch a bunch of keys from the cache. For certain backends (memcached,
        pgsql) this can be *much* faster when fetching multiple values.
        Returns a dict mapping each key in keys to its value. If the given
        key is missing, it will be missing from the response dict.
        """
        d = {}
        for k in keys:
            val = self.get(k, version=version)
            if val is not None:
                d[k] = val
        return d
    def has_key(self, key, version=None):
        """
        Returns True if the key is in the cache and has not expired.
        """
        return self.get(key, version=version) is not None
    def incr(self, key, delta=1, version=None):
        """
        Add delta to value in the cache. If the key does not exist, raise a
        ValueError exception.
        """
        # NOTE: get-then-set is not atomic; backends may override with an
        # atomic native increment.
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        new_value = value + delta
        self.set(key, new_value, version=version)
        return new_value
    def decr(self, key, delta=1, version=None):
        """
        Subtract delta from value in the cache. If the key does not exist, raise
        a ValueError exception.
        """
        return self.incr(key, -delta, version=version)
    def __contains__(self, key):
        """
        Returns True if the key is in the cache and has not expired.
        """
        # This is a separate method, rather than just a copy of has_key(),
        # so that it always has the same functionality as has_key(), even
        # if a subclass overrides it.
        return self.has_key(key)
    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a bunch of values in the cache at once from a dict of key/value
        pairs. For certain backends (memcached), this is much more efficient
        than calling set() multiple times.
        If timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        """
        for key, value in data.items():
            self.set(key, value, timeout=timeout, version=version)
    def delete_many(self, keys, version=None):
        """
        Set a bunch of values in the cache at once. For certain backends
        (memcached), this is much more efficient than calling delete() multiple
        times.
        """
        for key in keys:
            self.delete(key, version=version)
    def clear(self):
        """Remove *all* values from the cache at once."""
        raise NotImplementedError('subclasses of BaseCache must provide a clear() method')
    def validate_key(self, key):
        """
        Warn about keys that would not be portable to the memcached
        backend. This encourages (but does not force) writing backend-portable
        cache code.
        """
        if len(key) > MEMCACHE_MAX_KEY_LENGTH:
            warnings.warn('Cache key will cause errors if used with memcached: '
                    '%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH),
                    CacheKeyWarning)
        for char in key:
            # memcached rejects control characters and spaces (ord < 33)
            # as well as DEL (127).
            if ord(char) < 33 or ord(char) == 127:
                warnings.warn('Cache key contains characters that will cause '
                              'errors if used with memcached: %r' % key,
                              CacheKeyWarning)
    def incr_version(self, key, delta=1, version=None):
        """Adds delta to the cache version for the supplied key. Returns the
        new version.
        """
        if version is None:
            version = self.version
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        # Copy the value to the new version, then drop the old one.
        self.set(key, value, version=version + delta)
        self.delete(key, version=version)
        return version + delta
    def decr_version(self, key, delta=1, version=None):
        """Substracts delta from the cache version for the supplied key. Returns
        the new version.
        """
        return self.incr_version(key, -delta, version)
    def close(self, **kwargs):
        """Close the cache connection"""
        pass
| mit |
popazerty/beyonwiz-4.1 | lib/python/Components/MovieList.py | 1 | 24988 | import os
import struct
import random
from enigma import eListboxPythonMultiContent, eListbox, gFont, iServiceInformation, eSize, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_VALIGN_CENTER, eServiceReference, eServiceCenter, eTimer
from GUIComponent import GUIComponent
from Tools.FuzzyDate import FuzzyTime
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest, MultiContentEntryProgress
from Components.config import config
from Tools.LoadPixmap import LoadPixmap
from Tools.Directories import SCOPE_ACTIVE_SKIN, resolveFilename
from Screens.LocationBox import defaultInhibitDirs
import NavigationInstance
import skin
# File-extension sets used to classify directory entries in the movie list.
AUDIO_EXTENSIONS = frozenset((".dts", ".mp3", ".wav", ".wave", ".ogg", ".flac", ".m4a", ".mp2", ".m2a", ".3gp", ".3g2", ".asf", ".wma"))
DVD_EXTENSIONS = ('.iso', '.img')
IMAGE_EXTENSIONS = frozenset((".jpg", ".jpeg", ".png", ".gif", ".bmp"))
MOVIE_EXTENSIONS = frozenset((".mpg", ".mpeg", ".vob", ".wav", ".m4v", ".mkv", ".avi", ".divx", ".dat", ".flv", ".mp4", ".mov", ".wmv", ".m2ts"))
KNOWN_EXTENSIONS = MOVIE_EXTENSIONS.union(IMAGE_EXTENSIONS, DVD_EXTENSIONS, AUDIO_EXTENSIONS)
cutsParser = struct.Struct('>QI') # big-endian, 64-bit PTS and 32-bit type
class MovieListData:
    """Per-row cache of computed display data (name, length, icon,
    progress), filled lazily by MovieList.buildMovieListEntry()."""
    def __init__(self):
        pass
# iStaticServiceInformation
# Minimal stand-in for enigma's static service information interface,
# used for plain files that carry no real service metadata.
class StubInfo:
    def __init__(self):
        pass

    def getName(self, serviceref):
        """Directory paths are returned whole, files as their basename."""
        path = serviceref.getPath()
        return path if path.endswith('/') else os.path.basename(path)

    def getLength(self, serviceref):
        return -1  # duration unknown

    def getEvent(self, serviceref, *args):
        return None  # plain files have no EPG event

    def isPlayable(self):
        return True

    def getInfo(self, serviceref, w):
        """Answer a few stat()-backed queries; any other query -- or any
        error while answering -- yields 0."""
        try:
            if w == iServiceInformation.sTimeCreate:
                return os.stat(serviceref.getPath()).st_ctime
            if w == iServiceInformation.sFileSize:
                return os.stat(serviceref.getPath()).st_size
            if w == iServiceInformation.sDescription:
                return serviceref.getPath()
        except:
            pass
        return 0

    def getInfoString(self, serviceref, w):
        return ''

justStubInfo = StubInfo()
def lastPlayPosFromCache(ref):
    """Return the cached resume point for *ref*, or None if not cached.

    Indices 1 and 2 of the returned value are used below as the last-play
    PTS position and the length.  The import is local to avoid a circular
    dependency with InfoBarGenerics.
    """
    from Screens.InfoBarGenerics import resumePointCache
    return resumePointCache.get(ref.toString(), None)
def moviePlayState(cutsFileName, ref, length):
    """Returns None, 0..100 for percentage"""
    # The watched percentage is derived from the .cuts file (cut type 3 is
    # the last-play position), with the in-memory resume-point cache as
    # fallback.  None means the movie appears never to have been started.
    try:
        # read the cuts file first
        f = open(cutsFileName, 'rb')
        lastCut = None
        cutPTS = None
        while 1:
            data = f.read(cutsParser.size)
            if len(data) < cutsParser.size:
                break
            cut, cutType = cutsParser.unpack(data)
            if cutType == 3: # undocumented, but 3 appears to be the stop
                cutPTS = cut
            else:
                lastCut = cut
        f.close()
        # See what we have in RAM (it might help)
        last = lastPlayPosFromCache(ref)
        if last:
            # Get the length from the cache
            if not lastCut:
                lastCut = last[2]
            # Get the cut point from the cache if not in the file
            if not cutPTS:
                cutPTS = last[1]
        if cutPTS is None:
            # Unseen movie
            return None
        if not lastCut:
            # presumably length is in seconds and 90000 converts to PTS
            # ticks -- TODO confirm
            if length and (length > 0):
                lastCut = length * 90000
            else:
                # dunno
                return 0
        if cutPTS >= lastCut:
            return 100
        return (100 * cutPTS) // lastCut
    except:
        # No or unreadable cuts file: fall back entirely to the RAM cache.
        cutPTS = lastPlayPosFromCache(ref)
        if cutPTS:
            if not length or (length<0):
                length = cutPTS[2]
            if length:
                if cutPTS[1] >= length:
                    return 100
                return (100 * cutPTS[1]) // length
            else:
                return 0
        return None
def resetMoviePlayState(cutsFileName, ref=None):
    """Forget the last-play position of a movie.

    Drops the cached resume point for *ref* (when given) and rewrites the
    cuts file without any type-3 (last-position) marks.  Best effort: any
    error is deliberately swallowed.
    """
    try:
        if ref is not None:
            from Screens.InfoBarGenerics import delResumePoint
            delResumePoint(ref)
        f = open(cutsFileName, 'rb')
        cutlist = []
        while 1:
            data = f.read(cutsParser.size)
            if len(data) < cutsParser.size:
                break
            cut, cutType = cutsParser.unpack(data)
            if cutType != 3:
                # keep every record except the last-play position
                cutlist.append(data)
        f.close()
        f = open(cutsFileName, 'wb')
        f.write(''.join(cutlist))
        f.close()
    except:
        pass
#import sys
#print "[MovieList] Exception in resetMoviePlayState: %s: %s" % sys.exc_info()[:2]
class MovieList(GUIComponent):
    """List box component showing recordings and subdirectories."""
    # Sort modes.
    SORT_ALPHANUMERIC = 1
    SORT_RECORDED = 2
    SHUFFLE = 3
    SORT_ALPHANUMERIC_REVERSE = 4
    SORT_RECORDED_REVERSE = 5
    SORT_ALPHANUMERIC_FLAT = 6
    SORT_ALPHANUMERIC_FLAT_REVERSE = 7
    # Description display states.
    HIDE_DESCRIPTION = 1
    SHOW_DESCRIPTION = 2
    # Directory names never shown in the list (network-share noise).
    dirNameExclusions = ['.AppleDouble', '.AppleDesktop', '.AppleDB',
                         'Network Trash Folder', 'Temporary Items',
                         '.TemporaryItems']
    def __init__(self, root, sort_type=None, descr_state=None):
        """Create the list; *root* is the directory service reference to
        load immediately (or None to defer loading)."""
        GUIComponent.__init__(self)
        self.list = []
        self.descr_state = descr_state or self.HIDE_DESCRIPTION
        self.sort_type = sort_type or self.SORT_RECORDED
        self.firstFileEntry = 0
        self.parentDirectory = 0
        self.fontName = "Regular"
        self.fontSize = 20
        self.listHeight = None
        self.listWidth = None
        self.reloadDelayTimer = None
        self.l = eListboxPythonMultiContent()
        self.tags = set()
        self.root = None
        self.list = None
        self._playInBackground = None
        self._playInForeground = None
        self._char = ''
        if root is not None:
            self.reload(root)
        self.l.setBuildFunc(self.buildMovieListEntry)
        self.onSelectionChanged = [ ]
        # Pre-load all status icons from the active skin.
        self.iconPart = []
        for part in range(5):
            self.iconPart.append(LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/part_%d_4.png" % part)))
        self.iconMovieRec = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/part_new.png"))
        self.iconMoviePlay = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/movie_play.png"))
        self.iconMoviePlayRec = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/movie_play_rec.png"))
        self.iconUnwatched = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/part_unwatched.png"))
        self.iconFolder = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/folder.png"))
        self.iconTrash = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/trashcan.png"))
        self.runningTimers = {}
        self.updateRecordings()
        self.updatePlayPosCache()
    def applySkin(self, desktop, screen):
        """Consume the "font" skin attribute ourselves (family + size),
        pass everything else through, then size the list."""
        if self.skinAttributes is not None:
            attribs = [ ]
            for (attrib, value) in self.skinAttributes:
                if attrib == "font":
                    font = skin.parseFont(value, ((1,1),(1,1)))
                    self.fontName = font.family
                    self.fontSize = font.pointSize
                else:
                    attribs.append((attrib,value))
            self.skinAttributes = attribs
        rc = GUIComponent.applySkin(self, desktop, screen)
        self.listHeight = self.instance.size().height()
        self.listWidth = self.instance.size().width()
        self.setItemsPerPage()
        return rc
    def get_playInBackground(self):
        """Service currently playing in the background (or None)."""
        return self._playInBackground
    def set_playInBackground(self, value):
        # Invalidate both the row of the previously playing service and
        # the newly playing one so their play-state icons get redrawn.
        if self._playInBackground is not value:
            index = self.findService(self._playInBackground)
            if index is not None:
                self.invalidateItem(index)
                self.l.invalidateEntry(index)
            index = self.findService(value)
            if index is not None:
                self.invalidateItem(index)
                self.l.invalidateEntry(index)
            self._playInBackground = value
    playInBackground = property(get_playInBackground, set_playInBackground)
    def get_playInForeground(self):
        """Service currently playing in the foreground (or None)."""
        return self._playInForeground
    def set_playInForeground(self, value):
        self._playInForeground = value
    playInForeground = property(get_playInForeground, set_playInForeground)
    def updatePlayPosCache(self):
        # Refresh the module-level resume-point cache (local import to
        # avoid a circular dependency with InfoBarGenerics).
        from Screens.InfoBarGenerics import updateresumePointCache
        updateresumePointCache()
    def updateRecordings(self, timer=None):
        """Rebuild the basename -> timer map of running recordings; when
        it changed due to a timer event, schedule a delayed reload."""
        if timer is not None:
            if timer.justplay:
                # Zap-only timers never produce a recording.
                return
        result = {}
        # NOTE(review): this loop shadows the *timer* parameter, so the
        # trailing "if timer is not None" tests the loop leftover, not the
        # caller's argument -- confirm whether that is intended.
        for timer in NavigationInstance.instance.RecordTimer.timer_list:
            if timer.isRunning() and not timer.justplay:
                result[os.path.basename(timer.Filename)+'.ts'] = timer
        if self.runningTimers == result:
            return
        self.runningTimers = result
        if timer is not None:
            # Debounce: restart the 5 s one-shot reload timer.
            if self.reloadDelayTimer is not None:
                self.reloadDelayTimer.stop()
            self.reloadDelayTimer = eTimer()
            self.reloadDelayTimer.callback.append(self.reload)
            self.reloadDelayTimer.start(5000, 1)
def connectSelChanged(self, fnc):
if not fnc in self.onSelectionChanged:
self.onSelectionChanged.append(fnc)
def disconnectSelChanged(self, fnc):
if fnc in self.onSelectionChanged:
self.onSelectionChanged.remove(fnc)
def selectionChanged(self):
for x in self.onSelectionChanged:
x()
    def setDescriptionState(self, val):
        # One of HIDE_DESCRIPTION / SHOW_DESCRIPTION.
        self.descr_state = val
    def setSortType(self, type):
        # One of the SORT_* / SHUFFLE constants.
        self.sort_type = type
    def setItemsPerPage(self):
        """Derive the row height from the configured items-per-page value
        and resize the widget to a whole number of rows.

        NOTE(review): the '/' divisions assume Python 2 integer division
        -- confirm if this file is ever run under Python 3.
        """
        if self.listHeight > 0:
            itemHeight = self.listHeight / config.movielist.itemsperpage.value
        else:
            itemHeight = 25 # some default (270/5)
        self.itemHeight = itemHeight
        self.l.setItemHeight(itemHeight)
        self.instance.resize(eSize(self.listWidth, self.listHeight / itemHeight * itemHeight))
    def setFontsize(self):
        # Apply the user's font-size offset to both list fonts.
        self.l.setFont(0, gFont(self.fontName, self.fontSize + config.movielist.fontsize.value))
        self.l.setFont(1, gFont(self.fontName, (self.fontSize - 3) + config.movielist.fontsize.value))
    def invalidateItem(self, index):
        # Drop the cached MovieListData (4th tuple slot) so the row is
        # rebuilt on its next paint.
        x = self.list[index]
        self.list[index] = (x[0], x[1], x[2], None)
    def invalidateCurrentItem(self):
        self.invalidateItem(self.getCurrentIndex())
    def buildMovieListEntry(self, serviceref, info, begin, data):
        """Render one row (called by eListboxPythonMultiContent).

        *data* caches computed values per row; switch selects the watched
        indicator style: 'i' = icons, 'p'/'s' = progress bars.
        """
        switch = config.usage.show_icons_in_movielist.value
        width = self.l.getItemSize().width()
        pathName = serviceref.getPath()
        res = [None]
        if serviceref.flags & eServiceReference.mustDescent:
            # Directory
            iconSize = 22
            # Name is full path name
            if info is None:
                # Special case: "parent"
                txt = ".."
            else:
                txt = os.path.basename(os.path.normpath(pathName))
                if txt == ".Trash":
                    # Trashcan gets its own icon and label.
                    res.append(MultiContentEntryPixmapAlphaTest(pos=(0, (self.itemHeight - 24) / 2), size=(iconSize, 24), flags=RT_HALIGN_LEFT | RT_VALIGN_CENTER, png=self.iconTrash))
                    res.append(MultiContentEntryText(pos=(iconSize + 2, 0), size=(width - 166, self.itemHeight), font=0, flags=RT_HALIGN_LEFT | RT_VALIGN_CENTER, text=_("Deleted items")))
                    res.append(MultiContentEntryText(pos=(width - 145, 0), size=(145, self.itemHeight), font=1, flags=RT_HALIGN_RIGHT | RT_VALIGN_CENTER, text=_("Trashcan")))
                    return res
            res.append(MultiContentEntryPixmapAlphaTest(pos=(0, (self.itemHeight - 24) / 2), size=(iconSize, iconSize), flags=RT_HALIGN_LEFT | RT_VALIGN_CENTER, png=self.iconFolder))
            res.append(MultiContentEntryText(pos=(iconSize + 2, 0), size=(width - 166, self.itemHeight), font=0, flags=RT_HALIGN_LEFT | RT_VALIGN_CENTER, text=txt))
            res.append(MultiContentEntryText(pos=(width - 145, 0), size=(145, self.itemHeight), font=1, flags=RT_HALIGN_RIGHT | RT_VALIGN_CENTER, text=_("Directory")))
            return res
        if (data == -1) or (data is None):
            # First paint of this row: compute and cache its display data.
            data = MovieListData()
            cur_idx = self.l.getCurrentSelectionIndex()
            x = self.list[cur_idx] # x = ref,info,begin,...
            data.len = 0 # dont recalc movielist to speedup loading the list
            self.list[cur_idx] = (x[0], x[1], x[2], data) # update entry in list... so next time we don't need to recalc
            data.txt = info.getName(serviceref)
            if config.movielist.hide_extensions.value:
                fileName, fileExtension = os.path.splitext(data.txt)
                if fileExtension in KNOWN_EXTENSIONS:
                    data.txt = fileName
            data.icon = None
            data.part = None
            if os.path.basename(pathName) in self.runningTimers:
                # Currently being recorded (and possibly also played).
                if switch == 'i':
                    if (self.playInBackground or self.playInForeground) and serviceref == (self.playInBackground or self.playInForeground):
                        data.icon = self.iconMoviePlayRec
                    else:
                        data.icon = self.iconMovieRec
                elif switch == 'p' or switch == 's':
                    data.part = 100
                    if (self.playInBackground or self.playInForeground) and serviceref == (self.playInBackground or self.playInForeground):
                        data.partcol = 0xffc71d
                    else:
                        data.partcol = 0xff001d
            elif (self.playInBackground or self.playInForeground) and serviceref == (self.playInBackground or self.playInForeground):
                data.icon = self.iconMoviePlay
            else:
                # Watched percentage from the cuts file / resume cache;
                # snap near-0 to 0 and near-100 to 100.
                data.part = moviePlayState(pathName + '.cuts', serviceref, data.len)
                if data.part is not None and data.part <= 3:
                    data.part = 0
                if data.part is not None and data.part >= 97:
                    data.part = 100
                if switch == 'i':
                    if data.part is not None and data.part > 0:
                        data.icon = self.iconPart[data.part // 25]
                    else:
                        if config.usage.movielist_unseen.value:
                            data.icon = self.iconUnwatched
                elif switch == 'p' or switch == 's':
                    if data.part is not None and data.part > 0:
                        data.partcol = 0xffc71d
                    else:
                        if config.usage.movielist_unseen.value:
                            data.part = 100
                            data.partcol = 0x206333
        len = data.len
        if len > 0:
            len = "%d:%02d" % (len / 60, len % 60)
        else:
            len = ""
        # Emit the watched indicator in the chosen style.
        iconSize = 0
        if switch == 'i':
            iconSize = 22
            res.append(MultiContentEntryPixmapAlphaTest(pos=(0, (self.itemHeight - 20) / 2), size=(iconSize, 20), flags=RT_HALIGN_LEFT | RT_VALIGN_CENTER, png=data.icon))
        elif switch == 'p':
            iconSize = 48
            if data.part is not None and data.part > 0:
                res.append(MultiContentEntryProgress(pos=(0, (self.itemHeight - 16) / 2), size=(iconSize - 2, 16), percent=data.part, borderWidth=2, foreColor=data.partcol, foreColorSelected=None, backColor=None, backColorSelected=None))
            else:
                res.append(MultiContentEntryPixmapAlphaTest(pos=(0, (self.itemHeight - 20) / 2), size=(iconSize, 20), png=data.icon))
        elif switch == 's':
            iconSize = 22
            if data.part is not None and data.part > 0:
                res.append(MultiContentEntryProgress(pos=(0, (self.itemHeight - 16) / 2), size=(iconSize - 2, 16), percent=data.part, borderWidth=2, foreColor=data.partcol, foreColorSelected=None, backColor=None, backColorSelected=None))
            else:
                res.append(MultiContentEntryPixmapAlphaTest(pos=(0, (self.itemHeight - 20) / 2), size=(iconSize, 20), png=data.icon))
        begin_string = ""
        if begin > 0:
            begin_string = ' '.join(FuzzyTime(begin, inPast=True))
        # Name column plus right-aligned recording date column.
        ih = self.itemHeight
        lenSize = ih * 3 # 25 -> 75
        dateSize = ih * 145 / 25 # 25 -> 145
        res.append(MultiContentEntryText(pos=(iconSize, 0), size=(width - iconSize - dateSize, ih), flags=RT_HALIGN_LEFT | RT_VALIGN_CENTER, font=0, text=data.txt))
        res.append(MultiContentEntryText(pos=(width - dateSize, 0), size=(dateSize, ih), flags=RT_HALIGN_RIGHT | RT_VALIGN_CENTER, font=1, text=begin_string))
        return res
def moveToFirstMovie(self):
    """Select the first non-directory entry; if the list holds only directories, select the first entry instead."""
    if self.firstFileEntry >= len(self.list):
        # there are no movies, just directories...
        self.moveToFirst()
        return
    self.instance.moveSelectionTo(self.firstFileEntry)
def moveToParentDirectory(self):
    """Select the '..' (parent directory) entry when present, otherwise the first entry."""
    if self.parentDirectory >= len(self.list):
        self.moveToFirst()
        return
    self.instance.moveSelectionTo(self.parentDirectory)
def moveToLast(self):
    """Select the last entry; no-op when the list is empty."""
    if not self.list:
        return
    self.instance.moveSelectionTo(len(self.list) - 1)
def moveToFirst(self):
    """Select the first entry; no-op when the list is empty."""
    if not self.list:
        return
    self.instance.moveSelectionTo(0)
def moveToIndex(self, index):
    # Unconditional selection move; the caller is responsible for passing a valid index.
    self.instance.moveSelectionTo(index)
def getCurrentIndex(self):
    # Index of the currently selected row in the listbox widget.
    return self.instance.getCurrentIndex()
def getCurrentEvent(self):
    """Return the EPG event for the current selection, or a falsy value when unavailable."""
    cursel = self.l.getCurrentSelection()
    # Short-circuit chain: yields the first falsy link unchanged, so
    # "no selection" / "no service" / "no info" all come back falsy.
    return cursel and cursel[0] and cursel[1] and cursel[1].getEvent(cursel[0])
def getCurrent(self):
    """Return the service reference of the current selection (falsy when nothing is selected)."""
    cursel = self.l.getCurrentSelection()
    return cursel and cursel[0]
def getItem(self, index):
    """Return the service reference stored at *index*, or None when out of range / list empty."""
    if self.list and len(self.list) > index:
        entry = self.list[index]
        return entry and entry[0]
GUI_WIDGET = eListbox  # enigma GUI widget class this content provider attaches to
def postWidgetCreate(self, instance):
    # Called by the skin framework once the eListbox widget exists:
    # attach the list content, hook the selection-changed callback, apply fonts.
    instance.setContent(self.l)
    instance.selectionChanged.get().append(self.selectionChanged)
    self.setFontsize()
def preWidgetRemove(self, instance):
    # Mirror image of postWidgetCreate: detach content and unhook the
    # selection-changed callback before the widget is destroyed.
    instance.setContent(None)
    instance.selectionChanged.get().remove(self.selectionChanged)
def reload(self, root=None, filter_tags=None):
    """Re-read the movie list, optionally switching to a new root and/or tag filter, and push it to the widget."""
    if self.reloadDelayTimer is not None:
        # A delayed reload is pending; cancel it, we are reloading right now.
        self.reloadDelayTimer.stop()
        self.reloadDelayTimer = None
    self.load(root if root is not None else self.root, filter_tags)
    self.l.setList(self.list)
def removeService(self, service):
    """Drop *service* from the list (if present) and push the change to the widget."""
    position = self.findService(service)
    if position is None:
        return
    del self.list[position]
    self.l.setList(self.list)
def findService(self, service):
    """Return the list index whose entry holds *service*, or None when absent (or service is None)."""
    if service is None:
        return None
    return next(
        (pos for pos, entry in enumerate(self.list) if entry[0] == service),
        None)
def __len__(self):
    # Sequence protocol: number of entries (directories + recordings).
    return len(self.list)
def __getitem__(self, index):
    # Sequence protocol: raw entry tuple (serviceref, info, begin, ...) at index.
    return self.list[index]
def __iter__(self):
    # Sequence protocol: iterate over the raw entry tuples.
    return self.list.__iter__()
def load(self, root, filter_tags):
# this lists our root service, then building a
# nice list
self.list = [ ]
serviceHandler = eServiceCenter.getInstance()
numberOfDirs = 0
reflist = serviceHandler.list(root)
if reflist is None:
print "[MovieList] listing of movies failed"
return
realtags = set()
tags = {}
rootPath = os.path.normpath(root.getPath())
parent = None
# Don't navigate above the "root"
if len(rootPath) > 1 and (os.path.realpath(rootPath) != config.movielist.root.value):
parent = os.path.dirname(rootPath)
# enigma wants an extra '/' appended
if not parent.endswith('/'):
parent += '/'
ref = eServiceReference("2:0:1:0:0:0:0:0:0:0:" + parent)
ref.flags = eServiceReference.flagDirectory
self.list.append((ref, None, 0, -1))
numberOfDirs += 1
while 1:
serviceref = reflist.getNext()
if not serviceref.valid():
break
info = serviceHandler.info(serviceref)
if info is None:
info = justStubInfo
begin = info.getInfo(serviceref, iServiceInformation.sTimeCreate)
if serviceref.flags & eServiceReference.mustDescent:
dirname = info.getName(serviceref)
normdirname = os.path.normpath(dirname)
normname = os.path.basename(normdirname)
if normname not in MovieList.dirNameExclusions and normdirname not in defaultInhibitDirs:
self.list.append((serviceref, info, begin, -1))
numberOfDirs += 1
continue
# convert space-seperated list of tags into a set
this_tags = info.getInfoString(serviceref, iServiceInformation.sTags).split(' ')
name = info.getName(serviceref)
# OSX put a lot of stupid files ._* everywhere... we need to skip them
if name[:2] == "._":
continue
if this_tags == ['']:
# No tags? Auto tag!
this_tags = name.replace(',',' ').replace('.',' ').replace('_',' ').replace(':',' ').split()
else:
realtags.update(this_tags)
for tag in this_tags:
if len(tag) >= 4:
if tags.has_key(tag):
tags[tag].append(name)
else:
tags[tag] = [name]
# filter_tags is either None (which means no filter at all), or
# a set. In this case, all elements of filter_tags must be present,
# otherwise the entry will be dropped.
if filter_tags is not None:
this_tags_fullname = [" ".join(this_tags)]
this_tags_fullname = set(this_tags_fullname)
this_tags = set(this_tags)
if not this_tags.issuperset(filter_tags) and not this_tags_fullname.issuperset(filter_tags):
# print "[MovieList] Skipping", name, "tags=", this_tags, " filter=", filter_tags
continue
self.list.append((serviceref, info, begin, -1))
self.firstFileEntry = numberOfDirs
self.parentDirectory = 0
if self.sort_type == MovieList.SORT_ALPHANUMERIC:
self.list.sort(key=self.buildAlphaNumericSortKey)
elif self.sort_type == MovieList.SORT_ALPHANUMERIC_REVERSE:
self.list.sort(key=self.buildAlphaNumericSortKey, reverse=True)
elif self.sort_type == MovieList.SORT_ALPHANUMERIC_FLAT:
self.list.sort(key=self.buildAlphaNumericFlatSortKey)
elif self.sort_type == MovieList.SORT_ALPHANUMERIC_FLAT_REVERSE:
self.list.sort(key=self.buildAlphaNumericFlatSortKey, reverse=True)
elif self.sort_type == MovieList.SORT_RECORDED:
self.list.sort(key=self.buildBeginTimeSortKey)
elif self.sort_type == MovieList.SORT_RECORDED_REVERSE:
self.list.sort(key=self.buildBeginTimeSortKey, reverse=True)
elif self.sort_type == MovieList.SHUFFLE:
self.list.sort(key=self.buildBeginTimeSortKey)
dirlist = self.list[:numberOfDirs]
shufflelist = self.list[numberOfDirs:]
random.shuffle(shufflelist)
self.list = dirlist + shufflelist
for x in self.list[:]:
if x[1]:
tmppath = x[1].getName(x[0])[:-1] if x[1].getName(x[0]).endswith('/') else x[1].getName(x[0])
if tmppath.endswith('.Trash'):
self.list.append(self.list.pop(self.list.index(x)))
else:
self.list.insert(0, self.list.pop(self.list.index(x)))
if self.root and numberOfDirs > 0:
rootPath = os.path.normpath(self.root.getPath())
if not rootPath.endswith('/'):
rootPath += '/'
if rootPath != parent:
# with new sort types directories may be in between files, so scan whole
# list for parentDirectory index. Usually it is the first one anyway
for index, item in enumerate(self.list):
if item[0].flags & eServiceReference.mustDescent:
itempath = os.path.normpath(item[0].getPath())
if not itempath.endswith('/'):
itempath += '/'
if itempath == rootPath:
self.parentDirectory = index
break
self.root = root
# finally, store a list of all tags which were found. these can be presented
# to the user to filter the list
# ML: Only use the tags that occur more than once in the list OR that were
# really in the tag set of some file.
# reverse the dictionary to see which unique movie each tag now references
rtags = {}
for tag, movies in tags.items():
if (len(movies) > 1) or (tag in realtags):
movies = tuple(movies) # a tuple can be hashed, but a list not
item = rtags.get(movies, [])
if not item: rtags[movies] = item
item.append(tag)
self.tags = {}
for movies, tags in rtags.items():
movie = movies[0]
# format the tag lists so that they are in 'original' order
tags.sort(key = movie.find)
first = movie.find(tags[0])
last = movie.find(tags[-1]) + len(tags[-1])
match = movie
start = 0
end = len(movie)
# Check if the set has a complete sentence in common, and how far
for m in movies[1:]:
if m[start:end] != match:
if not m.startswith(movie[:last]):
start = first
if not m.endswith(movie[first:]):
end = last
match = movie[start:end]
if m[start:end] != match:
match = ''
break
if match:
self.tags[match] = set(tags)
continue
else:
match = ' '.join(tags)
if len(match) > 2: #Omit small words
self.tags[match] = set(tags)
def buildAlphaNumericSortKey(self, x):
    """Sort key: directories (group 0) before recordings (group 1), then case-insensitive name, newest first."""
    # x = ref,info,begin,...
    ref, info = x[0], x[1]
    name = info and info.getName(ref)
    group = 0 if ref.flags & eServiceReference.mustDescent else 1
    return group, name.lower() if name else "", -x[2]
def buildAlphaNumericFlatSortKey(self, x):
    """Sort key for flat view: case-insensitive name (directories sort by their basename), newest first."""
    # x = ref,info,begin,...
    ref = x[0]
    name = x[1] and x[1].getName(ref) or ".."
    if name and ref.flags & eServiceReference.mustDescent:
        # only use directory basename for sorting
        try:
            name = os.path.basename(os.path.normpath(name))
        except Exception:
            # Malformed path: keep the unmodified name. (Was a bare "except:",
            # which also swallowed SystemExit/KeyboardInterrupt.)
            pass
        if name.endswith(".Trash"):
            name = "Deleted Items"
    # print "[MovieList] Sorting for -%s-" % name
    return 1, name and name.lower() or "", -x[2]
def buildBeginTimeSortKey(self, x):
    """Sort key by recording time: directories first (by directory mtime), then newest recordings first."""
    ref = x[0]
    if ref.flags & eServiceReference.mustDescent:
        try:
            mtime = -os.stat(ref.getPath()).st_mtime
        except OSError:
            # Directory vanished or is unreadable; treat as "no timestamp".
            # (Was a bare "except:", which hid every kind of error.)
            mtime = 0
        return 0, x[1] and mtime
    return 1, -x[2]
def moveTo(self, serviceref):
    """Select *serviceref* in the widget; return True when found, False otherwise."""
    position = self.findService(serviceref)
    if position is None:
        return False
    self.instance.moveSelectionTo(position)
    return True
def moveDown(self):
    # Delegate one-step-down navigation to the listbox widget.
    self.instance.moveSelection(self.instance.moveDown)
def moveUp(self):
    # Delegate one-step-up navigation to the listbox widget.
    self.instance.moveSelection(self.instance.moveUp)
def moveToChar(self, char, lbl=None):
    """Start (or restart) a jump-to-letter search with *char* as the whole pattern."""
    self._char = char
    self._lbl = lbl
    if lbl:
        lbl.setText(char)
        lbl.visible = True
    # Give the user a moment to extend the pattern before searching.
    timer = eTimer()
    timer.callback.append(self._moveToChrStr)
    self.moveToCharTimer = timer
    self.moveToCharTimer.start(1000, True)
def moveToString(self, char, lbl=None):
    """Extend the jump-to-letter pattern with *char* (uppercased) and re-arm the search timer."""
    self._char += char.upper()
    self._lbl = lbl
    if lbl:
        lbl.setText(self._char)
        lbl.visible = True
    # Wait for a possible next key press before jumping.
    timer = eTimer()
    timer.callback.append(self._moveToChrStr)
    self.moveToCharTimer = timer
    self.moveToCharTimer.start(1000, True)
def _moveToChrStr(self):
    # Timer callback: search forward (with wrap-around) from the entry after
    # the current selection for the first entry whose short name matches
    # self._char, then reset the pattern and hide the on-screen label.
    currentIndex = self.instance.getCurrentIndex()
    index = currentIndex + 1
    if index >= len(self.list):
        index = 0
    while index != currentIndex:
        item = self.list[index]
        if item[1] is not None:  # skip entries without info (e.g. '..')
            ref = item[0]
            itemName = getShortName(item[1].getName(ref), ref)
            strlen = len(self._char)
            # Single char: prefix match; longer pattern: substring match.
            if strlen == 1 and itemName.startswith(self._char) \
                or strlen > 1 and itemName.find(self._char) >= 0:
                self.instance.moveSelectionTo(index)
                break
        index += 1
        if index >= len(self.list):
            index = 0
    self._char = ''
    if self._lbl:
        self._lbl.visible = False
def getShortName(name, serviceref):
    """Uppercased display name used for jump-to-letter matching; directories use their path basename."""
    if serviceref.flags & eServiceReference.mustDescent:  # Directory
        base = os.path.basename(os.path.normpath(serviceref.getPath()))
        name = _("Deleted items") if base == '.Trash' else base
    return name.upper()
| gpl-2.0 |
ngpestelos/ansible | lib/ansible/errors/__init__.py | 11 | 7738 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors.yaml_strings import ( YAML_POSITION_DETAILS,
YAML_COMMON_UNQUOTED_VARIABLE_ERROR,
YAML_COMMON_DICT_ERROR,
YAML_COMMON_UNQUOTED_COLON_ERROR,
YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR,
YAML_COMMON_UNBALANCED_QUOTES_ERROR )
from ansible.utils.unicode import to_unicode, to_str
class AnsibleError(Exception):
    '''
    This is the base class for all errors raised from Ansible code,
    and can be instantiated with two optional parameters beyond the
    error message to control whether detailed information is displayed
    when the error occurred while parsing a data file of some kind.

    Usage:

        raise AnsibleError('some message here', obj=obj, show_content=True)

    Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
    which should be returned by the DataLoader() class.
    '''

    def __init__(self, message="", obj=None, show_content=True):
        # we import this here to prevent an import loop problem,
        # since the objects code also imports ansible.errors
        from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject

        self._obj = obj
        self._show_content = show_content
        if obj and isinstance(obj, AnsibleBaseYAMLObject):
            extended_error = self._get_extended_error()
            if extended_error:
                self.message = 'ERROR! %s\n\n%s' % (message, to_str(extended_error))
        else:
            self.message = 'ERROR! %s' % message

    def __str__(self):
        return self.message

    def __repr__(self):
        return self.message

    def _get_error_lines_from_file(self, file_name, line_number):
        '''
        Returns the line in the file which corresponds to the reported error
        location, as well as the line preceding it (if the error did not
        occur on the first line), to provide context to the error.
        '''

        target_line = ''
        prev_line = ''
        with open(file_name, 'r') as f:
            lines = f.readlines()
            target_line = lines[line_number]
            if line_number > 0:
                prev_line = lines[line_number - 1]
        return (target_line, prev_line)

    def _get_extended_error(self):
        '''
        Given an object reporting the location of the exception in a file, return
        detailed information regarding it including:

          * the line which caused the error as well as the one preceding it
          * causes and suggested remedies for common syntax errors

        If this error was created with show_content=False, the reporting of content
        is suppressed, as the file contents may be sensitive (ie. vault data).
        '''

        error_message = ''

        try:
            (src_file, line_number, col_number) = self._obj.ansible_pos
            error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
            if src_file not in ('<string>', '<unicode>') and self._show_content:
                (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
                target_line = to_unicode(target_line)
                prev_line = to_unicode(prev_line)
                if target_line:
                    stripped_line = target_line.replace(" ","")
                    arrow_line = (" " * (col_number-1)) + "^ here"
                    error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)

                    # common error/remediation checking here:
                    # check for unquoted vars starting lines
                    # BUGFIX: this test used "or", which is true whenever either
                    # quote form is absent, so properly quoted lines such as
                    # 'msg: "{{ x }}"' were still flagged. Flag only when the
                    # variable is quoted with neither quote style.
                    if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line and "'{{" not in target_line):
                        error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
                    # check for common dictionary mistakes
                    elif ":{{" in stripped_line and "}}" in stripped_line:
                        error_message += YAML_COMMON_DICT_ERROR
                    # check for common unquoted colon mistakes
                    elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
                        error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
                    # otherwise, check for some common quoting mistakes
                    else:
                        parts = target_line.split(":")
                        if len(parts) > 1:
                            middle = parts[1].strip()
                            match = False
                            unbalanced = False

                            if middle.startswith("'") and not middle.endswith("'"):
                                match = True
                            elif middle.startswith('"') and not middle.endswith('"'):
                                match = True

                            # BUGFIX: "and ... or ..." bound as
                            # "(a and b and c and d) or e" due to precedence, so
                            # ANY line with more than two double quotes was
                            # reported as unbalanced. Parenthesize the quote
                            # counts so they are part of the conjunction.
                            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and (target_line.count("'") > 2 or target_line.count('"') > 2):
                                unbalanced = True

                            if match:
                                error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
                            if unbalanced:
                                error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR

        except (IOError, TypeError):
            error_message += '\n(could not open file to display line)'
        except IndexError:
            error_message += '\n(specified line no longer in file, maybe it changed?)'

        return error_message
class AnsibleOptionsError(AnsibleError):
    '''Raised when bad or incomplete command-line options are passed.'''
class AnsibleParserError(AnsibleError):
    '''Raised when something detected early is wrong about a playbook or data file.'''
class AnsibleInternalError(AnsibleError):
    '''Internal safeguards tripped: something happened in the code that should never happen.'''
class AnsibleRuntimeError(AnsibleError):
    '''Raised when ansible has a problem while running a playbook.'''
class AnsibleModuleError(AnsibleRuntimeError):
    '''Raised when a module failed somehow.'''
class AnsibleConnectionFailure(AnsibleRuntimeError):
    '''Raised when the transport / connection_plugin had a fatal error.'''
class AnsibleFilterError(AnsibleRuntimeError):
    '''Raised on a templating failure inside a Jinja2 filter.'''
class AnsibleLookupError(AnsibleRuntimeError):
    '''Raised on a lookup-plugin failure.'''
class AnsibleCallbackError(AnsibleRuntimeError):
    '''Raised on a callback-plugin failure.'''
class AnsibleUndefinedVariable(AnsibleRuntimeError):
    '''Raised on a templating failure caused by an undefined variable.'''
class AnsibleFileNotFound(AnsibleRuntimeError):
    '''Raised when a referenced file is missing.'''
| gpl-3.0 |
junhuac/MQUIC | depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/gslib/tracker_file.py | 12 | 17959 | # -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for tracker file functionality."""
import errno
import hashlib
import json
import os
import re
from boto import config
from gslib.exception import CommandException
from gslib.util import CreateDirIfNeeded
from gslib.util import GetGsutilStateDir
from gslib.util import ResumableThreshold
from gslib.util import UTF8
# The maximum length of a file name can vary wildly between different
# operating systems, so we always ensure that tracker files are less
# than 100 characters in order to avoid any such issues.
MAX_TRACKER_FILE_NAME_LENGTH = 100


# %s placeholders are (tracker file path, OS error text).
TRACKER_FILE_UNWRITABLE_EXCEPTION_TEXT = (
    'Couldn\'t write tracker file (%s): %s. This can happen if gsutil is '
    'configured to save tracker files to an unwritable directory)')
class TrackerFileType(object):
  """Enum-like constants naming the kinds of resumable-operation tracker files."""
  UPLOAD = 'upload'
  DOWNLOAD = 'download'
  DOWNLOAD_COMPONENT = 'download_component'
  PARALLEL_UPLOAD = 'parallel_upload'
  SLICED_DOWNLOAD = 'sliced_download'
  REWRITE = 'rewrite'
def _HashFilename(filename):
  """Apply a hash function (SHA1) to shorten the passed file name.

  The spec for the hashed file name is:

      TRACKER_<hash>.<trailing>

  where 'hash' is a SHA1 hash of the original file name and 'trailing' is
  its last 16 characters. Max file name lengths vary by operating system,
  so the goal is to keep the hashed version under 100 characters.

  Args:
    filename: file name to be hashed (str or unicode).

  Returns:
    shorter, hashed version of passed file name
  """
  # Normalize to a UTF-8 byte string before hashing (Python 2 semantics:
  # 'unicode' is the py2 text type).
  if isinstance(filename, unicode):
    encoded = filename.encode(UTF8)
  else:
    encoded = unicode(filename, UTF8).encode(UTF8)
  digest = hashlib.sha1(encoded)
  return 'TRACKER_' + digest.hexdigest() + '.' + encoded[-16:]
def CreateTrackerDirIfNeeded():
  """Looks up or creates the gsutil tracker file directory.

  This is the configured directory where gsutil keeps its resumable transfer
  tracker files; it is created on first use if missing.

  Returns:
    The pathname to the tracker directory.
  """
  default_dir = os.path.join(GetGsutilStateDir(), 'tracker-files')
  tracker_dir = config.get('GSUtil', 'resumable_tracker_dir', default_dir)
  CreateDirIfNeeded(tracker_dir)
  return tracker_dir
def GetRewriteTrackerFilePath(src_bucket_name, src_obj_name, dst_bucket_name,
                              dst_obj_name, api_selector):
  """Gets the tracker file name described by the arguments.

  Args:
    src_bucket_name: Source bucket (string).
    src_obj_name: Source object (string).
    dst_bucket_name: Destination bucket (string).
    dst_obj_name: Destination object (string)
    api_selector: API to use for this operation.

  Returns:
    File path to tracker file.
  """
  # Encode the src and dest bucket and object names into the tracker file
  # name, replacing path separators so the result is a single file name.
  raw_name = 'rewrite__%s__%s__%s__%s__%s.token' % (
      src_bucket_name, src_obj_name, dst_bucket_name, dst_obj_name,
      api_selector)
  res_tracker_file_name = re.sub('[/\\\\]', '_', raw_name)
  return _HashAndReturnPath(res_tracker_file_name, TrackerFileType.REWRITE)
def GetTrackerFilePath(dst_url, tracker_file_type, api_selector, src_url=None,
                       component_num=None):
  """Gets the tracker file name described by the arguments.

  Args:
    dst_url: Destination URL for tracker file.
    tracker_file_type: TrackerFileType for this operation.
    api_selector: API to use for this operation.
    src_url: Source URL for the source file name for parallel uploads.
    component_num: Component number if this is a download component, else None.

  Returns:
    File path to tracker file.
  """
  if tracker_file_type == TrackerFileType.UPLOAD:
    # Encode the dest bucket and object name into the tracker file name.
    res_tracker_file_name = (
        re.sub('[/\\\\]', '_', 'resumable_upload__%s__%s__%s.url' %
               (dst_url.bucket_name, dst_url.object_name, api_selector)))
  elif tracker_file_type == TrackerFileType.DOWNLOAD:
    # Encode the fully-qualified dest file name into the tracker file name.
    res_tracker_file_name = (
        re.sub('[/\\\\]', '_', 'resumable_download__%s__%s.etag' %
               (os.path.realpath(dst_url.object_name), api_selector)))
  elif tracker_file_type == TrackerFileType.DOWNLOAD_COMPONENT:
    # Encode the fully-qualified dest file name and the component number
    # into the tracker file name.
    res_tracker_file_name = (
        re.sub('[/\\\\]', '_', 'resumable_download__%s__%s__%d.etag' %
               (os.path.realpath(dst_url.object_name), api_selector,
                component_num)))
  elif tracker_file_type == TrackerFileType.PARALLEL_UPLOAD:
    # Encode the dest bucket and object names as well as the source file name
    # into the tracker file name.
    res_tracker_file_name = (
        re.sub('[/\\\\]', '_', 'parallel_upload__%s__%s__%s__%s.url' %
               (dst_url.bucket_name, dst_url.object_name,
                src_url, api_selector)))
  elif tracker_file_type == TrackerFileType.SLICED_DOWNLOAD:
    # Encode the fully-qualified dest file name into the tracker file name.
    res_tracker_file_name = (
        re.sub('[/\\\\]', '_', 'sliced_download__%s__%s.etag' %
               (os.path.realpath(dst_url.object_name), api_selector)))
  elif tracker_file_type == TrackerFileType.REWRITE:
    # Should use GetRewriteTrackerFilePath instead.
    raise NotImplementedError()

  # NOTE(review): an unrecognized tracker_file_type falls through all branches
  # and triggers an UnboundLocalError on the next line -- confirm whether an
  # explicit ValueError would be preferable.
  return _HashAndReturnPath(res_tracker_file_name, tracker_file_type)
def DeleteDownloadTrackerFiles(dst_url, api_selector):
  """Deletes all tracker files corresponding to an object download.

  Args:
    dst_url: StorageUrl describing the destination file.
    api_selector: The Cloud API implementation used.
  """
  # Candidates: the non-sliced tracker plus every sliced-download tracker
  # (parent and per-component).
  DeleteTrackerFile(
      GetTrackerFilePath(dst_url, TrackerFileType.DOWNLOAD, api_selector))
  for sliced_tracker in GetSlicedDownloadTrackerFilePaths(dst_url,
                                                          api_selector):
    DeleteTrackerFile(sliced_tracker)
def GetSlicedDownloadTrackerFilePaths(dst_url, api_selector,
                                      num_components=None):
  """Gets a list of sliced download tracker file paths.

  The list consists of the parent tracker file path in index 0, and then
  any existing component tracker files in [1:].

  Args:
    dst_url: Destination URL for tracker file.
    api_selector: API to use for this operation.
    num_components: The number of component tracker files, if already known.
                    If not known, the number will be retrieved from the parent
                    tracker file on disk.

  Returns:
    File path to tracker file.
  """
  parent_tracker_path = GetTrackerFilePath(
      dst_url, TrackerFileType.SLICED_DOWNLOAD, api_selector)
  paths = [parent_tracker_path]

  if num_components is None:
    # Component count unknown; consult the parent tracker file.
    try:
      with open(parent_tracker_path, 'r') as fp:
        num_components = json.load(fp)['num_components']
    except (IOError, ValueError):
      # No readable parent tracker: only the parent path can be returned.
      return paths

  paths.extend(
      GetTrackerFilePath(dst_url, TrackerFileType.DOWNLOAD_COMPONENT,
                         api_selector, component_num=i)
      for i in range(num_components))
  return paths
def _HashAndReturnPath(res_tracker_file_name, tracker_file_type):
  """Hashes and returns a tracker file path.

  Args:
    res_tracker_file_name: The tracker file name prior to it being hashed.
    tracker_file_type: The TrackerFileType of res_tracker_file_name.

  Returns:
    Final (hashed) tracker file path.
  """
  resumable_tracker_dir = CreateTrackerDirIfNeeded()
  tracker_file_name = '%s_%s' % (
      str(tracker_file_type).lower(), _HashFilename(res_tracker_file_name))
  tracker_file_path = resumable_tracker_dir + os.sep + tracker_file_name
  # Guard against exceeding conservative cross-OS file name length limits.
  assert len(tracker_file_name) < MAX_TRACKER_FILE_NAME_LENGTH
  return tracker_file_path
def DeleteTrackerFile(tracker_file_name):
  """Best-effort removal of a tracker file; a missing file is not an error.

  Args:
    tracker_file_name: Path to delete, or None/'' to do nothing.
  """
  if not tracker_file_name:
    return
  try:
    os.unlink(tracker_file_name)
  except OSError as e:
    # Another process may remove the file between any existence check and the
    # unlink (the original exists()+unlink() pair was racy), so ignore only
    # "no such file" and re-raise everything else.
    if e.errno != errno.ENOENT:
      raise
def HashRewriteParameters(
    src_obj_metadata, dst_obj_metadata, projection, src_generation=None,
    gen_match=None, meta_gen_match=None, canned_acl=None, fields=None,
    max_bytes_per_call=None):
  """Creates an MD5 hex digest of the parameters for a rewrite call.

  Resuming rewrites requires that the input parameters are identical, so the
  rewrite tracker file records a hash of them. A same-source/same-destination
  rewrite issued with any changed input (for example, a changed ACL) yields a
  different hash and therefore restarts from the beginning.

  Args:
    src_obj_metadata: apitools Object describing source object. Must include
        bucket, name, and etag.
    dst_obj_metadata: apitools Object describing destination object. Must
        include bucket and object name
    projection: Projection used for the API call.
    src_generation: Optional source generation.
    gen_match: Optional generation precondition.
    meta_gen_match: Optional metageneration precondition.
    canned_acl: Optional canned ACL string.
    fields: Optional fields to include in response.
    max_bytes_per_call: Optional maximum bytes rewritten per call.

  Returns:
    MD5 hex digest Hash of the input parameters, or None if required
    parameters are missing.
  """
  required_fields_present = (
      src_obj_metadata and src_obj_metadata.bucket and src_obj_metadata.name
      and src_obj_metadata.etag and dst_obj_metadata and
      dst_obj_metadata.bucket and dst_obj_metadata.name and projection)
  if not required_fields_present:
    return
  md5_hash = hashlib.md5()
  for input_param in (
      src_obj_metadata, dst_obj_metadata, projection, src_generation,
      gen_match, meta_gen_match, canned_acl, fields, max_bytes_per_call):
    md5_hash.update(str(input_param))
  return md5_hash.hexdigest()
def ReadRewriteTrackerFile(tracker_file_name, rewrite_params_hash):
  """Attempts to read a rewrite tracker file.

  Args:
    tracker_file_name: Tracker file path string.
    rewrite_params_hash: MD5 hex digest of rewrite call parameters constructed
        by HashRewriteParameters.

  Returns:
    String rewrite_token for resuming rewrite requests if a matching tracker
    file exists, None otherwise (which will result in starting a new rewrite).
  """
  if not rewrite_params_hash:
    return
  try:
    with open(tracker_file_name, 'r') as tracker_file:
      if tracker_file.readline().rstrip('\n') == rewrite_params_hash:
        # First line matched our parameter hash; the next line holds the
        # service-issued rewrite token.
        return tracker_file.readline().rstrip('\n')
  except IOError as e:
    # Ignore non-existent file (happens first time a rewrite is attempted).
    if e.errno != errno.ENOENT:
      print('Couldn\'t read Copy tracker file (%s): %s. Restarting copy '
            'from scratch.' %
            (tracker_file_name, e.strerror))
def WriteRewriteTrackerFile(tracker_file_name, rewrite_params_hash,
                            rewrite_token):
  """Writes a rewrite tracker file.

  Args:
    tracker_file_name: Tracker file path string.
    rewrite_params_hash: MD5 hex digest of rewrite call parameters constructed
        by HashRewriteParameters.
    rewrite_token: Rewrite token string returned by the service.
  """
  # Line 1: parameter hash; line 2: service-issued rewrite token.
  _WriteTrackerFile(tracker_file_name,
                    '{0}\n{1}\n'.format(rewrite_params_hash, rewrite_token))
def ReadOrCreateDownloadTrackerFile(src_obj_metadata, dst_url, logger,
                                    api_selector, start_byte,
                                    existing_file_size, component_num=None):
  """Checks for a download tracker file and creates one if it does not exist.

  The methodology for determining the download start point differs between
  normal and sliced downloads. For normal downloads, the existing bytes in
  the file are presumed to be correct and have been previously downloaded from
  the server (if a tracker file exists). In this case, the existing file size
  is used to determine the download start point. For sliced downloads, the
  number of bytes previously retrieved from the server cannot be determined
  from the existing file size, and so the number of bytes known to have been
  previously downloaded is retrieved from the tracker file.

  Args:
    src_obj_metadata: Metadata for the source object. Must include etag and
                      generation.
    dst_url: Destination URL for tracker file.
    logger: For outputting log messages.
    api_selector: API to use for this operation.
    start_byte: The start byte of the byte range for this download.
    existing_file_size: Size of existing file for this download on disk.
    component_num: The component number, if this is a component of a parallel
                   download, else None.

  Returns:
    tracker_file_name: The name of the tracker file, if one was used.
    download_start_byte: The first byte that still needs to be downloaded.
  """
  assert src_obj_metadata.etag
  tracker_file_name = None
  if src_obj_metadata.size < ResumableThreshold():
    # Don't create a tracker file for a small downloads; cross-process resumes
    # won't work, but restarting a small download is inexpensive.
    return tracker_file_name, start_byte

  download_name = dst_url.object_name
  if component_num is None:
    tracker_file_type = TrackerFileType.DOWNLOAD
  else:
    tracker_file_type = TrackerFileType.DOWNLOAD_COMPONENT
    download_name += ' component %d' % component_num

  tracker_file_name = GetTrackerFilePath(dst_url, tracker_file_type,
                                         api_selector,
                                         component_num=component_num)
  tracker_file = None
  # Check to see if we already have a matching tracker file.
  try:
    tracker_file = open(tracker_file_name, 'r')
    if tracker_file_type is TrackerFileType.DOWNLOAD:
      # Whole-object tracker holds just the etag; a match means the bytes on
      # disk are trusted and the download resumes at the existing file size.
      etag_value = tracker_file.readline().rstrip('\n')
      if etag_value == src_obj_metadata.etag:
        return tracker_file_name, existing_file_size
    elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
      # Component tracker holds JSON (etag, generation, start byte); resume
      # from the recorded byte only when both etag and generation match.
      component_data = json.loads(tracker_file.read())
      if (component_data['etag'] == src_obj_metadata.etag and
          component_data['generation'] == src_obj_metadata.generation):
        return tracker_file_name, component_data['download_start_byte']

    logger.warn('Tracker file doesn\'t match for download of %s. Restarting '
                'download from scratch.' % download_name)

  except (IOError, ValueError) as e:
    # Ignore non-existent file (happens first time a download
    # is attempted on an object), but warn user for other errors.
    if isinstance(e, ValueError) or e.errno != errno.ENOENT:
      logger.warn('Couldn\'t read download tracker file (%s): %s. Restarting '
                  'download from scratch.' % (tracker_file_name, str(e)))
  finally:
    if tracker_file:
      tracker_file.close()

  # There wasn't a matching tracker file, so create one and then start the
  # download from scratch.
  if tracker_file_type is TrackerFileType.DOWNLOAD:
    _WriteTrackerFile(tracker_file_name, '%s\n' % src_obj_metadata.etag)
  elif tracker_file_type is TrackerFileType.DOWNLOAD_COMPONENT:
    WriteDownloadComponentTrackerFile(tracker_file_name, src_obj_metadata,
                                      start_byte)
  return tracker_file_name, start_byte
def WriteDownloadComponentTrackerFile(tracker_file_name, src_obj_metadata,
                                      current_file_pos):
  """Updates or creates a download component tracker file on disk.

  Args:
    tracker_file_name: The name of the tracker file.
    src_obj_metadata: Metadata for the source object. Must include etag.
    current_file_pos: The current position in the file.
  """
  # The component tracker is a small JSON document; etag/generation identify
  # the exact source object version, download_start_byte the resume point.
  component_data = {
      'etag': src_obj_metadata.etag,
      'generation': src_obj_metadata.generation,
      'download_start_byte': current_file_pos,
  }
  _WriteTrackerFile(tracker_file_name, json.dumps(component_data))
def _WriteTrackerFile(tracker_file_name, data):
  """Creates (or overwrites) a tracker file containing *data*.

  The file is created with permission bits 0o600 since tracker files can
  reference user-private paths. Returns False on success (kept as-is for
  backward compatibility; callers ignore the return value) and raises
  CommandException via RaiseUnwritableTrackerFileException on failure.
  """
  try:
    # 0o600 spelling is valid on Python 2.6+ and 3.x; the old 0600 octal
    # literal is a syntax error on Python 3.
    with os.fdopen(os.open(tracker_file_name,
                           os.O_WRONLY | os.O_CREAT, 0o600), 'w') as tf:
      tf.write(data)
    return False
  except (IOError, OSError) as e:
    raise RaiseUnwritableTrackerFileException(tracker_file_name, e.strerror)
def RaiseUnwritableTrackerFileException(tracker_file_name, error_str):
  """Raises an exception when unable to write the tracker file."""
  message = TRACKER_FILE_UNWRITABLE_EXCEPTION_TEXT % (tracker_file_name,
                                                      error_str)
  raise CommandException(message)
| mit |
garnachod/SimpleDoc2Vec | doc2vecClass.py | 1 | 1464 | # classifier
from sklearn.linear_model import LogisticRegression
from gensim.models import Doc2Vec
import numpy
from GeneraVectores import GeneraVectores
from sklearn import svm
from NNet import NeuralNet
if __name__ == '__main__':
    # Load the pre-trained paragraph-vector (doc2vec) model.
    model = Doc2Vec.load('./imdb_dbow.d2v')

    dim = 100
    half = 12500          # reviews per class
    total = 2 * half      # positive + negative

    train_arrays = numpy.zeros((total, dim))
    train_labels = numpy.zeros(total)

    generador = GeneraVectores(model)
    Pos = generador.getVecsFromFile("data/trainpos.txt")
    print("generados vectores Pos")
    Neg = generador.getVecsFromFile("data/trainneg.txt")
    print("generados vectores Neg")

    # First half: positive reviews (label 1); second half: negatives (label 0,
    # already the zeros the array was initialized with).
    for idx in range(half):
        train_arrays[idx] = Pos[idx]
        train_arrays[half + idx] = Neg[idx]
    train_labels[:half] = 1

    test_arrays = numpy.zeros((total, dim))
    test_labels = numpy.zeros(total)
    Pos = generador.getVecsFromFile("data/testpos.txt")
    print("generados vectores Pos TEST")
    Neg = generador.getVecsFromFile("data/testneg.txt")
    print("generados vectores Neg TEST")
    for idx in range(half):
        test_arrays[idx] = Pos[idx]
        test_arrays[half + idx] = Neg[idx]
    test_labels[:half] = 1

    # Logistic regression on the document vectors; report test accuracy.
    classifier = LogisticRegression(C=1.0, class_weight=None, dual=False,
                                    fit_intercept=True, intercept_scaling=1,
                                    penalty='l2', random_state=None,
                                    tol=0.0001)
    classifier.fit(train_arrays, train_labels)
    print("Regresion logistica")
    print(classifier.score(test_arrays, test_labels))
| gpl-2.0 |
yashwanth686007/TizenRTOS | external/iotivity/iotivity_1.2-rel/extlibs/gtest/gtest-1.7.0/test/gtest_xml_outfiles_test.py | 2526 | 5340 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# Subdirectory (under the temp dir) into which the test binaries write XML.
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
# Base names of the two test binaries exercised below.
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"

# Expected XML documents; '*' acts as a wildcard for run-dependent attribute
# values (time, timestamp) during the equivalence check.
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""

EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single
    # file for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    """Best-effort removal of the XML outputs and their directory."""
    # Each removal is wrapped separately so one missing file does not stop
    # the cleanup of the others.
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

  def _TestOutFile(self, test_name, expected_xml):
    """Runs one test binary with --gtest_output into the output directory
    and checks the produced XML against expected_xml."""
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)

    # TODO(wan@google.com): libtool causes the built test binary to be
    # named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_. To account for this possibility, we
    # allow both names in the following code. We should remove this
    # hack when Chandler Carruth's libtool replacement tool is ready.
    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                 output_file1)

    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    # Normalize run-dependent attributes before the structural comparison.
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
  # Suppress stack traces in the child binaries' failure output before
  # running the suite.
  os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
  gtest_test_utils.Main()
| apache-2.0 |
g-k/servo | tests/wpt/harness/wptrunner/environment.py | 39 | 8465 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import multiprocessing
import signal
import socket
import sys
import time
from mozlog.structured import get_default_logger, handlers
from wptlogging import LogLevelRewriter
# Directory containing this module; used to locate bundled support files.
here = os.path.split(__file__)[0]

# Populated lazily by do_delayed_imports() once the tests path is known.
serve = None
sslutils = None

# Hostnames the wptserve configuration is expected to answer for.
hostnames = ["web-platform.test",
             "www.web-platform.test",
             "www1.web-platform.test",
             "www2.web-platform.test",
             "xn--n8j6ds53lwwkrqhv28a.web-platform.test",
             "xn--lve-6lad.web-platform.test"]
def do_delayed_imports(logger, test_paths):
    """Import the serve and sslutils modules from the tests checkout.

    The imports are delayed because both modules live inside the
    web-platform-tests tree, whose location is only known at runtime.

    Args:
        logger: Logger used to report fatal import failures.
        test_paths: Mapping of URL prefixes to path-config dicts; the "/"
            entry locates the web-platform-tests checkout.

    Side effects:
        Sets the module-level ``serve`` and ``sslutils`` globals, prepends
        the tests path to ``sys.path``, and exits the process (via
        ``sys.exit(1)``) if either import fails.
    """
    global serve, sslutils

    serve_root = serve_path(test_paths)

    sys.path.insert(0, serve_root)

    failed = []

    try:
        from tools.serve import serve
    except ImportError:
        failed.append("serve")

    try:
        import sslutils
    except ImportError:
        # Bug fix: a stray ``raise`` here re-raised the ImportError, which
        # made the append below unreachable and bypassed the friendly
        # diagnostic message; record the failure like the serve import above.
        failed.append("sslutils")

    if failed:
        logger.critical(
            "Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
            (", ".join(failed), serve_root))
        sys.exit(1)


def serve_path(test_paths):
    """Return the filesystem path of the web-platform-tests checkout."""
    return test_paths["/"]["tests_path"]
def get_ssl_kwargs(**kwargs):
    """Extract the constructor arguments for the selected ssl backend.

    The backend is chosen by kwargs["ssl_type"]; unknown types get no
    extra arguments.
    """
    ssl_type = kwargs["ssl_type"]
    if ssl_type == "openssl":
        return {"openssl_binary": kwargs["openssl_binary"]}
    if ssl_type == "pregenerated":
        return {"host_key_path": kwargs["host_key_path"],
                "host_cert_path": kwargs["host_cert_path"],
                "ca_cert_path": kwargs["ca_cert_path"]}
    return {}
def ssl_env(logger, **kwargs):
    """Instantiate the ssl environment selected by kwargs["ssl_type"]."""
    env_cls = sslutils.environments[kwargs["ssl_type"]]
    return env_cls(logger, **get_ssl_kwargs(**kwargs))
class TestEnvironmentError(Exception):
    """Error raised for failures in the wpt test environment."""
    pass
class StaticHandler(object):
    """wptserve handler serving a static, %-formatted in-memory document.

    The file at ``path`` is read once at construction time and formatted
    with ``format_args``; each request is answered with the resulting body
    and the configured headers.
    """

    def __init__(self, path, format_args, content_type, **headers):
        with open(path) as f:
            self.data = f.read() % format_args

        self.resp_headers = [("Content-Type", content_type)]
        for k, v in headers.iteritems():
            # Bug fix: this list was previously referenced as the bare name
            # ``resp_headers``, which raised NameError whenever extra
            # headers were supplied. Header names use '-' in place of '_'.
            self.resp_headers.append((k.replace("_", "-"), v))

        self.handler = serve.handlers.handler(self.handle_request)

    def handle_request(self, request, response):
        return self.resp_headers, self.data

    def __call__(self, request, response):
        rv = self.handler(request, response)
        return rv
class TestEnvironment(object):
    """Owns the wptserve HTTP/websockets servers for a test run."""

    def __init__(self, test_paths, ssl_env, pause_after_test, debug_info, options):
        """Context manager that owns the test environment i.e. the http and
        websockets servers"""
        self.test_paths = test_paths
        self.ssl_env = ssl_env
        self.server = None
        self.config = None
        self.external_config = None
        self.pause_after_test = pause_after_test
        # NOTE(review): options.pop is called before the None check below, so
        # passing options=None would raise AttributeError here -- confirm
        # callers always pass a dict.
        self.test_server_port = options.pop("test_server_port", True)
        self.debug_info = debug_info
        self.options = options if options is not None else {}

        self.cache_manager = multiprocessing.Manager()
        self.routes = self.get_routes()

    def __enter__(self):
        # Bring up ssl machinery, the shared cache, server logging, and
        # finally the servers themselves.
        self.ssl_env.__enter__()
        self.cache_manager.__enter__()
        self.setup_server_logging()
        self.config = self.load_config()
        serve.set_computed_defaults(self.config)
        self.external_config, self.servers = serve.start(self.config, self.ssl_env,
                                                         self.routes)
        if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
            self.ignore_interrupts()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.process_interrupts()

        self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
        self.ssl_env.__exit__(exc_type, exc_val, exc_tb)

        for scheme, servers in self.servers.iteritems():
            for port, server in servers:
                server.kill()

    def ignore_interrupts(self):
        # Leave SIGINT for an attached interactive debugger rather than
        # tearing down the environment.
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def process_interrupts(self):
        # Restore default SIGINT handling.
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    def load_config(self):
        """Merge the default wptserve config with local overrides and the
        ssl certificate paths; returns the merged config dict."""
        default_config_path = os.path.join(serve_path(self.test_paths), "config.default.json")
        local_config_path = os.path.join(here, "config.json")

        with open(default_config_path) as f:
            default_config = json.load(f)

        with open(local_config_path) as f:
            data = f.read()
            # The local config is a %-format template filled from options.
            local_config = json.loads(data % self.options)

        #TODO: allow non-default configuration for ssl
        local_config["external_host"] = self.options.get("external_host", None)
        local_config["ssl"]["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)

        config = serve.merge_json(default_config, local_config)
        config["doc_root"] = serve_path(self.test_paths)

        if not self.ssl_env.ssl_enabled:
            config["ports"]["https"] = [None]

        host = self.options.get("certificate_domain", config["host"])
        hosts = [host]
        hosts.extend("%s.%s" % (item[0], host) for item in serve.get_subdomains(host).values())
        key_file, certificate = self.ssl_env.host_cert_path(hosts)

        config["key_file"] = key_file
        config["certificate"] = certificate

        return config

    def setup_server_logging(self):
        """Install a filtered logger as wptserve's default logger."""
        server_logger = get_default_logger(component="wptserve")
        assert server_logger is not None
        log_filter = handlers.LogLevelFilter(lambda x:x, "info")
        # Downgrade errors to warnings for the server
        log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
        server_logger.component_filter = log_filter

        try:
            #Set as the default logger for wptserve
            serve.set_logger(server_logger)
            serve.logger = server_logger
        except Exception:
            # This happens if logging has already been set up for wptserve
            pass

    def get_routes(self):
        """Build the wptserve routing table for the configured test paths."""
        routes = serve.default_routes()
        # Prepend the testharness runner page and report script routes.
        for path, format_args, content_type, route in [
                ("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
                (self.options.get("testharnessreport", "testharnessreport.js"),
                 {"output": self.pause_after_test}, "text/javascript",
                 "/resources/testharnessreport.js")]:
            handler = StaticHandler(os.path.join(here, path), format_args, content_type)
            routes.insert(0, (b"GET", str(route), handler))

        for url, paths in self.test_paths.iteritems():
            if url == "/":
                continue

            path = paths["tests_path"]
            url = "/%s/" % url.strip("/")

            # Python scripts, as-is files, then a catch-all file handler.
            for (method,
                 suffix,
                 handler_cls) in [(b"*",
                                   b"*.py",
                                   serve.handlers.PythonScriptHandler),
                                  (b"GET",
                                   "*.asis",
                                   serve.handlers.AsIsHandler),
                                  (b"GET",
                                   "*",
                                   serve.handlers.FileHandler)]:
                route = (method, b"%s%s" % (str(url), str(suffix)), handler_cls(path, url_base=url))
                # Keep the three default catch-all routes at the end.
                routes.insert(-3, route)

        if "/" not in self.test_paths:
            # No root configured: drop the default root-serving routes.
            routes = routes[:-3]

        return routes

    def ensure_started(self):
        """Verify every configured server came up; raise EnvironmentError
        otherwise."""
        # Pause for a while to ensure that the server has a chance to start
        time.sleep(2)
        for scheme, servers in self.servers.iteritems():
            for port, server in servers:
                if self.test_server_port:
                    s = socket.socket()
                    try:
                        s.connect((self.config["host"], port))
                    except socket.error:
                        raise EnvironmentError(
                            "%s server on port %d failed to start" % (scheme, port))
                    finally:
                        s.close()

                if not server.is_alive():
                    raise EnvironmentError("%s server on port %d failed to start" % (scheme, port))
| mpl-2.0 |
dqnykamp/sympy | sympy/printing/tests/test_latex.py | 6 | 54235 | from sympy import (
Abs, Chi, Ci, CosineTransform, Dict, Ei, Eq, FallingFactorial, FiniteSet,
Float, FourierTransform, Function, IndexedBase, Integral, Interval,
InverseCosineTransform, InverseFourierTransform,
InverseLaplaceTransform, InverseMellinTransform, InverseSineTransform,
Lambda, LaplaceTransform, Limit, Matrix, Max, MellinTransform, Min, Mul,
Order, Piecewise, Poly, ring, field, ZZ, Pow, Product, Range, Rational,
RisingFactorial, RootOf, RootSum, S, Shi, Si, SineTransform, Subs,
Sum, Symbol, ImageSet, Tuple, Union, Ynm, Znm, arg, asin,
assoc_laguerre, assoc_legendre, binomial, catalan, ceiling, Complement,
chebyshevt, chebyshevu, conjugate, cot, coth, diff, dirichlet_eta,
exp, expint, factorial, factorial2, floor, gamma, gegenbauer, hermite,
hyper, im, im, jacobi, laguerre, legendre, lerchphi, log, lowergamma,
meijerg, oo, polar_lift, polylog, re, re, root, sin, sqrt, symbols,
uppergamma, zeta, subfactorial, totient, elliptic_k, elliptic_f,
elliptic_e, elliptic_pi, cos, tan, Wild, true, false, Equivalent, Not,
Contains, divisor_sigma)
from sympy.abc import mu, tau
from sympy.printing.latex import latex, translate
from sympy.utilities.pytest import XFAIL, raises
from sympy.functions import DiracDelta, Heaviside, KroneckerDelta, LeviCivita
from sympy.logic import Implies
from sympy.logic.boolalg import And, Or, Xor
from sympy.core.trace import Tr
# Shared symbols for the tests below; k, m, n carry the integer assumption.
x, y, z, t, a, b = symbols('x y z t a b')
k, m, n = symbols('k m n', integer=True)
def test_printmethod():
    """A class can override its LaTeX rendering via a _latex method."""
    class R(Abs):
        def _latex(self, printer):
            return "foo(%s)" % printer._print(self.args[0])
    assert latex(R(x)) == "foo(x)"
    class R(Abs):
        def _latex(self, printer):
            return "foo"
    assert latex(R(x)) == "foo"
def test_latex_basic():
    """Core arithmetic, fractions, powers, floats and boolean operators."""
    assert latex(1 + x) == "x + 1"
    assert latex(x**2) == "x^{2}"
    assert latex(x**(1 + x)) == "x^{x + 1}"
    assert latex(x**3 + x + 1 + x**2) == "x^{3} + x^{2} + x + 1"

    assert latex(2*x*y) == "2 x y"
    assert latex(2*x*y, mul_symbol='dot') == r"2 \cdot x \cdot y"

    assert latex(1/x) == r"\frac{1}{x}"
    assert latex(1/x, fold_short_frac=True) == "1 / x"
    assert latex(1/x**2) == r"\frac{1}{x^{2}}"
    assert latex(x/2) == r"\frac{x}{2}"
    assert latex(x/2, fold_short_frac=True) == "x / 2"
    assert latex((x + y)/(2*x)) == r"\frac{x + y}{2 x}"
    assert latex((x + y)/(2*x), fold_short_frac=True) == \
        r"\left(x + y\right) / 2 x"
    assert latex((x + y)/(2*x), long_frac_ratio=0) == \
        r"\frac{1}{2 x} \left(x + y\right)"
    assert latex((x + y)/x) == r"\frac{1}{x} \left(x + y\right)"
    assert latex((x + y)/x, long_frac_ratio=3) == r"\frac{x + y}{x}"

    assert latex(2*Integral(x, x)/3) == r"\frac{2}{3} \int x\, dx"
    assert latex(2*Integral(x, x)/3, fold_short_frac=True) == \
        r"\left(2 \int x\, dx\right) / 3"

    assert latex(sqrt(x)) == r"\sqrt{x}"
    assert latex(x**Rational(1, 3)) == r"\sqrt[3]{x}"
    assert latex(sqrt(x)**3) == r"x^{\frac{3}{2}}"
    assert latex(sqrt(x), itex=True) == r"\sqrt{x}"
    assert latex(x**Rational(1, 3), itex=True) == r"\root{3}{x}"
    assert latex(sqrt(x)**3, itex=True) == r"x^{\frac{3}{2}}"
    assert latex(x**Rational(3, 4)) == r"x^{\frac{3}{4}}"
    assert latex(x**Rational(3, 4), fold_frac_powers=True) == "x^{3/4}"
    assert latex((x + 1)**Rational(3, 4)) == \
        r"\left(x + 1\right)^{\frac{3}{4}}"
    assert latex((x + 1)**Rational(3, 4), fold_frac_powers=True) == \
        r"\left(x + 1\right)^{3/4}"

    assert latex(1.5e20*x) == r"1.5 \cdot 10^{20} x"
    assert latex(1.5e20*x, mul_symbol='dot') == r"1.5 \cdot 10^{20} \cdot x"
    assert latex(1.5e20*x, mul_symbol='times') == r"1.5 \times 10^{20} \times x"

    assert latex(1/sin(x)) == r"\frac{1}{\sin{\left (x \right )}}"
    assert latex(sin(x)**-1) == r"\frac{1}{\sin{\left (x \right )}}"
    assert latex(sin(x)**Rational(3, 2)) == \
        r"\sin^{\frac{3}{2}}{\left (x \right )}"
    assert latex(sin(x)**Rational(3, 2), fold_frac_powers=True) == \
        r"\sin^{3/2}{\left (x \right )}"

    assert latex(~x) == r"\neg x"
    assert latex(x & y) == r"x \wedge y"
    assert latex(x & y & z) == r"x \wedge y \wedge z"
    assert latex(x | y) == r"x \vee y"
    assert latex(x | y | z) == r"x \vee y \vee z"
    assert latex((x & y) | z) == r"z \vee \left(x \wedge y\right)"
    assert latex(Implies(x, y)) == r"x \Rightarrow y"
    assert latex(~(x >> ~y)) == r"x \not\Rightarrow \neg y"
    assert latex(Implies(Or(x,y), z)) == r"\left(x \vee y\right) \Rightarrow z"
    assert latex(Implies(z, Or(x,y))) == r"z \Rightarrow \left(x \vee y\right)"
    assert latex(~x, symbol_names={x: "x_i"}) == r"\neg x_i"
    assert latex(x & y, symbol_names={x: "x_i", y: "y_i"}) == \
        r"x_i \wedge y_i"
    assert latex(x & y & z, symbol_names={x: "x_i", y: "y_i", z: "z_i"}) == \
        r"x_i \wedge y_i \wedge z_i"
    assert latex(x | y, symbol_names={x: "x_i", y: "y_i"}) == r"x_i \vee y_i"
    assert latex(x | y | z, symbol_names={x: "x_i", y: "y_i", z: "z_i"}) == \
        r"x_i \vee y_i \vee z_i"
    assert latex((x & y) | z, symbol_names={x: "x_i", y: "y_i", z: "z_i"}) == \
        r"z_i \vee \left(x_i \wedge y_i\right)"
    assert latex(Implies(x, y), symbol_names={x: "x_i", y: "y_i"}) == \
        r"x_i \Rightarrow y_i"
def test_latex_builtins():
    """Python builtins and sympy booleans render as \\mathrm text."""
    assert latex(True) == r"\mathrm{True}"
    assert latex(False) == r"\mathrm{False}"
    assert latex(None) == r"\mathrm{None}"
    assert latex(true) == r"\mathrm{True}"
    assert latex(false) == r'\mathrm{False}'
def test_latex_Float():
    """Floats use scientific notation with a configurable mul symbol."""
    assert latex(Float(1.0e100)) == r"1.0 \cdot 10^{100}"
    assert latex(Float(1.0e-100)) == r"1.0 \cdot 10^{-100}"
    assert latex(Float(1.0e-100), mul_symbol="times") == r"1.0 \times 10^{-100}"
    assert latex(1.0*oo) == r"\infty"
    assert latex(-1.0*oo) == r"- \infty"
def test_latex_symbols():
    """Greek names, trailing digits and ^/_ markup in symbol names."""
    Gamma, lmbda, rho = symbols('Gamma, lambda, rho')
    mass, volume = symbols('mass, volume')
    assert latex(Gamma + lmbda) == r"\Gamma + \lambda"
    assert latex(Gamma * lmbda) == r"\Gamma \lambda"
    assert latex(Symbol('q1')) == r"q_{1}"
    assert latex(Symbol('q21')) == r"q_{21}"
    assert latex(Symbol('epsilon0')) == r"\epsilon_{0}"
    assert latex(Symbol('omega1')) == r"\omega_{1}"
    assert latex(Symbol('91')) == r"91"
    assert latex(Symbol('alpha_new')) == r"\alpha_{new}"
    assert latex(Symbol('C^orig')) == r"C^{orig}"
    assert latex(Symbol('x^alpha')) == r"x^{\alpha}"
    assert latex(Symbol('beta^alpha')) == r"\beta^{\alpha}"
    assert latex(Symbol('e^Alpha')) == r"e^{A}"
    assert latex(Symbol('omega_alpha^beta')) == r"\omega^{\beta}_{\alpha}"
    assert latex(Symbol('omega') ** Symbol('beta')) == r"\omega^{\beta}"
@XFAIL
def test_latex_symbols_failing():
    """Known-failing cases for multi-letter symbol names (expected to fail)."""
    rho, mass, volume = symbols('rho, mass, volume')
    assert latex(
        volume * rho == mass) == r"\rho \mathrm{volume} = \mathrm{mass}"
    assert latex(volume / mass * rho == 1) == r"\rho \mathrm{volume} {\mathrm{mass}}^{(-1)} = 1"
    assert latex(mass**3 * volume**3) == r"{\mathrm{mass}}^{3} \cdot {\mathrm{volume}}^{3}"
def test_latex_functions():
    """Spot-checks LaTeX rendering for a broad range of named functions."""
    assert latex(exp(x)) == "e^{x}"
    assert latex(exp(1) + exp(2)) == "e + e^{2}"

    f = Function('f')
    assert latex(f(x)) == r'f{\left (x \right )}'
    assert latex(f) == r'f'

    g = Function('g')
    assert latex(g(x, y)) == r'g{\left (x,y \right )}'
    assert latex(g) == r'g'

    h = Function('h')
    assert latex(h(x, y, z)) == r'h{\left (x,y,z \right )}'
    assert latex(h) == r'h'

    Li = Function('Li')
    assert latex(Li) == r'\operatorname{Li}'
    assert latex(Li(x)) == r'\operatorname{Li}{\left (x \right )}'

    beta = Function('beta')

    # not to be confused with the beta function
    assert latex(beta(x)) == r"\beta{\left (x \right )}"
    assert latex(beta) == r"\beta"

    a1 = Function('a_1')
    assert latex(a1) == r"\operatorname{a_{1}}"
    assert latex(a1(x)) == r"\operatorname{a_{1}}{\left (x \right )}"

    # issue 5868
    omega1 = Function('omega1')
    assert latex(omega1) == r"\omega_{1}"
    assert latex(omega1(x)) == r"\omega_{1}{\left (x \right )}"

    assert latex(sin(x)) == r"\sin{\left (x \right )}"
    assert latex(sin(x), fold_func_brackets=True) == r"\sin {x}"
    assert latex(sin(2*x**2), fold_func_brackets=True) == \
        r"\sin {2 x^{2}}"
    assert latex(sin(x**2), fold_func_brackets=True) == \
        r"\sin {x^{2}}"

    assert latex(asin(x)**2) == r"\operatorname{asin}^{2}{\left (x \right )}"
    assert latex(asin(x)**2, inv_trig_style="full") == \
        r"\arcsin^{2}{\left (x \right )}"
    assert latex(asin(x)**2, inv_trig_style="power") == \
        r"\sin^{-1}{\left (x \right )}^{2}"
    assert latex(asin(x**2), inv_trig_style="power",
                 fold_func_brackets=True) == \
        r"\sin^{-1} {x^{2}}"

    assert latex(factorial(k)) == r"k!"
    assert latex(factorial(-k)) == r"\left(- k\right)!"

    assert latex(subfactorial(k)) == r"!k"
    assert latex(subfactorial(-k)) == r"!\left(- k\right)"

    assert latex(factorial2(k)) == r"k!!"
    assert latex(factorial2(-k)) == r"\left(- k\right)!!"

    assert latex(binomial(2, k)) == r"{\binom{2}{k}}"

    assert latex(
        FallingFactorial(3, k)) == r"{\left(3\right)}_{\left(k\right)}"
    assert latex(RisingFactorial(3, k)) == r"{\left(3\right)}^{\left(k\right)}"

    assert latex(floor(x)) == r"\lfloor{x}\rfloor"
    assert latex(ceiling(x)) == r"\lceil{x}\rceil"
    assert latex(Min(x, 2, x**3)) == r"\min\left(2, x, x^{3}\right)"
    assert latex(Min(x, y)**2) == r"\min\left(x, y\right)^{2}"
    assert latex(Max(x, 2, x**3)) == r"\max\left(2, x, x^{3}\right)"
    assert latex(Max(x, y)**2) == r"\max\left(x, y\right)^{2}"
    assert latex(Abs(x)) == r"\left\lvert{x}\right\rvert"
    assert latex(re(x)) == r"\Re{x}"
    assert latex(re(x + y)) == r"\Re{x} + \Re{y}"
    assert latex(im(x)) == r"\Im{x}"
    assert latex(conjugate(x)) == r"\overline{x}"
    assert latex(gamma(x)) == r"\Gamma{\left(x \right)}"
    w = Wild('w')
    assert latex(gamma(w)) == r"\Gamma{\left(w \right)}"
    assert latex(Order(x)) == r"\mathcal{O}\left(x\right)"
    assert latex(Order(x, x)) == r"\mathcal{O}\left(x\right)"
    assert latex(Order(x, (x, 0))) == r"\mathcal{O}\left(x\right)"
    assert latex(Order(x, (x, oo))) == r"\mathcal{O}\left(x; x\rightarrow\infty\right)"
    assert latex(Order(x, x, y)) == r"\mathcal{O}\left(x; \left ( x, \quad y\right )\rightarrow\left ( 0, \quad 0\right )\right)"
    assert latex(Order(x, x, y)) == r"\mathcal{O}\left(x; \left ( x, \quad y\right )\rightarrow\left ( 0, \quad 0\right )\right)"
    assert latex(Order(x, (x, oo), (y, oo))) == r"\mathcal{O}\left(x; \left ( x, \quad y\right )\rightarrow\left ( \infty, \quad \infty\right )\right)"
    assert latex(lowergamma(x, y)) == r'\gamma\left(x, y\right)'
    assert latex(uppergamma(x, y)) == r'\Gamma\left(x, y\right)'

    assert latex(cot(x)) == r'\cot{\left (x \right )}'
    assert latex(coth(x)) == r'\coth{\left (x \right )}'
    assert latex(re(x)) == r'\Re{x}'
    assert latex(im(x)) == r'\Im{x}'
    assert latex(root(x, y)) == r'x^{\frac{1}{y}}'
    assert latex(arg(x)) == r'\arg{\left (x \right )}'
    assert latex(zeta(x)) == r'\zeta\left(x\right)'

    assert latex(zeta(x)) == r"\zeta\left(x\right)"
    assert latex(zeta(x)**2) == r"\zeta^{2}\left(x\right)"
    assert latex(zeta(x, y)) == r"\zeta\left(x, y\right)"
    assert latex(zeta(x, y)**2) == r"\zeta^{2}\left(x, y\right)"
    assert latex(dirichlet_eta(x)) == r"\eta\left(x\right)"
    assert latex(dirichlet_eta(x)**2) == r"\eta^{2}\left(x\right)"
    assert latex(polylog(x, y)) == r"\operatorname{Li}_{x}\left(y\right)"
    assert latex(
        polylog(x, y)**2) == r"\operatorname{Li}_{x}^{2}\left(y\right)"
    assert latex(lerchphi(x, y, n)) == r"\Phi\left(x, y, n\right)"
    assert latex(lerchphi(x, y, n)**2) == r"\Phi^{2}\left(x, y, n\right)"

    assert latex(elliptic_k(z)) == r"K\left(z\right)"
    assert latex(elliptic_k(z)**2) == r"K^{2}\left(z\right)"
    assert latex(elliptic_f(x, y)) == r"F\left(x\middle| y\right)"
    assert latex(elliptic_f(x, y)**2) == r"F^{2}\left(x\middle| y\right)"
    assert latex(elliptic_e(x, y)) == r"E\left(x\middle| y\right)"
    assert latex(elliptic_e(x, y)**2) == r"E^{2}\left(x\middle| y\right)"
    assert latex(elliptic_e(z)) == r"E\left(z\right)"
    assert latex(elliptic_e(z)**2) == r"E^{2}\left(z\right)"
    assert latex(elliptic_pi(x, y, z)) == r"\Pi\left(x; y\middle| z\right)"
    assert latex(elliptic_pi(x, y, z)**2) == \
        r"\Pi^{2}\left(x; y\middle| z\right)"
    assert latex(elliptic_pi(x, y)) == r"\Pi\left(x\middle| y\right)"
    assert latex(elliptic_pi(x, y)**2) == r"\Pi^{2}\left(x\middle| y\right)"

    assert latex(Ei(x)) == r'\operatorname{Ei}{\left (x \right )}'
    assert latex(Ei(x)**2) == r'\operatorname{Ei}^{2}{\left (x \right )}'
    assert latex(expint(x, y)**2) == r'\operatorname{E}_{x}^{2}\left(y\right)'
    assert latex(Shi(x)**2) == r'\operatorname{Shi}^{2}{\left (x \right )}'
    assert latex(Si(x)**2) == r'\operatorname{Si}^{2}{\left (x \right )}'
    assert latex(Ci(x)**2) == r'\operatorname{Ci}^{2}{\left (x \right )}'
    assert latex(Chi(x)**2) == r'\operatorname{Chi}^{2}{\left (x \right )}'
    assert latex(Chi(x)) == r'\operatorname{Chi}{\left (x \right )}'

    assert latex(
        jacobi(n, a, b, x)) == r'P_{n}^{\left(a,b\right)}\left(x\right)'
    assert latex(jacobi(n, a, b, x)**2) == r'\left(P_{n}^{\left(a,b\right)}\left(x\right)\right)^{2}'
    assert latex(
        gegenbauer(n, a, x)) == r'C_{n}^{\left(a\right)}\left(x\right)'
    assert latex(gegenbauer(n, a, x)**2) == r'\left(C_{n}^{\left(a\right)}\left(x\right)\right)^{2}'
    assert latex(chebyshevt(n, x)) == r'T_{n}\left(x\right)'
    assert latex(
        chebyshevt(n, x)**2) == r'\left(T_{n}\left(x\right)\right)^{2}'
    assert latex(chebyshevu(n, x)) == r'U_{n}\left(x\right)'
    assert latex(
        chebyshevu(n, x)**2) == r'\left(U_{n}\left(x\right)\right)^{2}'
    assert latex(legendre(n, x)) == r'P_{n}\left(x\right)'
    assert latex(legendre(n, x)**2) == r'\left(P_{n}\left(x\right)\right)^{2}'
    assert latex(
        assoc_legendre(n, a, x)) == r'P_{n}^{\left(a\right)}\left(x\right)'
    assert latex(assoc_legendre(n, a, x)**2) == r'\left(P_{n}^{\left(a\right)}\left(x\right)\right)^{2}'
    assert latex(laguerre(n, x)) == r'L_{n}\left(x\right)'
    assert latex(laguerre(n, x)**2) == r'\left(L_{n}\left(x\right)\right)^{2}'
    assert latex(
        assoc_laguerre(n, a, x)) == r'L_{n}^{\left(a\right)}\left(x\right)'
    assert latex(assoc_laguerre(n, a, x)**2) == r'\left(L_{n}^{\left(a\right)}\left(x\right)\right)^{2}'
    assert latex(hermite(n, x)) == r'H_{n}\left(x\right)'
    assert latex(hermite(n, x)**2) == r'\left(H_{n}\left(x\right)\right)^{2}'

    theta = Symbol("theta", real=True)
    phi = Symbol("phi", real=True)
    assert latex(Ynm(n,m,theta,phi)) == r'Y_{n}^{m}\left(\theta,\phi\right)'
    assert latex(Ynm(n, m, theta, phi)**3) == r'\left(Y_{n}^{m}\left(\theta,\phi\right)\right)^{3}'
    assert latex(Znm(n,m,theta,phi)) == r'Z_{n}^{m}\left(\theta,\phi\right)'
    assert latex(Znm(n, m, theta, phi)**3) == r'\left(Z_{n}^{m}\left(\theta,\phi\right)\right)^{3}'

    # Test latex printing of function names with "_"
    assert latex(
        polar_lift(0)) == r"\operatorname{polar\_lift}{\left (0 \right )}"
    assert latex(polar_lift(
        0)**3) == r"\operatorname{polar\_lift}^{3}{\left (0 \right )}"

    assert latex(totient(n)) == r'\phi\left( n \right)'

    assert latex(divisor_sigma(x)) == r"\sigma\left(x\right)"
    assert latex(divisor_sigma(x)**2) == r"\sigma^{2}\left(x\right)"
    assert latex(divisor_sigma(x, y)) == r"\sigma_y\left(x\right)"
    assert latex(divisor_sigma(x, y)**2) == r"\sigma^{2}_y\left(x\right)"

    # some unknown function name should get rendered with \operatorname
    fjlkd = Function('fjlkd')
    assert latex(fjlkd(x)) == r'\operatorname{fjlkd}{\left (x \right )}'
    # even when it is referred to without an argument
    assert latex(fjlkd) == r'\operatorname{fjlkd}'
def test_hyper_printing():
    """Meijer G and hypergeometric functions render as matrix-style blocks."""
    from sympy import pi
    from sympy.abc import x, z

    assert latex(meijerg(Tuple(pi, pi, x), Tuple(1),
                         (0, 1), Tuple(1, 2, 3/pi), z)) == \
        r'{G_{4, 5}^{2, 3}\left(\begin{matrix} \pi, \pi, x & 1 \\0, 1 & 1, 2, \frac{3}{\pi} \end{matrix} \middle| {z} \right)}'
    assert latex(meijerg(Tuple(), Tuple(1), (0,), Tuple(), z)) == \
        r'{G_{1, 1}^{1, 0}\left(\begin{matrix} & 1 \\0 & \end{matrix} \middle| {z} \right)}'
    assert latex(hyper((x, 2), (3,), z)) == \
        r'{{}_{2}F_{1}\left(\begin{matrix} x, 2 ' \
        r'\\ 3 \end{matrix}\middle| {z} \right)}'
    assert latex(hyper(Tuple(), Tuple(1), z)) == \
        r'{{}_{0}F_{1}\left(\begin{matrix} ' \
        r'\\ 1 \end{matrix}\middle| {z} \right)}'
def test_latex_bessel():
    """Bessel, Hankel and spherical Bessel function rendering."""
    from sympy.functions.special.bessel import (besselj, bessely, besseli,
                                                besselk, hankel1, hankel2,
                                                jn, yn)
    from sympy.abc import z
    assert latex(besselj(n, z**2)**k) == r'J^{k}_{n}\left(z^{2}\right)'
    assert latex(bessely(n, z)) == r'Y_{n}\left(z\right)'
    assert latex(besseli(n, z)) == r'I_{n}\left(z\right)'
    assert latex(besselk(n, z)) == r'K_{n}\left(z\right)'
    assert latex(hankel1(n, z**2)**2) == \
        r'\left(H^{(1)}_{n}\left(z^{2}\right)\right)^{2}'
    assert latex(hankel2(n, z)) == r'H^{(2)}_{n}\left(z\right)'
    assert latex(jn(n, z)) == r'j_{n}\left(z\right)'
    assert latex(yn(n, z)) == r'y_{n}\left(z\right)'
def test_latex_fresnel():
    """Fresnel S and C integrals."""
    from sympy.functions.special.error_functions import (fresnels, fresnelc)
    from sympy.abc import z
    assert latex(fresnels(z)) == r'S\left(z\right)'
    assert latex(fresnelc(z)) == r'C\left(z\right)'
    assert latex(fresnels(z)**2) == r'S^{2}\left(z\right)'
    assert latex(fresnelc(z)**2) == r'C^{2}\left(z\right)'
def test_latex_brackets():
    """Negative bases get parenthesized before exponentiation."""
    assert latex((-1)**x) == r"\left(-1\right)^{x}"
def test_latex_indexed():
    """Indexed objects print like the equivalent subscripted symbols."""
    Psi_symbol = Symbol('Psi_0', complex=True, real=False)
    Psi_indexed = IndexedBase(Symbol('Psi', complex=True, real=False))
    symbol_latex = latex(Psi_symbol * conjugate(Psi_symbol))
    indexed_latex = latex(Psi_indexed[0] * conjugate(Psi_indexed[0]))
    # \\overline{\\Psi_{0}} \\Psi_{0} vs. \\Psi_{0} \\overline{\\Psi_{0}}
    # (factor order is not guaranteed, so accept either)
    assert symbol_latex.split() == indexed_latex.split() \
        or symbol_latex.split() == indexed_latex.split()[::-1]

    # Symbol('gamma') gives r'\gamma'
    assert latex(IndexedBase('gamma')) == r'\gamma'
    assert latex(IndexedBase('a b')) == 'a b'
    assert latex(IndexedBase('a_b')) == 'a_{b}'
def test_latex_derivatives():
    """Ordinary vs. partial derivative notation, including mixed partials."""
    # regular "d" for ordinary derivatives
    assert latex(diff(x**3, x, evaluate=False)) == \
        r"\frac{d}{d x} x^{3}"
    assert latex(diff(sin(x) + x**2, x, evaluate=False)) == \
        r"\frac{d}{d x}\left(x^{2} + \sin{\left (x \right )}\right)"
    assert latex(diff(diff(sin(x) + x**2, x, evaluate=False), evaluate=False)) == \
        r"\frac{d^{2}}{d x^{2}} \left(x^{2} + \sin{\left (x \right )}\right)"
    assert latex(diff(diff(diff(sin(x) + x**2, x, evaluate=False), evaluate=False), evaluate=False)) == \
        r"\frac{d^{3}}{d x^{3}} \left(x^{2} + \sin{\left (x \right )}\right)"

    # \partial for partial derivatives
    assert latex(diff(sin(x * y), x, evaluate=False)) == \
        r"\frac{\partial}{\partial x} \sin{\left (x y \right )}"
    assert latex(diff(sin(x * y) + x**2, x, evaluate=False)) == \
        r"\frac{\partial}{\partial x}\left(x^{2} + \sin{\left (x y \right )}\right)"
    assert latex(diff(diff(sin(x*y) + x**2, x, evaluate=False), x, evaluate=False)) == \
        r"\frac{\partial^{2}}{\partial x^{2}} \left(x^{2} + \sin{\left (x y \right )}\right)"
    assert latex(diff(diff(diff(sin(x*y) + x**2, x, evaluate=False), x, evaluate=False), x, evaluate=False)) == \
        r"\frac{\partial^{3}}{\partial x^{3}} \left(x^{2} + \sin{\left (x y \right )}\right)"

    # mixed partial derivatives
    f = Function("f")
    assert latex(diff(diff(f(x,y), x, evaluate=False), y, evaluate=False)) == \
        r"\frac{\partial^{2}}{\partial x\partial y} " + latex(f(x,y))
    assert latex(diff(diff(diff(f(x,y), x, evaluate=False), x, evaluate=False), y, evaluate=False)) == \
        r"\frac{\partial^{3}}{\partial x^{2}\partial y} " + latex(f(x,y))

    # use ordinary d when one of the variables has been integrated out
    assert latex(diff(Integral(exp(-x * y), (x, 0, oo)), y, evaluate=False)) == \
        r"\frac{d}{d y} \int_{0}^{\infty} e^{- x y}\, dx"
def test_latex_subs():
    """Subs prints the expression with a substitution bar and substack."""
    assert latex(Subs(x*y, (
        x, y), (1, 2))) == r'\left. x y \right|_{\substack{ x=1\\ y=2 }}'


def test_latex_integrals():
    """Integral printing: bounds placement, equation modes, and the
    iint/iiint/iiiint glyphs for repeated integrals."""
    assert latex(Integral(log(x), x)) == r"\int \log{\left (x \right )}\, dx"
    assert latex(Integral(x**2, (x, 0, 1))) == r"\int_{0}^{1} x^{2}\, dx"
    assert latex(Integral(x**2, (x, 10, 20))) == r"\int_{10}^{20} x^{2}\, dx"
    assert latex(Integral(
        y*x**2, (x, 0, 1), y)) == r"\int\int_{0}^{1} x^{2} y\, dx\, dy"
    assert latex(Integral(y*x**2, (x, 0, 1), y), mode='equation*') \
        == r"\begin{equation*}\int\int\limits_{0}^{1} x^{2} y\, dx\, dy\end{equation*}"
    assert latex(Integral(y*x**2, (x, 0, 1), y), mode='equation*', itex=True) \
        == r"$$\int\int_{0}^{1} x^{2} y\, dx\, dy$$"
    assert latex(Integral(x, (x, 0))) == r"\int^{0} x\, dx"
    assert latex(Integral(x*y, x, y)) == r"\iint x y\, dx\, dy"
    assert latex(Integral(x*y*z, x, y, z)) == r"\iiint x y z\, dx\, dy\, dz"
    assert latex(Integral(x*y*z*t, x, y, z, t)) == \
        r"\iiiint t x y z\, dx\, dy\, dz\, dt"
    # more than four integrals fall back to repeated \int
    assert latex(Integral(x, x, x, x, x, x, x)) == \
        r"\int\int\int\int\int\int x\, dx\, dx\, dx\, dx\, dx\, dx"
    assert latex(Integral(x, x, y, (z, 0, 1))) == \
        r"\int_{0}^{1}\int\int x\, dx\, dy\, dz"
def test_latex_sets():
    """Builtin set/frozenset and FiniteSet print as sorted braced lists."""
    for s in (frozenset, set):
        assert latex(s([x*y, x**2])) == r"\left\{x^{2}, x y\right\}"
        assert latex(s(range(1, 6))) == r"\left\{1, 2, 3, 4, 5\right\}"
        assert latex(s(range(1, 13))) == \
            r"\left\{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12\right\}"
    s = FiniteSet
    assert latex(s(*[x*y, x**2])) == r"\left\{x^{2}, x y\right\}"
    assert latex(s(*range(1, 6))) == r"\left\{1, 2, 3, 4, 5\right\}"
    assert latex(s(*range(1, 13))) == \
        r"\left\{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12\right\}"


def test_latex_Range():
    """Long Ranges abbreviate with an ellipsis; short ones list all members."""
    assert latex(Range(1, 51)) == \
        r'\left\{1, 2, \ldots, 50\right\}'
    assert latex(Range(1, 4)) == r'\left\{1, 2, 3\right\}'
def test_latex_intervals():
    """Interval endpoints switch bracket shape with the open/closed flags."""
    a = Symbol('a', real=True)
    assert latex(Interval(0, 0)) == r"\left\{0\right\}"
    assert latex(Interval(0, a)) == r"\left[0, a\right]"
    assert latex(Interval(0, a, False, False)) == r"\left[0, a\right]"
    assert latex(Interval(0, a, True, False)) == r"\left(0, a\right]"
    assert latex(Interval(0, a, False, True)) == r"\left[0, a\right)"
    assert latex(Interval(0, a, True, True)) == r"\left(0, a\right)"


def test_latex_emptyset():
    """The empty set prints as the emptyset glyph."""
    assert latex(S.EmptySet) == r"\emptyset"


def test_latex_union():
    """Unions join members with the cup operator; adjacent singletons merge."""
    assert latex(Union(Interval(0, 1), Interval(2, 3))) == \
        r"\left[0, 1\right] \cup \left[2, 3\right]"
    assert latex(Union(Interval(1, 1), Interval(2, 2), Interval(3, 4))) == \
        r"\left\{1, 2\right\} \cup \left[3, 4\right]"


def test_latex_Complement():
    """Set complement prints with the setminus operator."""
    assert latex(Complement(S.Reals, S.Naturals)) == r"\mathbb{R} \setminus \mathbb{N}"
def test_latex_productset():
    """Cartesian products use a power for self-products, times otherwise."""
    line = Interval(0, 1)
    bigline = Interval(0, 10)
    fset = FiniteSet(1, 2, 3)
    assert latex(line**2) == r"%s^2" % latex(line)
    assert latex(line * bigline * fset) == r"%s \times %s \times %s" % (
        latex(line), latex(bigline), latex(fset))


def test_latex_Naturals():
    """Special number sets print as blackboard-bold letters."""
    assert latex(S.Naturals) == r"\mathbb{N}"
    assert latex(S.Integers) == r"\mathbb{Z}"


def test_latex_ImageSet():
    """ImageSet prints in set-builder notation."""
    x = Symbol('x')
    assert latex(ImageSet(Lambda(x, x**2), S.Naturals)) == \
        r"\left\{x^{2}\; |\; x \in \mathbb{N}\right\}"


def test_latex_Contains():
    """Contains prints with the element-of operator."""
    x = Symbol('x')
    assert latex(Contains(x, S.Naturals)) == r"x \in \mathbb{N}"
def test_latex_sum():
    """Sum limits go below/above sigma; multiple limits use a substack."""
    assert latex(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \
        r"\sum_{\substack{-2 \leq x \leq 2\\-5 \leq y \leq 5}} x y^{2}"
    assert latex(Sum(x**2, (x, -2, 2))) == \
        r"\sum_{x=-2}^{2} x^{2}"
    assert latex(Sum(x**2 + y, (x, -2, 2))) == \
        r"\sum_{x=-2}^{2} \left(x^{2} + y\right)"


def test_latex_product():
    """Product printing mirrors Sum but with the prod operator."""
    assert latex(Product(x*y**2, (x, -2, 2), (y, -5, 5))) == \
        r"\prod_{\substack{-2 \leq x \leq 2\\-5 \leq y \leq 5}} x y^{2}"
    assert latex(Product(x**2, (x, -2, 2))) == \
        r"\prod_{x=-2}^{2} x^{2}"
    assert latex(Product(x**2 + y, (x, -2, 2))) == \
        r"\prod_{x=-2}^{2} \left(x^{2} + y\right)"


def test_latex_limits():
    """Limits print the approach direction as a superscript sign."""
    assert latex(Limit(x, x, oo)) == r"\lim_{x \to \infty} x"
    # issue 8175: one-sided limits show + / - on the limit point
    f = Function('f')
    assert latex(Limit(f(x), x, 0)) == r"\lim_{x \to 0^+} f{\left (x \right )}"
    assert latex(Limit(f(x), x, 0, "-")) == r"\lim_{x \to 0^-} f{\left (x \right )}"
def test_issue_3568():
    """Symbols named with or without a leading backslash both print Greek."""
    beta = Symbol(r'\beta')
    y = beta + x
    assert latex(y) in [r'\beta + x', r'x + \beta']
    beta = Symbol(r'beta')
    y = beta + x
    assert latex(y) in [r'\beta + x', r'x + \beta']


def test_latex():
    """Smoke tests: basic powers, equation modes, itex, and Python lists."""
    assert latex((2*tau)**Rational(7, 2)) == "8 \\sqrt{2} \\tau^{\\frac{7}{2}}"
    assert latex((2*mu)**Rational(7, 2), mode='equation*') == \
        "\\begin{equation*}8 \\sqrt{2} \\mu^{\\frac{7}{2}}\\end{equation*}"
    assert latex((2*mu)**Rational(7, 2), mode='equation', itex=True) == \
        "$$8 \\sqrt{2} \\mu^{\\frac{7}{2}}$$"
    assert latex([2/x, y]) == r"\left [ \frac{2}{x}, \quad y\right ]"
def test_latex_dict():
    """dict and Dict print key/value pairs in canonically sorted order."""
    d = {Rational(1): 1, x**2: 2, x: 3, x**3: 4}
    assert latex(d) == r'\left \{ 1 : 1, \quad x : 3, \quad x^{2} : 2, \quad x^{3} : 4\right \}'
    D = Dict(d)
    assert latex(D) == r'\left \{ 1 : 1, \quad x : 3, \quad x^{2} : 2, \quad x^{3} : 4\right \}'


def test_latex_list():
    """Python lists print as bracketed, quad-separated sequences."""
    l = [Symbol('omega1'), Symbol('a'), Symbol('alpha')]
    assert latex(l) == r'\left [ \omega_{1}, \quad a, \quad \alpha\right ]'


def test_latex_rational():
    """Negative rationals keep the minus sign outside the fraction.

    Tests issue 3973.
    """
    assert latex(-Rational(1, 2)) == "- \\frac{1}{2}"
    assert latex(Rational(-1, 2)) == "- \\frac{1}{2}"
    assert latex(Rational(1, -2)) == "- \\frac{1}{2}"
    assert latex(-Rational(-1, 2)) == "\\frac{1}{2}"
    assert latex(-Rational(1, 2)*x) == "- \\frac{x}{2}"
    assert latex(-Rational(1, 2)*x + Rational(-2, 3)*y) == \
        "- \\frac{x}{2} - \\frac{2 y}{3}"


def test_latex_inverse():
    """Reciprocals print as fractions.  Tests issue 4129."""
    assert latex(1/x) == "\\frac{1}{x}"
    assert latex(1/(x + y)) == "\\frac{1}{x + y}"
def test_latex_DiracDelta():
    """DiracDelta prints as delta; the derivative order is a superscript."""
    assert latex(DiracDelta(x)) == r"\delta\left(x\right)"
    assert latex(DiracDelta(x)**2) == r"\left(\delta\left(x\right)\right)^{2}"
    assert latex(DiracDelta(x, 0)) == r"\delta\left(x\right)"
    assert latex(DiracDelta(x, 5)) == \
        r"\delta^{\left( 5 \right)}\left( x \right)"
    assert latex(DiracDelta(x, 5)**2) == \
        r"\left(\delta^{\left( 5 \right)}\left( x \right)\right)^{2}"


def test_latex_Heaviside():
    """Heaviside prints as theta."""
    assert latex(Heaviside(x)) == r"\theta\left(x\right)"
    assert latex(Heaviside(x)**2) == r"\left(\theta\left(x\right)\right)^{2}"


def test_latex_KroneckerDelta():
    """KroneckerDelta separates compound subscripts with commas."""
    assert latex(KroneckerDelta(x, y)) == r"\delta_{x y}"
    assert latex(KroneckerDelta(x, y + 1)) == r"\delta_{x, y + 1}"
    # issue 6578: arguments are put in canonical order before printing
    assert latex(KroneckerDelta(x + 1, y)) == r"\delta_{y, x + 1}"


def test_latex_LeviCivita():
    """LeviCivita prints as varepsilon; compound indices get commas."""
    assert latex(LeviCivita(x, y, z)) == r"\varepsilon_{x y z}"
    assert latex(LeviCivita(x, y, z)**2) == r"\left(\varepsilon_{x y z}\right)^{2}"
    assert latex(LeviCivita(x, y, z + 1)) == r"\varepsilon_{x, y, z + 1}"
    assert latex(LeviCivita(x, y + 1, z)) == r"\varepsilon_{x, y + 1, z}"
    assert latex(LeviCivita(x + 1, y, z)) == r"\varepsilon_{x + 1, y, z}"
def test_mode():
    """Each output mode wraps the expression in its own delimiters."""
    expected_by_mode = {
        'plain': 'x + y',
        'inline': '$x + y$',
        'equation*': '\\begin{equation*}x + y\\end{equation*}',
        'equation': '\\begin{equation}x + y\\end{equation}',
    }
    expr = x + y
    # the default mode is 'plain'
    assert latex(expr) == 'x + y'
    for mode, expected in expected_by_mode.items():
        assert latex(expr, mode=mode) == expected
def test_latex_Piecewise():
    """Piecewise prints as a cases environment; itex relaxes '<' to \\lt."""
    p = Piecewise((x, x < 1), (x**2, True))
    assert latex(p) == "\\begin{cases} x & \\text{for}\: x < 1 \\\\x^{2} &" \
        " \\text{otherwise} \\end{cases}"
    assert latex(p, itex=True) == "\\begin{cases} x & \\text{for}\: x \\lt 1 \\\\x^{2} &" \
        " \\text{otherwise} \\end{cases}"
    p = Piecewise((x, x < 0), (0, x >= 0))
    assert latex(p) == "\\begin{cases} x & \\text{for}\\: x < 0 \\\\0 &" \
        " \\text{for}\\: x \\geq 0 \\end{cases}"
    # noncommutative Piecewise factors keep parentheses on the right
    A, B = symbols("A B", commutative=False)
    p = Piecewise((A**2, Eq(A, B)), (A*B, True))
    s = r"\begin{cases} A^{2} & \text{for}\: A = B \\A B & \text{otherwise} \end{cases}"
    assert latex(p) == s
    assert latex(A*p) == r"A %s" % s
    assert latex(p*A) == r"\left(%s\right) A" % s
def test_latex_Matrix():
    """Matrix printing honours mat_str, mat_delim and inline mode."""
    M = Matrix([[1 + x, y], [y, x - 1]])
    assert latex(M) == \
        r'\left[\begin{matrix}x + 1 & y\\y & x - 1\end{matrix}\right]'
    assert latex(M, mode='inline') == \
        r'$\left[\begin{smallmatrix}x + 1 & y\\' \
        r'y & x - 1\end{smallmatrix}\right]$'
    assert latex(M, mat_str='array') == \
        r'\left[\begin{array}{cc}x + 1 & y\\y & x - 1\end{array}\right]'
    assert latex(M, mat_str='bmatrix') == \
        r'\left[\begin{bmatrix}x + 1 & y\\y & x - 1\end{bmatrix}\right]'
    assert latex(M, mat_delim=None, mat_str='bmatrix') == \
        r'\begin{bmatrix}x + 1 & y\\y & x - 1\end{bmatrix}'
    # matrices wider than 10 columns fall back to an array environment
    M2 = Matrix(1, 11, range(11))
    assert latex(M2) == \
        r'\left[\begin{array}{ccccccccccc}' \
        r'0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10\end{array}\right]'


def test_latex_matrix_with_functions():
    """Applied functions and derivatives render inside matrix entries."""
    t = symbols('t')
    theta1 = symbols('theta1', cls=Function)
    M = Matrix([[sin(theta1(t)), cos(theta1(t))],
                [cos(theta1(t).diff(t)), sin(theta1(t).diff(t))]])
    expected = (r'\left[\begin{matrix}\sin{\left '
                r'(\theta_{1}{\left (t \right )} \right )} & '
                r'\cos{\left (\theta_{1}{\left (t \right )} \right '
                r')}\\\cos{\left (\frac{d}{d t} \theta_{1}{\left (t '
                r'\right )} \right )} & \sin{\left (\frac{d}{d t} '
                r'\theta_{1}{\left (t \right )} \right '
                r')}\end{matrix}\right]')
    assert latex(M) == expected
def test_latex_mul_symbol():
    """The mul_symbol option selects the multiplication glyph."""
    assert latex(4*4**x, mul_symbol='times') == "4 \\times 4^{x}"
    assert latex(4*4**x, mul_symbol='dot') == "4 \\cdot 4^{x}"
    assert latex(4*4**x, mul_symbol='ldot') == "4 \,.\, 4^{x}"
    assert latex(4*x, mul_symbol='times') == "4 \\times x"
    assert latex(4*x, mul_symbol='dot') == "4 \\cdot x"
    assert latex(4*x, mul_symbol='ldot') == "4 \,.\, x"
def test_latex_issue_4381():
    """A numeric coefficient times a power keeps an explicit cdot."""
    y = 4*4**log(2)
    assert latex(y) == r'4 \cdot 4^{\log{\left (2 \right )}}'
    assert latex(1/y) == r'\frac{1}{4 \cdot 4^{\log{\left (2 \right )}}}'


def test_latex_issue_4576():
    """Underscore/caret pieces of a symbol name become sub/superscripts."""
    assert latex(Symbol("beta_13_2")) == r"\beta_{13 2}"
    assert latex(Symbol("beta_132_20")) == r"\beta_{132 20}"
    assert latex(Symbol("beta_13")) == r"\beta_{13}"
    assert latex(Symbol("x_a_b")) == r"x_{a b}"
    assert latex(Symbol("x_1_2_3")) == r"x_{1 2 3}"
    assert latex(Symbol("x_a_b1")) == r"x_{a b1}"
    assert latex(Symbol("x_a_1")) == r"x_{a 1}"
    assert latex(Symbol("x_1_a")) == r"x_{1 a}"
    assert latex(Symbol("x_1^aa")) == r"x^{aa}_{1}"
    assert latex(Symbol("x_1__aa")) == r"x^{aa}_{1}"
    assert latex(Symbol("x_11^a")) == r"x^{a}_{11}"
    assert latex(Symbol("x_11__a")) == r"x^{a}_{11}"
    assert latex(Symbol("x_a_a_a_a")) == r"x_{a a a a}"
    assert latex(Symbol("x_a_a^a^a")) == r"x^{a a}_{a a}"
    assert latex(Symbol("x_a_a__a__a")) == r"x^{a a}_{a a}"
    assert latex(Symbol("alpha_11")) == r"\alpha_{11}"
    assert latex(Symbol("alpha_11_11")) == r"\alpha_{11 11}"
    assert latex(Symbol("alpha_alpha")) == r"\alpha_{\alpha}"
    assert latex(Symbol("alpha^aleph")) == r"\alpha^{\aleph}"
    assert latex(Symbol("alpha__aleph")) == r"\alpha^{\aleph}"
def test_latex_pow_fraction():
    """Negative exponents stay in exponent form inside fractions."""
    x = Symbol('x')
    # Testing exp
    assert 'e^{-x}' in latex(exp(-x)/2).replace(' ', '')  # Remove Whitespace
    # Testing just e^{-x} in case future changes alter behavior of muls or fracs
    # In particular current output is \frac{1}{2}e^{- x} but perhaps this will
    # change to \frac{e^{-x}}{2}
    # Testing general, non-exp, power
    assert '3^{-x}' in latex(3**-x/2).replace(' ', '')


def test_noncommutative():
    """Noncommutative symbols keep their order; inverses stay in place."""
    A, B, C = symbols('A,B,C', commutative=False)
    assert latex(A*B*C**-1) == "A B C^{-1}"
    assert latex(C**-1*A*B) == "C^{-1} A B"
    assert latex(A*C**-1*B) == "A C^{-1} B"
def test_latex_order():
    """The order option controls monomial ordering in sums."""
    expr = x**3 + x**2*y + 3*x*y**3 + y**4
    assert latex(expr, order='lex') == "x^{3} + x^{2} y + 3 x y^{3} + y^{4}"
    assert latex(
        expr, order='rev-lex') == "y^{4} + 3 x y^{3} + x^{2} y + x^{3}"


def test_latex_Lambda():
    """Lambda prints with the mapsto arrow."""
    assert latex(Lambda(x, x + 1)) == \
        r"\left( x \mapsto x + 1 \right)"
    assert latex(Lambda((x, y), x + 1)) == \
        r"\left( \left ( x, \quad y\right ) \mapsto x + 1 \right)"
def test_latex_PolyElement():
    """Polynomial-ring elements print with braced generators and
    parenthesized coefficient polynomials."""
    Ruv, u,v = ring("u,v", ZZ)
    Rxyz, x,y,z = ring("x,y,z", Ruv)
    assert latex(x - x) == r"0"
    assert latex(x - 1) == r"x - 1"
    assert latex(x + 1) == r"x + 1"
    assert latex((u**2 + 3*u*v + 1)*x**2*y + u + 1) == r"\left({u}^{2} + 3 u v + 1\right) {x}^{2} y + u + 1"
    assert latex((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x) == r"\left({u}^{2} + 3 u v + 1\right) {x}^{2} y + \left(u + 1\right) x"
    assert latex((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1) == r"\left({u}^{2} + 3 u v + 1\right) {x}^{2} y + \left(u + 1\right) x + 1"
    assert latex((-u**2 + 3*u*v - 1)*x**2*y - (u + 1)*x - 1) == r"-\left({u}^{2} - 3 u v + 1\right) {x}^{2} y - \left(u + 1\right) x - 1"
    assert latex(-(v**2 + v + 1)*x + 3*u*v + 1) == r"-\left({v}^{2} + v + 1\right) x + 3 u v + 1"
    assert latex(-(v**2 + v + 1)*x - 3*u*v + 1) == r"-\left({v}^{2} + v + 1\right) x - 3 u v + 1"


def test_latex_FracElement():
    """Fraction-field elements print as latex fractions."""
    Fuv, u,v = field("u,v", ZZ)
    Fxyzt, x,y,z,t = field("x,y,z,t", Fuv)
    assert latex(x - x) == r"0"
    assert latex(x - 1) == r"x - 1"
    assert latex(x + 1) == r"x + 1"
    assert latex(x/3) == r"\frac{x}{3}"
    assert latex(x/z) == r"\frac{x}{z}"
    assert latex(x*y/z) == r"\frac{x y}{z}"
    assert latex(x/(z*t)) == r"\frac{x}{z t}"
    assert latex(x*y/(z*t)) == r"\frac{x y}{z t}"
    assert latex((x - 1)/y) == r"\frac{x - 1}{y}"
    assert latex((x + 1)/y) == r"\frac{x + 1}{y}"
    assert latex((-x - 1)/y) == r"\frac{-x - 1}{y}"
    assert latex((x + 1)/(y*z)) == r"\frac{x + 1}{y z}"
    assert latex(-y/(x + 1)) == r"\frac{-y}{x + 1}"
    assert latex(y*z/(x + 1)) == r"\frac{y z}{x + 1}"
    assert latex(((u + 1)*x*y + 1)/((v - 1)*z - 1)) == r"\frac{\left(u + 1\right) x y + 1}{\left(v - 1\right) z - 1}"
    assert latex(((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)) == r"\frac{\left(u + 1\right) x y + 1}{\left(v - 1\right) z - u v t - 1}"
def test_latex_Poly():
    """Poly prints operatorname{Poly} with expression, gens and domain."""
    assert latex(Poly(x**2 + 2 * x, x)) == \
        r"\operatorname{Poly}{\left( x^{2} + 2 x, x, domain=\mathbb{Z} \right)}"
    assert latex(Poly(x/y, x)) == \
        r"\operatorname{Poly}{\left( \frac{x}{y}, x, domain=\mathbb{Z}\left(y\right) \right)}"
    assert latex(Poly(2.0*x + y)) == \
        r"\operatorname{Poly}{\left( 2.0 x + 1.0 y, x, y, domain=\mathbb{R} \right)}"


def test_latex_RootOf():
    """RootOf prints operatorname{RootOf} with polynomial and index."""
    assert latex(RootOf(x**5 + x + 3, 0)) == \
        r"\operatorname{RootOf} {\left(x^{5} + x + 3, 0\right)}"


def test_latex_RootSum():
    """RootSum prints operatorname{RootSum} with polynomial and mapping."""
    assert latex(RootSum(x**5 + x + 3, sin)) == \
        r"\operatorname{RootSum} {\left(x^{5} + x + 3, \left( x \mapsto \sin{\left (x \right )} \right)\right)}"
def test_settings():
    """Unknown printer settings raise TypeError."""
    raises(TypeError, lambda: latex(x*y, method="garbage"))


def test_latex_numbers():
    """Catalan numbers print as a subscripted C."""
    assert latex(catalan(n)) == r"C_{n}"


def test_lamda():
    """The 'lamda'/'Lamda' spellings map to the lambda glyphs."""
    assert latex(Symbol('lamda')) == r"\lambda"
    assert latex(Symbol('Lamda')) == r"\Lambda"


def test_custom_symbol_names():
    """symbol_names overrides how specific symbols are printed."""
    x = Symbol('x')
    y = Symbol('y')
    assert latex(x) == "x"
    assert latex(x, symbol_names={x: "x_i"}) == "x_i"
    assert latex(x + y, symbol_names={x: "x_i"}) == "x_i + y"
    assert latex(x**2, symbol_names={x: "x_i"}) == "x_i^{2}"
    assert latex(x + y, symbol_names={x: "x_i", y: "y_j"}) == "x_i + y_j"
def test_matAdd():
    """MatAdd printing keeps scalar coefficients attached to matrix symbols.

    The summand order is not guaranteed, hence the membership checks.
    """
    from sympy import MatrixSymbol
    from sympy.printing.latex import LatexPrinter
    C = MatrixSymbol('C', 5, 5)
    B = MatrixSymbol('B', 5, 5)
    l = LatexPrinter()
    assert l._print_MatAdd(C - 2*B) in ['-2 B + C', 'C -2 B']
    assert l._print_MatAdd(C + 2*B) in ['2 B + C', 'C + 2 B']
    assert l._print_MatAdd(B - 2*C) in ['B -2 C', '-2 C + B']
    assert l._print_MatAdd(B + 2*C) in ['B + 2 C', '2 C + B']


def test_matMul():
    """MatMul printing handles scalar factors and parenthesized sums."""
    from sympy import MatrixSymbol
    from sympy.printing.latex import LatexPrinter
    A = MatrixSymbol('A', 5, 5)
    B = MatrixSymbol('B', 5, 5)
    x = Symbol('x')
    l = LatexPrinter()
    assert l._print_MatMul(2*A) == '2 A'
    assert l._print_MatMul(2*x*A) == '2 x A'
    assert l._print_MatMul(-2*A) == '-2 A'
    assert l._print_MatMul(1.5*A) == '1.5 A'
    assert l._print_MatMul(sqrt(2)*A) == r'\sqrt{2} A'
    assert l._print_MatMul(-sqrt(2)*A) == r'- \sqrt{2} A'
    assert l._print_MatMul(2*sqrt(2)*x*A) == r'2 \sqrt{2} x A'
    assert l._print_MatMul(-2*A*(A + 2*B)) in [r'-2 A \left(A + 2 B\right)',
                                               r'-2 A \left(2 B + A\right)']
def test_latex_MatrixSlice():
    """Matrix slices print in bracketed Python-like slice notation."""
    from sympy.matrices.expressions import MatrixSymbol
    assert latex(MatrixSymbol('X', 10, 10)[:5, 1:9:2]) == \
        r'X\left[:5, 1:9:2\right]'
    assert latex(MatrixSymbol('X', 10, 10)[5, :5:2]) == \
        r'X\left[5, :5:2\right]'


def test_latex_RandomDomain():
    """Random-variable domains print with logical connectives."""
    from sympy.stats import Normal, Die, Exponential, pspace, where
    X = Normal('x1', 0, 1)
    assert latex(where(X > 0)) == r"Domain: 0 < x_{1} \wedge x_{1} < \infty"
    D = Die('d1', 6)
    assert latex(where(D > 4)) == r"Domain: d_{1} = 5 \vee d_{1} = 6"
    A = Exponential('a', 1)
    B = Exponential('b', 1)
    assert latex(
        pspace(Tuple(A, B)).domain) == \
        r"Domain: 0 \leq a \wedge 0 \leq b \wedge a < \infty \wedge b < \infty"
def test_PrettyPoly():
    """Domain elements print exactly like the plain sympy expressions."""
    from sympy.polys.domains import QQ
    F = QQ.frac_field(x, y)
    R = QQ[x, y]
    assert latex(F.convert(x/(x + y))) == latex(x/(x + y))
    assert latex(R.convert(x + y)) == latex(x + y)


def test_integral_transforms():
    """Each integral transform prints as a calligraphic operator, with
    a -1 superscript for the inverse transforms."""
    x = Symbol("x")
    k = Symbol("k")
    f = Function("f")
    a = Symbol("a")
    b = Symbol("b")
    assert latex(MellinTransform(f(x), x, k)) == r"\mathcal{M}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
    assert latex(InverseMellinTransform(f(k), k, x, a, b)) == r"\mathcal{M}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
    assert latex(LaplaceTransform(f(x), x, k)) == r"\mathcal{L}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
    assert latex(InverseLaplaceTransform(f(k), k, x, (a, b))) == r"\mathcal{L}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
    assert latex(FourierTransform(f(x), x, k)) == r"\mathcal{F}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
    assert latex(InverseFourierTransform(f(k), k, x)) == r"\mathcal{F}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
    assert latex(CosineTransform(f(x), x, k)) == r"\mathcal{COS}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
    assert latex(InverseCosineTransform(f(k), k, x)) == r"\mathcal{COS}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
    assert latex(SineTransform(f(x), x, k)) == r"\mathcal{SIN}_{x}\left[f{\left (x \right )}\right]\left(k\right)"
    assert latex(InverseSineTransform(f(k), k, x)) == r"\mathcal{SIN}^{-1}_{k}\left[f{\left (k \right )}\right]\left(x\right)"
def test_PolynomialRingBase():
    """Old polynomial rings print with domain brackets and an order prefix."""
    from sympy.polys.domains import QQ
    assert latex(QQ.old_poly_ring(x, y)) == r"\mathbb{Q}\left[x, y\right]"
    assert latex(QQ.old_poly_ring(x, y, order="ilex")) == \
        r"S_<^{-1}\mathbb{Q}\left[x, y\right]"


def test_categories():
    """Category-theory objects, morphisms, diagrams and grids print."""
    from sympy.categories import (Object, IdentityMorphism,
                                  NamedMorphism, Category, Diagram, DiagramGrid)
    A1 = Object("A1")
    A2 = Object("A2")
    A3 = Object("A3")
    f1 = NamedMorphism(A1, A2, "f1")
    f2 = NamedMorphism(A2, A3, "f2")
    id_A1 = IdentityMorphism(A1)
    K1 = Category("K1")
    assert latex(A1) == "A_{1}"
    assert latex(f1) == "f_{1}:A_{1}\\rightarrow A_{2}"
    assert latex(id_A1) == "id:A_{1}\\rightarrow A_{1}"
    assert latex(f2*f1) == "f_{2}\\circ f_{1}:A_{1}\\rightarrow A_{3}"
    assert latex(K1) == "\mathbf{K_{1}}"
    d = Diagram()
    assert latex(d) == "\emptyset"
    # a diagram lists all morphisms (including implied identities/compositions)
    d = Diagram({f1: "unique", f2: S.EmptySet})
    assert latex(d) == r"\left \{ f_{2}\circ f_{1}:A_{1}" \
        r"\rightarrow A_{3} : \emptyset, \quad id:A_{1}\rightarrow " \
        r"A_{1} : \emptyset, \quad id:A_{2}\rightarrow A_{2} : " \
        r"\emptyset, \quad id:A_{3}\rightarrow A_{3} : \emptyset, " \
        r"\quad f_{1}:A_{1}\rightarrow A_{2} : \left\{unique\right\}, " \
        r"\quad f_{2}:A_{2}\rightarrow A_{3} : \emptyset\right \}"
    # diagrams with conclusions print premises => conclusions
    d = Diagram({f1: "unique", f2: S.EmptySet}, {f2 * f1: "unique"})
    assert latex(d) == r"\left \{ f_{2}\circ f_{1}:A_{1}" \
        r"\rightarrow A_{3} : \emptyset, \quad id:A_{1}\rightarrow " \
        r"A_{1} : \emptyset, \quad id:A_{2}\rightarrow A_{2} : " \
        r"\emptyset, \quad id:A_{3}\rightarrow A_{3} : \emptyset, " \
        r"\quad f_{1}:A_{1}\rightarrow A_{2} : \left\{unique\right\}," \
        r" \quad f_{2}:A_{2}\rightarrow A_{3} : \emptyset\right \}" \
        r"\Longrightarrow \left \{ f_{2}\circ f_{1}:A_{1}" \
        r"\rightarrow A_{3} : \left\{unique\right\}\right \}"
    # A linear diagram.
    A = Object("A")
    B = Object("B")
    C = Object("C")
    f = NamedMorphism(A, B, "f")
    g = NamedMorphism(B, C, "g")
    d = Diagram([f, g])
    grid = DiagramGrid(d)
    assert latex(grid) == "\\begin{array}{cc}\n" \
        "A & B \\\\\n" \
        " & C \n" \
        "\\end{array}\n"
def test_Modules():
    """Free modules, submodules, ideals, quotients and homomorphisms print."""
    from sympy.polys.domains import QQ
    from sympy.polys.agca import homomorphism
    R = QQ.old_poly_ring(x, y)
    F = R.free_module(2)
    M = F.submodule([x, y], [1, x**2])
    assert latex(F) == r"{\mathbb{Q}\left[x, y\right]}^{2}"
    assert latex(M) == \
        r"\left< {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right>"
    I = R.ideal(x**2, y)
    assert latex(I) == r"\left< {x^{2}},{y} \right>"
    Q = F / M
    assert latex(Q) == r"\frac{{\mathbb{Q}\left[x, y\right]}^{2}}{\left< {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right>}"
    assert latex(Q.submodule([1, x**3/2], [2, y])) == \
        r"\left< {{\left[ {1},{\frac{x^{3}}{2}} \right]} + {\left< {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right>}},{{\left[ {2},{y} \right]} + {\left< {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right>}} \right>"
    h = homomorphism(QQ.old_poly_ring(x).free_module(2), QQ.old_poly_ring(x).free_module(2), [0, 0])
    assert latex(h) == r"{\left[\begin{matrix}0 & 0\\0 & 0\end{matrix}\right]} : {{\mathbb{Q}\left[x\right]}^{2}} \to {{\mathbb{Q}\left[x\right]}^{2}}"


def test_QuotientRing():
    """Quotient rings print as a fraction of the ring over the ideal."""
    from sympy.polys.domains import QQ
    R = QQ.old_poly_ring(x)/[x**2 + 1]
    assert latex(
        R) == r"\frac{\mathbb{Q}\left[x\right]}{\left< {x^{2} + 1} \right>}"
    assert latex(R.one) == r"{1} + {\left< {x^{2} + 1} \right>}"


def test_Tr():
    """Trace prints as mbox{Tr}."""
    # TODO: Handle indices
    A, B = symbols('A B', commutative=False)
    t = Tr(A*B)
    assert latex(t) == r'\mbox{Tr}\left(A B\right)'
def test_Adjoint():
    """Adjoint prints as a dagger superscript, parenthesizing compounds."""
    from sympy.matrices import MatrixSymbol, Adjoint, Inverse, Transpose
    X = MatrixSymbol('X', 2, 2)
    Y = MatrixSymbol('Y', 2, 2)
    assert latex(Adjoint(X)) == r'X^\dag'
    assert latex(Adjoint(X + Y)) == r'\left(X + Y\right)^\dag'
    assert latex(Adjoint(X) + Adjoint(Y)) == r'X^\dag + Y^\dag'
    assert latex(Adjoint(X*Y)) == r'\left(X Y\right)^\dag'
    assert latex(Adjoint(Y)*Adjoint(X)) == r'Y^\dag X^\dag'
    assert latex(Adjoint(X**2)) == r'\left(X^{2}\right)^\dag'
    assert latex(Adjoint(X)**2) == r'\left(X^\dag\right)^{2}'
    assert latex(Adjoint(Inverse(X))) == r'\left(X^{-1}\right)^\dag'
    assert latex(Inverse(Adjoint(X))) == r'\left(X^\dag\right)^{-1}'
    assert latex(Adjoint(Transpose(X))) == r'\left(X^T\right)^\dag'
    assert latex(Transpose(Adjoint(X))) == r'\left(X^\dag\right)^T'


def test_Hadamard():
    """Hadamard product prints with circ and parenthesizes inner products."""
    from sympy.matrices import MatrixSymbol, HadamardProduct
    X = MatrixSymbol('X', 2, 2)
    Y = MatrixSymbol('Y', 2, 2)
    assert latex(HadamardProduct(X, Y*Y)) == r'X \circ \left(Y Y\right)'
    assert latex(HadamardProduct(X, Y)*Y) == r'\left(X \circ Y\right) Y'
def test_boolean_args_order():
syms = symbols('a:f')
expr = And(*syms)
assert latex(expr) == 'a \\wedge b \\wedge c \\wedge d \\wedge e \\wedge f'
expr = Or(*syms)
assert latex(expr) == 'a \\vee b \\vee c \\vee d \\vee e \\vee f'
expr = Equivalent(*syms)
assert latex(expr) == 'a \\equiv b \\equiv c \\equiv d \\equiv e \\equiv f'
expr = Xor(*syms)
assert latex(expr) == 'a \\veebar b \\veebar c \\veebar d \\veebar e \\veebar f'
def test_imaginary():
    """sqrt(-1) evaluates to the imaginary unit and prints as 'i'."""
    i = sqrt(-1)
    assert latex(i) == r'i'


def test_builtins_without_args():
    """Unapplied elementary functions print their operator name."""
    assert latex(sin) == r'\sin'
    assert latex(cos) == r'\cos'
    assert latex(tan) == r'\tan'
    assert latex(log) == r'\log'
    assert latex(Ei) == r'\operatorname{Ei}'
    assert latex(zeta) == r'\zeta'
def test_latex_greek_functions():
    """Greek function names map to glyphs; Roman look-alikes stay Roman."""
    # bug because capital greeks that have roman equivalents should not use
    # \Alpha, \Beta, \Eta, etc.
    s = Function('Alpha')
    assert latex(s) == r'A'
    assert latex(s(x)) == r'A{\left (x \right )}'
    s = Function('Beta')
    assert latex(s) == r'B'
    s = Function('Eta')
    assert latex(s) == r'H'
    assert latex(s(x)) == r'H{\left (x \right )}'
    # bug because sympy.core.numbers.Pi is special
    p = Function('Pi')
    # assert latex(p(x)) == r'\Pi{\left (x \right )}'
    assert latex(p) == r'\Pi'
    # bug because not all greeks are included
    c = Function('chi')
    assert latex(c(x)) == r'\chi{\left (x \right )}'
    assert latex(c) == r'\chi'
def test_translate():
    """translate() maps spelled-out Greek names (with modifiers) to LaTeX."""
    cases = [
        ('Alpha', 'A'),
        ('Beta', 'B'),
        ('Eta', 'H'),
        ('omicron', 'o'),
        ('Pi', r'\Pi'),
        ('pi', r'\pi'),
        # modifier suffixes nest around the translated base name
        ('LamdaHatDOT', r'\dot{\hat{\Lambda}}'),
    ]
    for name, expected in cases:
        assert translate(name) == expected
def test_other_symbols():
    """Every name in other_symbols prints as the same-named command."""
    from sympy.printing.latex import other_symbols
    for s in other_symbols:
        assert latex(symbols(s)) == "\\"+s
def test_modifiers():
    """Case-insensitive name suffixes become accent/decoration commands."""
    # Test each modifier individually in the simplest case (with funny capitalizations)
    assert latex(symbols("xMathring")) == r"\mathring{x}"
    assert latex(symbols("xCheck")) == r"\check{x}"
    assert latex(symbols("xBreve")) == r"\breve{x}"
    assert latex(symbols("xAcute")) == r"\acute{x}"
    assert latex(symbols("xGrave")) == r"\grave{x}"
    assert latex(symbols("xTilde")) == r"\tilde{x}"
    assert latex(symbols("xPrime")) == r"{x}'"
    assert latex(symbols("xddDDot")) == r"\ddddot{x}"
    assert latex(symbols("xDdDot")) == r"\dddot{x}"
    assert latex(symbols("xDDot")) == r"\ddot{x}"
    assert latex(symbols("xBold")) == r"\boldsymbol{x}"
    assert latex(symbols("xnOrM")) == r"\left\lVert{x}\right\rVert"
    assert latex(symbols("xAVG")) == r"\left\langle{x}\right\rangle"
    assert latex(symbols("xHat")) == r"\hat{x}"
    assert latex(symbols("xDot")) == r"\dot{x}"
    assert latex(symbols("xBar")) == r"\bar{x}"
    assert latex(symbols("xVec")) == r"\vec{x}"
    assert latex(symbols("xAbs")) == r"\left\lvert{x}\right\rvert"
    assert latex(symbols("xMag")) == r"\left\lvert{x}\right\rvert"
    assert latex(symbols("xPrM")) == r"{x}'"
    assert latex(symbols("xBM")) == r"\boldsymbol{x}"
    # Test strings that are *only* the names of modifiers
    assert latex(symbols("Mathring")) == r"Mathring"
    assert latex(symbols("Check")) == r"Check"
    assert latex(symbols("Breve")) == r"Breve"
    assert latex(symbols("Acute")) == r"Acute"
    assert latex(symbols("Grave")) == r"Grave"
    assert latex(symbols("Tilde")) == r"Tilde"
    assert latex(symbols("Prime")) == r"Prime"
    assert latex(symbols("DDot")) == r"\dot{D}"
    assert latex(symbols("Bold")) == r"Bold"
    assert latex(symbols("NORm")) == r"NORm"
    assert latex(symbols("AVG")) == r"AVG"
    assert latex(symbols("Hat")) == r"Hat"
    assert latex(symbols("Dot")) == r"Dot"
    assert latex(symbols("Bar")) == r"Bar"
    assert latex(symbols("Vec")) == r"Vec"
    assert latex(symbols("Abs")) == r"Abs"
    assert latex(symbols("Mag")) == r"Mag"
    assert latex(symbols("PrM")) == r"PrM"
    assert latex(symbols("BM")) == r"BM"
    assert latex(symbols("hbar")) == r"\hbar"
    # Check a few combinations
    assert latex(symbols("xvecdot")) == r"\dot{\vec{x}}"
    assert latex(symbols("xDotVec")) == r"\vec{\dot{x}}"
    assert latex(symbols("xHATNorm")) == r"\left\lVert{\hat{x}}\right\rVert"
    # Check a couple big, ugly combinations
    assert latex(symbols('xMathringBm_yCheckPRM__zbreveAbs')) == r"\boldsymbol{\mathring{x}}^{\left\lvert{\breve{z}}\right\rvert}_{{\check{y}}'}"
    assert latex(symbols('alphadothat_nVECDOT__tTildePrime')) == r"\hat{\dot{\alpha}}^{{\tilde{t}}'}_{\dot{\vec{n}}}"
def test_greek_symbols():
    """Greek symbol names map to glyph commands; capitals with Roman
    look-alikes print as plain Roman letters."""
    assert latex(Symbol('alpha')) == r'\alpha'
    assert latex(Symbol('beta')) == r'\beta'
    assert latex(Symbol('gamma')) == r'\gamma'
    assert latex(Symbol('delta')) == r'\delta'
    assert latex(Symbol('epsilon')) == r'\epsilon'
    assert latex(Symbol('zeta')) == r'\zeta'
    assert latex(Symbol('eta')) == r'\eta'
    assert latex(Symbol('theta')) == r'\theta'
    assert latex(Symbol('iota')) == r'\iota'
    assert latex(Symbol('kappa')) == r'\kappa'
    assert latex(Symbol('lambda')) == r'\lambda'
    assert latex(Symbol('mu')) == r'\mu'
    assert latex(Symbol('nu')) == r'\nu'
    assert latex(Symbol('xi')) == r'\xi'
    assert latex(Symbol('omicron')) == r'o'
    assert latex(Symbol('pi')) == r'\pi'
    assert latex(Symbol('rho')) == r'\rho'
    assert latex(Symbol('sigma')) == r'\sigma'
    assert latex(Symbol('tau')) == r'\tau'
    assert latex(Symbol('upsilon')) == r'\upsilon'
    assert latex(Symbol('phi')) == r'\phi'
    assert latex(Symbol('chi')) == r'\chi'
    assert latex(Symbol('psi')) == r'\psi'
    assert latex(Symbol('omega')) == r'\omega'
    assert latex(Symbol('Alpha')) == r'A'
    assert latex(Symbol('Beta')) == r'B'
    assert latex(Symbol('Gamma')) == r'\Gamma'
    assert latex(Symbol('Delta')) == r'\Delta'
    assert latex(Symbol('Epsilon')) == r'E'
    assert latex(Symbol('Zeta')) == r'Z'
    assert latex(Symbol('Eta')) == r'H'
    assert latex(Symbol('Theta')) == r'\Theta'
    assert latex(Symbol('Iota')) == r'I'
    assert latex(Symbol('Kappa')) == r'K'
    assert latex(Symbol('Lambda')) == r'\Lambda'
    assert latex(Symbol('Mu')) == r'M'
    assert latex(Symbol('Nu')) == r'N'
    assert latex(Symbol('Xi')) == r'\Xi'
    assert latex(Symbol('Omicron')) == r'O'
    assert latex(Symbol('Pi')) == r'\Pi'
    assert latex(Symbol('Rho')) == r'P'
    assert latex(Symbol('Sigma')) == r'\Sigma'
    assert latex(Symbol('Tau')) == r'T'
    assert latex(Symbol('Upsilon')) == r'\Upsilon'
    assert latex(Symbol('Phi')) == r'\Phi'
    assert latex(Symbol('Chi')) == r'X'
    assert latex(Symbol('Psi')) == r'\Psi'
    assert latex(Symbol('Omega')) == r'\Omega'
    # variant letterforms
    assert latex(Symbol('varepsilon')) == r'\varepsilon'
    assert latex(Symbol('varkappa')) == r'\varkappa'
    assert latex(Symbol('varphi')) == r'\varphi'
    assert latex(Symbol('varpi')) == r'\varpi'
    assert latex(Symbol('varrho')) == r'\varrho'
    assert latex(Symbol('varsigma')) == r'\varsigma'
    assert latex(Symbol('vartheta')) == r'\vartheta'
@XFAIL
def test_builtin_without_args_mismatched_names():
    """Known failure: CosineTransform's printed name does not match yet."""
    assert latex(CosineTransform) == r'\mathcal{COS}'


def test_builtin_no_args():
    """Unapplied special functions print their conventional symbols."""
    assert latex(Chi) == r'\operatorname{Chi}'
    assert latex(gamma) == r'\Gamma'
    assert latex(KroneckerDelta) == r'\delta'
    assert latex(DiracDelta) == r'\delta'
    assert latex(lowergamma) == r'\gamma'
def test_issue_6853():
    """A Function named 'Pi' prints as capital Pi, not the constant pi."""
    p = Function('Pi')
    assert latex(p(x)) == r"\Pi{\left (x \right )}"


def test_Mul():
    """Unevaluated Mul keeps factor order and parenthesizes sums."""
    e = Mul(-2, x + 1, evaluate=False)
    assert latex(e) == r'- 2 \left(x + 1\right)'
    e = Mul(2, x + 1, evaluate=False)
    assert latex(e) == r'2 \left(x + 1\right)'
    e = Mul(S.One/2, x + 1, evaluate=False)
    assert latex(e) == r'\frac{1}{2} \left(x + 1\right)'
    e = Mul(y, x + 1, evaluate=False)
    assert latex(e) == r'y \left(x + 1\right)'
    e = Mul(-y, x + 1, evaluate=False)
    assert latex(e) == r'- y \left(x + 1\right)'
    # evaluated products distribute before printing
    e = Mul(-2, x + 1)
    assert latex(e) == r'- 2 x - 2'
    e = Mul(2, x + 1)
    assert latex(e) == r'2 x + 2'


def test_Pow():
    """Unevaluated Pow prints the unevaluated base and exponent."""
    e = Pow(2, 2, evaluate=False)
    assert latex(e) == r'2^{2}'


def test_issue_7180():
    """Equivalent and its negation print equiv / not-equiv."""
    assert latex(Equivalent(x, y)) == r"x \equiv y"
    assert latex(Not(Equivalent(x, y))) == r"x \not\equiv y"
| bsd-3-clause |
emedinaa/contentbox | third_party/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py | 13 | 8225 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import json
from oauthlib.common import log
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
class ResourceOwnerPasswordCredentialsGrant(GrantTypeBase):
    """`Resource Owner Password Credentials Grant`_
    The resource owner password credentials grant type is suitable in
    cases where the resource owner has a trust relationship with the
    client, such as the device operating system or a highly privileged
    application.  The authorization server should take special care when
    enabling this grant type and only allow it when other flows are not
    viable.
    This grant type is suitable for clients capable of obtaining the
    resource owner's credentials (username and password, typically using
    an interactive form).  It is also used to migrate existing clients
    using direct authentication schemes such as HTTP Basic or Digest
    authentication to OAuth by converting the stored credentials to an
    access token::
            +----------+
            | Resource |
            |  Owner   |
            |          |
            +----------+
                 v
                 |    Resource Owner
                (A) Password Credentials
                 |
                 v
            +---------+                                  +---------------+
            |         |>--(B)---- Resource Owner ------->|               |
            |         |         Password Credentials     | Authorization |
            | Client  |                                  |     Server    |
            |         |<--(C)---- Access Token ---------<|               |
            |         |    (w/ Optional Refresh Token)   |               |
            +---------+                                  +---------------+
                   Figure 5: Resource Owner Password Credentials Flow
    The flow illustrated in Figure 5 includes the following steps:
    (A)  The resource owner provides the client with its username and
            password.
    (B)  The client requests an access token from the authorization
            server's token endpoint by including the credentials received
            from the resource owner.  When making the request, the client
            authenticates with the authorization server.
    (C)  The authorization server authenticates the client and validates
            the resource owner credentials, and if valid, issues an access
            token.
    .. _`Resource Owner Password Credentials Grant`: http://tools.ietf.org/html/rfc6749#section-4.3
    """
    def __init__(self, request_validator=None):
        # Fall back to the base RequestValidator (which rejects everything by
        # default) when the caller does not supply one.
        self.request_validator = request_validator or RequestValidator()
    def create_token_response(self, request, token_handler):
        """Return token or error in json format.

        Returns a ``(headers, body, status)`` triple: the JSON-encoded token
        from ``token_handler.create_token`` on success, or the serialized
        OAuth2 error with its status code on failure.

        If the access token request is valid and authorized, the
        authorization server issues an access token and optional refresh
        token as described in `Section 5.1`_.  If the request failed client
        authentication or is invalid, the authorization server returns an
        error response as described in `Section 5.2`_.
        .. _`Section 5.1`: http://tools.ietf.org/html/rfc6749#section-5.1
        .. _`Section 5.2`: http://tools.ietf.org/html/rfc6749#section-5.2
        """
        # Token responses must not be cached (RFC 6749 section 5.1).
        headers = {
            'Content-Type': 'application/json',
            'Cache-Control': 'no-store',
            'Pragma': 'no-cache',
        }
        try:
            if self.request_validator.client_authentication_required(request):
                # Confidential clients must fully authenticate.
                log.debug('Authenticating client, %r.', request)
                if not self.request_validator.authenticate_client(request):
                    log.debug('Client authentication failed, %r.', request)
                    raise errors.InvalidClientError(request=request)
            elif not self.request_validator.authenticate_client_id(request.client_id, request):
                # Public clients only need their client_id confirmed.
                log.debug('Client authentication failed, %r.', request)
                raise errors.InvalidClientError(request=request)
            log.debug('Validating access token request, %r.', request)
            self.validate_token_request(request)
        except errors.OAuth2Error as e:
            log.debug('Client error in token request, %s.', e)
            return headers, e.json, e.status_code
        token = token_handler.create_token(request, refresh_token=True)
        log.debug('Issuing token %r to client id %r (%r) and username %s.',
                  token, request.client_id, request.client, request.username)
        return headers, json.dumps(token), 200
    def validate_token_request(self, request):
        """Validate a password-grant token request, raising OAuth2Error on failure.

        The client makes a request to the token endpoint by adding the
        following parameters using the "application/x-www-form-urlencoded"
        format per Appendix B with a character encoding of UTF-8 in the HTTP
        request entity-body:
        grant_type
                REQUIRED.  Value MUST be set to "password".
        username
                REQUIRED.  The resource owner username.
        password
                REQUIRED.  The resource owner password.
        scope
                OPTIONAL.  The scope of the access request as described by
                `Section 3.3`_.
        If the client type is confidential or the client was issued client
        credentials (or assigned other authentication requirements), the
        client MUST authenticate with the authorization server as described
        in `Section 3.2.1`_.
        The authorization server MUST:
        o  require client authentication for confidential clients or for any
            client that was issued client credentials (or with other
            authentication requirements),
        o  authenticate the client if client authentication is included, and
        o  validate the resource owner password credentials using its
            existing password validation algorithm.
        Since this access token request utilizes the resource owner's
        password, the authorization server MUST protect the endpoint against
        brute force attacks (e.g., using rate-limitation or generating
        alerts).
        .. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
        .. _`Section 3.2.1`: http://tools.ietf.org/html/rfc6749#section-3.2.1
        """
        # All three parameters are REQUIRED by RFC 6749 section 4.3.2.
        for param in ('grant_type', 'username', 'password'):
            if not getattr(request, param):
                raise errors.InvalidRequestError(
                    'Request is missing %s parameter.' % param, request=request)
        for param in ('grant_type', 'username', 'password', 'scope'):
            if param in request.duplicate_params:
                raise errors.InvalidRequestError(state=request.state,
                                                 description='Duplicate %s parameter.' % param, request=request)
        # This error should rarely (if ever) occur if requests are routed to
        # grant type handlers based on the grant_type parameter.
        if not request.grant_type == 'password':
            raise errors.UnsupportedGrantTypeError(request=request)
        # NOTE(review): this logs the raw password at debug level — a
        # credential-leak risk; consider redacting upstream.
        log.debug('Validating username %s and password %s.',
                  request.username, request.password)
        if not self.request_validator.validate_user(request.username,
                                                    request.password, request.client, request):
            raise errors.InvalidGrantError('Invalid credentials given.', request=request)
        else:
            # validate_user is expected to populate request.client; the
            # client_id attribute is needed below and by token handlers.
            if not hasattr(request.client, 'client_id'):
                raise NotImplementedError(
                    'Validate user must set the '
                    'request.client.client_id attribute '
                    'in authenticate_client.')
        log.debug('Authorizing access to user %r.', request.user)
        # Ensure client is authorized use of this grant type
        self.validate_grant_type(request)
        if request.client:
            request.client_id = request.client_id or request.client.client_id
        self.validate_scopes(request)
| apache-2.0 |
xujianhai/flask | tests/test_blueprints.py | 143 | 18147 | # -*- coding: utf-8 -*-
"""
tests.blueprints
~~~~~~~~~~~~~~~~
Blueprints (and currently modules)
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
from flask._compat import text_type
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound
def test_blueprint_specific_error_handling():
    """Blueprint-level 403 handlers override the app handler; blueprints
    without their own handler fall back to the application's."""
    frontend = flask.Blueprint('frontend', __name__)
    backend = flask.Blueprint('backend', __name__)
    sideend = flask.Blueprint('sideend', __name__)
    @frontend.errorhandler(403)
    def frontend_forbidden(e):
        return 'frontend says no', 403
    @frontend.route('/frontend-no')
    def frontend_no():
        flask.abort(403)
    @backend.errorhandler(403)
    def backend_forbidden(e):
        return 'backend says no', 403
    @backend.route('/backend-no')
    def backend_no():
        flask.abort(403)
    @sideend.route('/what-is-a-sideend')
    def sideend_no():
        flask.abort(403)
    app = flask.Flask(__name__)
    app.register_blueprint(frontend)
    app.register_blueprint(backend)
    app.register_blueprint(sideend)
    @app.errorhandler(403)
    def app_forbidden(e):
        return 'application itself says no', 403
    c = app.test_client()
    assert c.get('/frontend-no').data == b'frontend says no'
    assert c.get('/backend-no').data == b'backend says no'
    assert c.get('/what-is-a-sideend').data == b'application itself says no'
def test_blueprint_specific_user_error_handling():
    """Custom exception handlers work both via decorator and via
    register_error_handler on a blueprint."""
    class MyDecoratorException(Exception):
        pass
    class MyFunctionException(Exception):
        pass
    blue = flask.Blueprint('blue', __name__)
    @blue.errorhandler(MyDecoratorException)
    def my_decorator_exception_handler(e):
        assert isinstance(e, MyDecoratorException)
        return 'boom'
    def my_function_exception_handler(e):
        assert isinstance(e, MyFunctionException)
        return 'bam'
    blue.register_error_handler(MyFunctionException, my_function_exception_handler)
    @blue.route('/decorator')
    def blue_deco_test():
        raise MyDecoratorException()
    @blue.route('/function')
    def blue_func_test():
        raise MyFunctionException()
    app = flask.Flask(__name__)
    app.register_blueprint(blue)
    c = app.test_client()
    assert c.get('/decorator').data == b'boom'
    assert c.get('/function').data == b'bam'
def test_blueprint_url_definitions():
    """One blueprint registered twice with different url_defaults yields
    independent values per mount point."""
    bp = flask.Blueprint('test', __name__)
    @bp.route('/foo', defaults={'baz': 42})
    def foo(bar, baz):
        return '%s/%d' % (bar, baz)
    @bp.route('/bar')
    def bar(bar):
        return text_type(bar)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
    app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})
    c = app.test_client()
    assert c.get('/1/foo').data == b'23/42'
    assert c.get('/2/foo').data == b'19/42'
    assert c.get('/1/bar').data == b'23'
    assert c.get('/2/bar').data == b'19'
def test_blueprint_url_processors():
    """url_defaults/url_value_preprocessor round-trip a lang_code prefix
    through flask.g so views never see it explicitly."""
    bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')
    @bp.url_defaults
    def add_language_code(endpoint, values):
        values.setdefault('lang_code', flask.g.lang_code)
    @bp.url_value_preprocessor
    def pull_lang_code(endpoint, values):
        flask.g.lang_code = values.pop('lang_code')
    @bp.route('/')
    def index():
        return flask.url_for('.about')
    @bp.route('/about')
    def about():
        return flask.url_for('.index')
    app = flask.Flask(__name__)
    app.register_blueprint(bp)
    c = app.test_client()
    assert c.get('/de/').data == b'/de/about'
    assert c.get('/de/about').data == b'/de/'
def test_templates_and_static(test_apps):
    """Blueprint templates and static files resolve correctly, including
    cache-control max-age and TemplateNotFound for missing templates."""
    from blueprintapp import app
    c = app.test_client()
    rv = c.get('/')
    assert rv.data == b'Hello from the Frontend'
    rv = c.get('/admin/')
    assert rv.data == b'Hello from the Admin'
    rv = c.get('/admin/index2')
    assert rv.data == b'Hello from the Admin'
    rv = c.get('/admin/static/test.txt')
    assert rv.data.strip() == b'Admin File'
    rv.close()
    rv = c.get('/admin/static/css/test.css')
    assert rv.data.strip() == b'/* nested file */'
    rv.close()
    # try/finally, in case other tests use this app for Blueprint tests.
    max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
    try:
        expected_max_age = 3600
        if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
            expected_max_age = 7200
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
        rv = c.get('/admin/static/css/test.css')
        cc = parse_cache_control_header(rv.headers['Cache-Control'])
        assert cc.max_age == expected_max_age
        rv.close()
    finally:
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
    with app.test_request_context():
        assert flask.url_for('admin.static', filename='test.txt') == '/admin/static/test.txt'
    with app.test_request_context():
        try:
            flask.render_template('missing.html')
        except TemplateNotFound as e:
            assert e.name == 'missing.html'
        else:
            assert 0, 'expected exception'
    with flask.Flask(__name__).test_request_context():
        assert flask.render_template('nested/nested.txt') == 'I\'m nested'
def test_default_static_cache_timeout():
    """A Blueprint subclass overriding get_send_file_max_age wins over
    SEND_FILE_MAX_AGE_DEFAULT for its static files."""
    app = flask.Flask(__name__)
    class MyBlueprint(flask.Blueprint):
        def get_send_file_max_age(self, filename):
            return 100
    blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
    app.register_blueprint(blueprint)
    # try/finally, in case other tests use this app for Blueprint tests.
    max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
    try:
        with app.test_request_context():
            unexpected_max_age = 3600
            if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
                unexpected_max_age = 7200
            app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
            rv = blueprint.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            assert cc.max_age == 100
            rv.close()
    finally:
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
def test_templates_list(test_apps):
    """list_templates includes templates contributed by blueprints."""
    from blueprintapp import app
    templates = sorted(app.jinja_env.list_templates())
    assert templates == ['admin/index.html', 'frontend/index.html']
def test_dotted_names():
    """Blueprints with dotted names can url_for each other absolutely and
    themselves relatively ('.endpoint')."""
    frontend = flask.Blueprint('myapp.frontend', __name__)
    backend = flask.Blueprint('myapp.backend', __name__)
    @frontend.route('/fe')
    def frontend_index():
        return flask.url_for('myapp.backend.backend_index')
    @frontend.route('/fe2')
    def frontend_page2():
        return flask.url_for('.frontend_index')
    @backend.route('/be')
    def backend_index():
        return flask.url_for('myapp.frontend.frontend_index')
    app = flask.Flask(__name__)
    app.register_blueprint(frontend)
    app.register_blueprint(backend)
    c = app.test_client()
    assert c.get('/fe').data.strip() == b'/be'
    assert c.get('/fe2').data.strip() == b'/fe'
    assert c.get('/be').data.strip() == b'/fe'
def test_dotted_names_from_app():
    """App-level views can url_for blueprint endpoints by dotted name."""
    app = flask.Flask(__name__)
    app.testing = True
    test = flask.Blueprint('test', __name__)
    @app.route('/')
    def app_index():
        return flask.url_for('test.index')
    @test.route('/test/')
    def index():
        return flask.url_for('app_index')
    app.register_blueprint(test)
    with app.test_client() as c:
        rv = c.get('/')
        assert rv.data == b'/test/'
def test_empty_url_defaults():
    """A route-level defaults dict supplies the value when the converter
    segment is absent from the URL."""
    bp = flask.Blueprint('bp', __name__)
    @bp.route('/', defaults={'page': 1})
    @bp.route('/page/<int:page>')
    def something(page):
        return str(page)
    app = flask.Flask(__name__)
    app.register_blueprint(bp)
    c = app.test_client()
    assert c.get('/').data == b'1'
    assert c.get('/page/2').data == b'2'
def test_route_decorator_custom_endpoint():
    """endpoint= on a blueprint route overrides the function-name default,
    always prefixed with the blueprint name."""
    bp = flask.Blueprint('bp', __name__)
    @bp.route('/foo')
    def foo():
        return flask.request.endpoint
    @bp.route('/bar', endpoint='bar')
    def foo_bar():
        return flask.request.endpoint
    @bp.route('/bar/123', endpoint='123')
    def foo_bar_foo():
        return flask.request.endpoint
    @bp.route('/bar/foo')
    def bar_foo():
        return flask.request.endpoint
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    @app.route('/')
    def index():
        return flask.request.endpoint
    c = app.test_client()
    assert c.get('/').data == b'index'
    assert c.get('/py/foo').data == b'bp.foo'
    assert c.get('/py/bar').data == b'bp.bar'
    assert c.get('/py/bar/123').data == b'bp.123'
    assert c.get('/py/bar/foo').data == b'bp.bar_foo'
def test_route_decorator_custom_endpoint_with_dots():
    """Dots are forbidden in blueprint endpoint names: registration raises
    AssertionError and no rule is added."""
    bp = flask.Blueprint('bp', __name__)
    @bp.route('/foo')
    def foo():
        return flask.request.endpoint
    try:
        @bp.route('/bar', endpoint='bar.bar')
        def foo_bar():
            return flask.request.endpoint
    except AssertionError:
        pass
    else:
        raise AssertionError('expected AssertionError not raised')
    try:
        @bp.route('/bar/123', endpoint='bar.123')
        def foo_bar_foo():
            return flask.request.endpoint
    except AssertionError:
        pass
    else:
        raise AssertionError('expected AssertionError not raised')
    def foo_foo_foo():
        pass
    pytest.raises(
        AssertionError,
        lambda: bp.add_url_rule(
            '/bar/123', endpoint='bar.123', view_func=foo_foo_foo
        )
    )
    pytest.raises(
        AssertionError,
        bp.route('/bar/123', endpoint='bar.123'),
        lambda: None
    )
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    c = app.test_client()
    assert c.get('/py/foo').data == b'bp.foo'
    # The rules didn't actually make it through registration.
    rv = c.get('/py/bar')
    assert rv.status_code == 404
    rv = c.get('/py/bar/123')
    assert rv.status_code == 404
def test_template_filter():
    """app_template_filter() registers the filter app-wide under its own name."""
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_filter()
    def my_reverse(s):
        return s[::-1]
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'my_reverse' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['my_reverse'] == my_reverse
    assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_add_template_filter():
    """add_app_template_filter() mirrors the decorator form."""
    bp = flask.Blueprint('bp', __name__)
    def my_reverse(s):
        return s[::-1]
    bp.add_app_template_filter(my_reverse)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'my_reverse' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['my_reverse'] == my_reverse
    assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_template_filter_with_name():
    """app_template_filter('name') registers under the explicit name."""
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_filter('strrev')
    def my_reverse(s):
        return s[::-1]
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'strrev' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['strrev'] == my_reverse
    assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_add_template_filter_with_name():
    """add_app_template_filter(func, 'name') registers under the explicit name."""
    bp = flask.Blueprint('bp', __name__)
    def my_reverse(s):
        return s[::-1]
    bp.add_app_template_filter(my_reverse, 'strrev')
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'strrev' in app.jinja_env.filters.keys()
    assert app.jinja_env.filters['strrev'] == my_reverse
    assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_template_filter_with_template():
    """A blueprint-registered filter is usable from a rendered template."""
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_filter()
    def super_reverse(s):
        return s[::-1]
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    rv = app.test_client().get('/')
    assert rv.data == b'dcba'
def test_template_filter_after_route_with_template():
    """Filter registration still works when the blueprint is registered
    after the route using the filter was defined."""
    app = flask.Flask(__name__)
    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_filter()
    def super_reverse(s):
        return s[::-1]
    app.register_blueprint(bp, url_prefix='/py')
    rv = app.test_client().get('/')
    assert rv.data == b'dcba'
def test_add_template_filter_with_template():
    """add_app_template_filter-registered filter is usable from a template."""
    bp = flask.Blueprint('bp', __name__)
    def super_reverse(s):
        return s[::-1]
    bp.add_app_template_filter(super_reverse)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    rv = app.test_client().get('/')
    assert rv.data == b'dcba'
def test_template_filter_with_name_and_template():
    """Named decorator-registered filter is usable from a template."""
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_filter('super_reverse')
    def my_reverse(s):
        return s[::-1]
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    rv = app.test_client().get('/')
    assert rv.data == b'dcba'
def test_add_template_filter_with_name_and_template():
    """Named add_app_template_filter-registered filter works in templates."""
    bp = flask.Blueprint('bp', __name__)
    def my_reverse(s):
        return s[::-1]
    bp.add_app_template_filter(my_reverse, 'super_reverse')
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')
    rv = app.test_client().get('/')
    assert rv.data == b'dcba'
def test_template_test():
    """app_template_test() registers the Jinja test app-wide under its own name."""
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_test()
    def is_boolean(value):
        return isinstance(value, bool)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'is_boolean' in app.jinja_env.tests.keys()
    assert app.jinja_env.tests['is_boolean'] == is_boolean
    assert app.jinja_env.tests['is_boolean'](False)
def test_add_template_test():
    """add_app_template_test() mirrors the decorator form."""
    bp = flask.Blueprint('bp', __name__)
    def is_boolean(value):
        return isinstance(value, bool)
    bp.add_app_template_test(is_boolean)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'is_boolean' in app.jinja_env.tests.keys()
    assert app.jinja_env.tests['is_boolean'] == is_boolean
    assert app.jinja_env.tests['is_boolean'](False)
def test_template_test_with_name():
    """app_template_test('name') registers under the explicit name."""
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_test('boolean')
    def is_boolean(value):
        return isinstance(value, bool)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'boolean' in app.jinja_env.tests.keys()
    assert app.jinja_env.tests['boolean'] == is_boolean
    assert app.jinja_env.tests['boolean'](False)
def test_add_template_test_with_name():
    """add_app_template_test(func, 'name') registers under the explicit name."""
    bp = flask.Blueprint('bp', __name__)
    def is_boolean(value):
        return isinstance(value, bool)
    bp.add_app_template_test(is_boolean, 'boolean')
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    assert 'boolean' in app.jinja_env.tests.keys()
    assert app.jinja_env.tests['boolean'] == is_boolean
    assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_template():
    """A blueprint-registered Jinja test is usable from a rendered template."""
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_test()
    def boolean(value):
        return isinstance(value, bool)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    @app.route('/')
    def index():
        return flask.render_template('template_test.html', value=False)
    rv = app.test_client().get('/')
    assert b'Success!' in rv.data
def test_template_test_after_route_with_template():
    """Jinja test registration works when the blueprint is registered after
    the route using the test was defined."""
    app = flask.Flask(__name__)
    @app.route('/')
    def index():
        return flask.render_template('template_test.html', value=False)
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_test()
    def boolean(value):
        return isinstance(value, bool)
    app.register_blueprint(bp, url_prefix='/py')
    rv = app.test_client().get('/')
    assert b'Success!' in rv.data
def test_add_template_test_with_template():
    """add_app_template_test-registered Jinja test works in templates."""
    bp = flask.Blueprint('bp', __name__)
    def boolean(value):
        return isinstance(value, bool)
    bp.add_app_template_test(boolean)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    @app.route('/')
    def index():
        return flask.render_template('template_test.html', value=False)
    rv = app.test_client().get('/')
    assert b'Success!' in rv.data
def test_template_test_with_name_and_template():
    """Named decorator-registered Jinja test works in templates."""
    bp = flask.Blueprint('bp', __name__)
    @bp.app_template_test('boolean')
    def is_boolean(value):
        return isinstance(value, bool)
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    @app.route('/')
    def index():
        return flask.render_template('template_test.html', value=False)
    rv = app.test_client().get('/')
    assert b'Success!' in rv.data
def test_add_template_test_with_name_and_template():
    """Named add_app_template_test-registered Jinja test works in templates."""
    bp = flask.Blueprint('bp', __name__)
    def is_boolean(value):
        return isinstance(value, bool)
    bp.add_app_template_test(is_boolean, 'boolean')
    app = flask.Flask(__name__)
    app.register_blueprint(bp, url_prefix='/py')
    @app.route('/')
    def index():
        return flask.render_template('template_test.html', value=False)
    rv = app.test_client().get('/')
    assert b'Success!' in rv.data
| bsd-3-clause |
chbrandt/zyxw | eada/vo/constants.py | 1 | 1506 |
class Waveband(dict):
    """Base class to define a waveband.

    Instances behave as ordinary dicts; the ``ucd`` attribute carries the
    IVOA UCD word for the band and is overridden by concrete subclasses.
    """
    def __init__(self):
        super(Waveband, self).__init__()
        # The base class names no band; subclasses assign their UCD word.
        self.ucd = None
class Radio(Waveband):
    """Radio waveband (UCD word ``em.radio``)."""
    def __init__(self):
        super(Radio,self).__init__()
        self.ucd = 'em.radio'
class Millimeter(Waveband):
    """Millimeter waveband (UCD word ``em.mm``)."""
    def __init__(self):
        super(Millimeter,self).__init__()
        self.ucd = 'em.mm'
class Infrared(Waveband):
    """Infrared waveband (UCD word ``em.IR``)."""
    def __init__(self):
        super(Infrared,self).__init__()
        self.ucd = 'em.IR'
class Optical(Waveband):
    """Optical waveband (UCD word ``em.opt``)."""
    def __init__(self):
        super(Optical,self).__init__()
        self.ucd = 'em.opt'
class Ultraviolet(Waveband):
    """Ultraviolet waveband (UCD word ``em.UV``)."""
    def __init__(self):
        super(Ultraviolet,self).__init__()
        self.ucd = 'em.UV'
class Xray(Waveband):
    """X-ray waveband (UCD word ``em.X-ray``)."""
    def __init__(self):
        super(Xray,self).__init__()
        self.ucd = 'em.X-ray'
class Gammaray(Waveband):
    """Gamma-ray waveband (UCD word ``em.gamma``)."""
    def __init__(self):
        super(Gammaray,self).__init__()
        self.ucd = 'em.gamma'
# Wavebands available to search for catalogue data
# (for convenience I relate the UCD words used)
# For UCDs, take a look at http://www.ivoa.net/documents/latest/UCDlist.html
#
WAVEBANDS = {'radio' : Radio(),
             'millimeter' : Millimeter(),
             'infrared' : Infrared(),
             'optical' : Optical(),
             'uv' : Ultraviolet(),
             'xray' : Xray(),
             'gammaray' : Gammaray()}
# Known VO service types; values are placeholders (no per-service config yet).
SERVICES = {'scs' : None,
            'ssa' : None}
| gpl-2.0 |
wavefrontHQ/python-client | test/test_response_container_monitored_cluster.py | 1 | 1438 | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.response_container_monitored_cluster import ResponseContainerMonitoredCluster # noqa: E501
from wavefront_api_client.rest import ApiException
class TestResponseContainerMonitoredCluster(unittest.TestCase):
    """ResponseContainerMonitoredCluster unit test stubs

    Swagger-codegen generated skeleton; the actual model construction is
    still to be filled in (see FIXME below).
    """
    def setUp(self):
        # No fixtures needed for the generated stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testResponseContainerMonitoredCluster(self):
        """Test ResponseContainerMonitoredCluster"""
        # FIXME: construct object with mandatory attributes with example values
        # model = wavefront_api_client.models.response_container_monitored_cluster.ResponseContainerMonitoredCluster()  # noqa: E501
        pass
# Allow running this test module directly with the stock unittest runner.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
c0defreak/python-for-android | python3-alpha/extra_modules/gdata/tlslite/utils/cryptomath.py | 48 | 11687 | """cryptomath module
This module has basic math/crypto code."""
import os
import sys
import math
import base64
import binascii
# Hash compatibility shim: Python <= 2.4 predates hashlib, so fall back to
# the legacy ``sha`` module there; everywhere else use hashlib's sha1.
if sys.version_info[:2] <= (2, 4):
    from sha import sha as sha1
else:
    from hashlib import sha1
from .compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Each optional crypto backend is probed at import time; the *Loaded flags
# are consulted elsewhere in this module to pick the fastest implementation.

# Try to load M2Crypto/OpenSSL
try:
    from M2Crypto import m2
    m2cryptoLoaded = True
except ImportError:
    m2cryptoLoaded = False
# Try to load cryptlib
try:
    import cryptlib_py
    try:
        cryptlib_py.cryptInit()
    except cryptlib_py.CryptException as e:
        #If tlslite and cryptoIDlib are both present,
        #they might each try to re-initialize this,
        #so we're tolerant of that.
        # NOTE(review): indexing an exception (e[0]) is Python-2-only; on
        # Python 3 this would raise TypeError — confirm against e.args[0].
        if e[0] != cryptlib_py.CRYPT_ERROR_INITED:
            raise
    cryptlibpyLoaded = True
except ImportError:
    cryptlibpyLoaded = False
#Try to load GMPY
try:
    import gmpy
    gmpyLoaded = True
except ImportError:
    gmpyLoaded = False
#Try to load pycrypto
try:
    import Crypto.Cipher.AES
    pycryptoLoaded = True
except ImportError:
    pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# getRandomBytes(howMany) is bound to the best available entropy source, in
# preference order: os.urandom, cryptlib, /dev/urandom, Win32 CryptoAPI.
# prngName records which source was selected (for diagnostics).

# Get os.urandom PRNG
try:
    os.urandom(1)
    def getRandomBytes(howMany):
        return stringToBytes(os.urandom(howMany))
    prngName = "os.urandom"
# NOTE(review): bare except deliberately catches any failure probing
# os.urandom so the fallbacks below get a chance; consider narrowing.
except:
    # Else get cryptlib PRNG
    if cryptlibpyLoaded:
        def getRandomBytes(howMany):
            # Key an AES-OFB context from cryptlib's internal RNG and use it
            # to fill the buffer.
            randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED,
                                                       cryptlib_py.CRYPT_ALGO_AES)
            cryptlib_py.cryptSetAttribute(randomKey,
                                          cryptlib_py.CRYPT_CTXINFO_MODE,
                                          cryptlib_py.CRYPT_MODE_OFB)
            cryptlib_py.cryptGenerateKey(randomKey)
            bytes = createByteArrayZeros(howMany)
            cryptlib_py.cryptEncrypt(randomKey, bytes)
            return bytes
        prngName = "cryptlib"
    else:
        #Else get UNIX /dev/urandom PRNG
        try:
            devRandomFile = open("/dev/urandom", "rb")
            def getRandomBytes(howMany):
                return stringToBytes(devRandomFile.read(howMany))
            prngName = "/dev/urandom"
        except IOError:
            #Else get Win32 CryptoAPI PRNG
            try:
                import win32prng
                def getRandomBytes(howMany):
                    s = win32prng.getRandomBytes(howMany)
                    if len(s) != howMany:
                        raise AssertionError()
                    return stringToBytes(s)
                prngName ="CryptoAPI"
            except ImportError:
                #Else no PRNG :-(
                def getRandomBytes(howMany):
                    raise NotImplementedError("No Random Number Generator "\
                                              "available.")
                prngName = "None"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(bytes):
    """Interpret *bytes* as an unsigned big-endian integer."""
    total = 0
    # Fold left-to-right: each step shifts the accumulator up one byte and
    # adds the next (most-significant-first) byte value.
    for byte in bytes:
        total = (total << 8) + byte
    return total
def numberToBytes(n):
    """Convert non-negative integer *n* to a big-endian byte array.

    Uses numBytes() (defined below) and createByteArrayZeros() from the
    compat star-import for the buffer type.
    """
    howManyBytes = numBytes(n)
    bytes = createByteArrayZeros(howManyBytes)
    # Fill from the least-significant end, peeling one byte per iteration.
    for count in range(howManyBytes-1, -1, -1):
        bytes[count] = int(n % 256)
        n >>= 8
    return bytes
def bytesToBase64(bytes):
    """Encode a byte array as base64 via the compat string helpers."""
    s = bytesToString(bytes)
    return stringToBase64(s)
def base64ToBytes(s):
    """Decode base64 input *s* to a byte array."""
    s = base64ToString(s)
    return stringToBytes(s)
def numberToBase64(n):
    """Encode integer *n* as base64 (big-endian byte order)."""
    bytes = numberToBytes(n)
    return bytesToBase64(bytes)
def base64ToNumber(s):
    """Decode base64 input *s* to an unsigned integer."""
    bytes = base64ToBytes(s)
    return bytesToNumber(bytes)
def stringToNumber(s):
    """Interpret string *s* as a big-endian unsigned integer."""
    bytes = stringToBytes(s)
    return bytesToNumber(bytes)
def numberToString(s):
    """Convert integer *s* to its big-endian string representation."""
    bytes = numberToBytes(s)
    return bytesToString(bytes)
def base64ToString(s):
    """Decode base64 input, translating binascii errors to SyntaxError.

    Callers in this module rely on SyntaxError for malformed input, so both
    binascii error types are converted rather than propagated.
    """
    try:
        # base64.decodestring was a deprecated alias removed in Python 3.9;
        # decodebytes is the identical modern spelling.
        return base64.decodebytes(s)
    except binascii.Error as e:
        raise SyntaxError(e)
    except binascii.Incomplete as e:
        raise SyntaxError(e)
def stringToBase64(s):
    """Encode *s* as base64 without line breaks.

    The original used base64.encodestring() (removed in Python 3.9) and then
    stripped newlines with a str argument, which raises TypeError against
    the bytes result on Python 3.  b64encode emits one unbroken line, which
    is exactly what the strip-newlines idiom produced.
    """
    return base64.b64encode(s)
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
    """Convert an OpenSSL MPI (4-byte length header + magnitude) to an int.

    NOTE(review): ord(mpi[4]) assumes a Python 2 str; on Python 3 bytes
    indexing already yields an int and ord() would raise — confirm callers.
    """
    if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
        raise AssertionError()
    bytes = stringToBytes(mpi[4:])
    return bytesToNumber(bytes)
def numberToMPI(n):
    """Convert non-negative integer *n* to an OpenSSL MPI string.

    Layout: 4-byte big-endian length, then the magnitude; a leading zero
    byte is inserted when the top bit would otherwise be set, so the value
    is unambiguously positive.
    """
    bytes = numberToBytes(n)
    ext = 0
    #If the high-order bit is going to be set,
    #add an extra byte of zeros
    if (numBits(n) & 0x7)==0:
        ext = 1
    length = numBytes(n) + ext
    bytes = concatArrays(createByteArrayZeros(4+ext), bytes)
    # Write the 4-byte big-endian length header.
    bytes[0] = (length >> 24) & 0xFF
    bytes[1] = (length >> 16) & 0xFF
    bytes[2] = (length >> 8) & 0xFF
    bytes[3] = length & 0xFF
    return bytesToString(bytes)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBytes(n):
    """Number of bytes needed to represent non-negative integer n (0 -> 0)."""
    if n==0:
        return 0
    bits = numBits(n)  # numBits is provided by the .compat star-import
    return int(math.ceil(bits / 8.0))
def hashAndBase64(s):
    """Return the base64-encoded SHA-1 digest of *s*."""
    return stringToBase64(sha1(s).digest())
def getBase64Nonce(numChars=22): #defaults to an 132 bit nonce
    """Return a random base64 nonce of *numChars* characters."""
    bytes = getRandomBytes(numChars)
    bytesStr = "".join([chr(b) for b in bytes])
    return stringToBase64(bytesStr)[:numChars]
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
    """Return a uniformly random integer in [low, high) by rejection sampling."""
    if low >= high:
        raise AssertionError()
    howManyBits = numBits(high)
    howManyBytes = numBytes(high)
    lastBits = howManyBits % 8
    while 1:
        bytes = getRandomBytes(howManyBytes)
        if lastBits:
            # Mask the top byte so candidates don't exceed high's bit length,
            # keeping the rejection rate low.
            bytes[0] = bytes[0] % (1 << lastBits)
        n = bytesToNumber(bytes)
        if n >= low and n < high:
            return n
def gcd(a, b):
    """Greatest common divisor of *a* and *b* (Euclid's algorithm)."""
    # Ensure a holds the larger operand before iterating.
    if a < b:
        a, b = b, a
    while b:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple of *a* and *b*.

    Uses floor division: the old "/" relied on Python 2 integer division
    and returns a float on Python 3, corrupting downstream big-int math.
    """
    return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
    """Return the multiplicative inverse of a mod b, or 0 if none exists."""
    c, d = a, b
    uc, ud = 1, 0
    while c != 0:
        # Floor division: Python 2's integer "/" became true division on
        # Python 3, silently turning q into a float and breaking the
        # algorithm; "//" is correct on both.
        q = d // c
        c, d = d-(q*c), c
        uc, ud = ud - (q * uc), uc
    if d == 1:
        # a and b are coprime; normalize the coefficient into [0, b).
        return ud % b
    return 0
# Bind powMod to gmpy's C implementation when available, else a pure-Python
# left-to-right multi-bit-scanning exponentiation.
if gmpyLoaded:
    def powMod(base, power, modulus):
        base = gmpy.mpz(base)
        power = gmpy.mpz(power)
        modulus = gmpy.mpz(modulus)
        result = pow(base, power, modulus)
        return int(result)
else:
    #Copied from Bryan G. Olson's post to comp.lang.python
    #Does left-to-right instead of pow()'s right-to-left,
    #thus about 30% faster than the python built-in with small bases
    def powMod(base, power, modulus):
        nBitScan = 5
        # NOTE(review): the string below sits after a statement, so it is a
        # no-op expression rather than the function's docstring.
        """ Return base**power mod modulus, using multi bit scanning
        with nBitScan bits at a time."""
        #TREV - Added support for negative exponents
        negativeResult = False
        if (power < 0):
            power *= -1
            negativeResult = True
        exp2 = 2**nBitScan
        mask = exp2 - 1
        # Break power into a list of digits of nBitScan bits.
        # The list is recursive so easy to read in reverse direction.
        # NOTE(review): power == 0 leaves nibbles as None, and the unpacking
        # below would raise TypeError — callers appear never to pass 0.
        nibbles = None
        while power:
            nibbles = int(power & mask), nibbles
            power = power >> nBitScan
        # Make a table of powers of base up to 2**nBitScan - 1
        lowPowers = [1]
        for i in range(1, exp2):
            lowPowers.append((lowPowers[i-1] * base) % modulus)
        # To exponentiate by the first nibble, look it up in the table
        nib, nibbles = nibbles
        prod = lowPowers[nib]
        # For the rest, square nBitScan times, then multiply by
        # base^nibble
        while nibbles:
            nib, nibbles = nibbles
            for i in range(nBitScan):
                prod = (prod * prod) % modulus
            if nib: prod = (prod * lowPowers[nib]) % modulus
        #TREV - Added support for negative exponents
        if negativeResult:
            prodInv = invMod(prod, modulus)
            #Check to make sure the inverse is correct
            if (prod * prodInv) % modulus != 1:
                raise AssertionError()
            return prodInv
        return prod
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
    """Return the list of all primes below *n* (sieve of Eratosthenes)."""
    sieve = list(range(n))
    # The bound must include int(sqrt(n)) itself: the original stopped one
    # short, so the square of the largest relevant prime survived the sieve
    # (e.g. 961 == 31**2 was reported as prime for n == 1000, which also
    # made isPrime(961) return True via its sieve shortcut).
    for count in range(2, int(math.sqrt(n)) + 1):
        if sieve[count] == 0:
            continue
        # Cross off every multiple of this prime, starting at twice it.
        x = sieve[count] * 2
        while x < len(sieve):
            sieve[x] = 0
            x += sieve[count]
    sieve = [x for x in sieve[2:] if x]
    return sieve

sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
    """Probabilistic primality test: sieve trial division, then Rabin-Miller.

    iterations: number of Rabin-Miller rounds (per Ferguson & Schneier);
    display: print progress markers to stdout.
    """
    #Trial division with sieve
    for x in sieve:
        if x >= n: return True
        if n % x == 0: return False
    #Passed trial division, proceed to Rabin-Miller
    #Rabin-Miller implemented per Ferguson & Schneier
    #Compute s, t for Rabin-Miller
    if display: print("*", end=' ')
    s, t = n-1, 0
    while s % 2 == 0:
        # Floor division keeps s an int on Python 3; the original "/" made
        # it a float, which then broke powMod's bit arithmetic.
        s, t = s//2, t+1
    #Repeat Rabin-Miller x times
    a = 2 #Use 2 as a base for first iteration speedup, per HAC
    for count in range(iterations):
        v = powMod(a, s, n)
        if v==1:
            continue
        i = 0
        while v != n-1:
            if i == t-1:
                return False
            else:
                v, i = powMod(v, 2, n), i+1
        a = getRandomNumber(2, n)
    return True
def getRandomPrime(bits, display=False):
    """Return a random prime of the given bit length with the top two bits set."""
    if bits < 10:
        raise AssertionError()
    #The 1.5 ensures the 2 MSBs are set
    #Thus, when used for p,q in RSA, n will have its MSB set
    #
    #Since 30 is lcm(2,3,5), we'll set our test numbers to
    #29 % 30 and keep them there
    # "* 3 // 2" keeps the bound an int; the original "* 3/2" produced a
    # float on Python 3, corrupting the candidate arithmetic below.
    low = (2 ** (bits-1)) * 3 // 2
    high = 2 ** bits - 30
    p = getRandomNumber(low, high)
    # Align to 29 mod 30 so stepping by 30 skips multiples of 2, 3 and 5.
    p += 29 - (p % 30)
    while 1:
        if display: print(".", end=' ')
        p += 30
        if p >= high:
            p = getRandomNumber(low, high)
            p += 29 - (p % 30)
        if isPrime(p, display=display):
            return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
    """Return a random safe prime p = 2q + 1 of the requested bit length.

    Unused at the moment.  The search runs over (bits-1)-bit candidates
    q so that p = 2q + 1 has the requested size.
    """
    if bits < 10:
        raise AssertionError()
    # The 3/2 factor ensures the 2 MSBs are set (see getRandomPrime).
    # Floor division keeps the bound an exact integer; true division
    # would yield an imprecise float for realistic bit sizes.
    low = (2 ** (bits-2)) * 3 // 2
    high = (2 ** (bits-1)) - 30
    q = getRandomNumber(low, high)
    # Since 30 == lcm(2,3,5), keep candidates at 29 mod 30.
    q += 29 - (q % 30)
    while 1:
        if display: print(".", end=' ')
        q += 30
        if (q >= high):
            q = getRandomNumber(low, high)
            q += 29 - (q % 30)
        # Ideas from Tom Wu's SRP code: do cheap trial division on q
        # first (iterations=0 runs only the sieve), then test p, and
        # only run the full Rabin-Miller on q if p also looks prime.
        if isPrime(q, 0, display=display):
            p = (2 * q) + 1
            if isPrime(p, display=display):
                if isPrime(q, display=display):
                    return p
| apache-2.0 |
lbartoletti/QGIS | tests/src/python/test_qgsserver_wms_dxf.py | 18 | 2175 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer WMS GetPrint.
From build dir, run: ctest -R PyQgsServerWMSGetMapDxf -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Tudor Bărăscu'
__date__ = '27/01/2021'
__copyright__ = 'Copyright 2020, The QGIS Project'
import os
import urllib.parse
from qgis.testing import unittest
from test_qgsserver import QgsServerTestBase
from qgis.core import (QgsVectorLayer)
from qgis.PyQt import QtCore
# Needed on Qt 5 so that the serialization of XML is consistent among
# all executions (hashing is otherwise randomized per process).
os.environ['QT_HASH_SEED'] = '1'
class PyQgsServerWMSGetMapDxf(QgsServerTestBase):
    """Tests DXF output of the WMS GetMap request."""

    def test_dxf_export_works_with_reverse_axis_epsg(self):
        """GetMap as DXF with a reversed-axis CRS (EPSG:3844) yields the expected geometry."""
        qs = "?" + "&".join(["%s=%s" % i for i in list({
            "MAP": urllib.parse.quote(os.path.join(self.testdata_path,
                                                   'test_dxf_export.qgs')),
            "SERVICE": "WMS",
            "VERSION": "1.3.0",
            "REQUEST": "GetMap",
            "BBOX": "399980,449980,400050,450100",
            "CRS": "EPSG:3844",
            "LAYERS": "test_dxf_export",
            "STYLES": ",",
            "FORMAT": "application/dxf",
            "SCALE": "500",
            "FILE_NAME": "test_dxf_export.dxf"
        }.items())])

        r, h = self._result(self._execute_request(qs))

        tempDir = QtCore.QTemporaryDir()
        dxf = os.path.join(tempDir.path(), 'test_dxf_export.dxf')
        # Context manager guarantees the file is flushed and closed
        # before OGR opens it (the bare open/close pair leaked the
        # handle if write() raised).
        with open(dxf, 'wb') as f:
            f.write(r)

        vl = QgsVectorLayer(dxf, "lyr", "ogr")
        myMessage = ('Expected downloaded dxf contains: %s line\nGot: %s\n' %
                     (1, vl.featureCount()))
        self.assertEqual(vl.featureCount(), 1, myMessage)
        # The DXF must contain the line with axes swapped back to the
        # projected order (northing/easting of EPSG:3844).
        line_from_dxf = next(vl.getFeatures()).geometry().asWkt()
        line = 'LineString (450000 400000, 450100 400000)'
        self.assertEqual(line_from_dxf, line)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/numpy/polynomial/chebyshev.py | 79 | 61966 | """
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebpow` -- raise a Chebyshev series to a positive integer power
- `chebval` -- evaluate a Chebyshev series at given points.
- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.
- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebvander2d` -- Vandermonde-like matrix for 2D power series.
- `chebvander3d` -- Vandermonde-like matrix for 3D power series.
- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.
- `chebweight` -- Chebyshev weight function.
- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebpts1` -- Chebyshev points of the first kind.
- `chebpts2` -- Chebyshev points of the second kind.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
'chebgauss', 'chebweight']
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(c):
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = c.size
zs = np.zeros(2*n-1, dtype=c.dtype)
zs[n-1:] = c/2
return zs + zs[::-1]
def _zseries_to_cseries(zs):
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
c = zs[n-1:].copy()
c[1:n] *= 2
return c
def _zseries_mul(z1, z2):
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D but this is not checked.
Returns
-------
product : 1-D ndarray
The product z-series.
Notes
-----
This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2):
    """Divide the first z-series by the second.

    Divide `z1` by `z2` and return the quotient and remainder as z-series.
    Warning: this implementation only applies when both z1 and z2 have the
    same symmetry, which is sufficient for present purposes.

    Parameters
    ----------
    z1, z2 : 1-D ndarray
        The arrays must be 1-D and have the same symmetry, but this is not
        checked.

    Returns
    -------
    (quotient, remainder) : 1-D ndarrays
        Quotient and remainder as z-series.

    Notes
    -----
    This is not the same as polynomial division on account of the desired form
    of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
    then the following rules apply:

    S/S -> S,S
    A/A -> S,A

    The restriction to types of the same symmetry could be fixed but seems like
    unneeded generality. There is no natural form for the remainder in the case
    where there is no symmetry.

    """
    z1 = z1.copy()
    z2 = z2.copy()
    len1 = len(z1)
    len2 = len(z2)
    if len2 == 1:
        # Scalar divisor: exact division, zero remainder.
        z1 /= z2
        return z1, z1[:1]*0
    elif len1 < len2:
        # Dividend shorter than divisor: quotient is zero.
        return z1[:1]*0, z1
    else:
        dlen = len1 - len2
        scl = z2[0]
        # Normalize the divisor so its leading coefficient is 1;
        # compensated by dividing the quotient by scl at the end.
        z2 /= scl
        quo = np.empty(dlen + 1, dtype=z1.dtype)
        i = 0
        j = dlen
        while i < j:
            # Because the series are symmetric, the quotient can be
            # built from both ends at once, eliminating terms from the
            # low and high ends of z1 in the same pass.
            r = z1[i]
            quo[i] = z1[i]
            quo[dlen - i] = r
            tmp = r*z2
            z1[i:i+len2] -= tmp
            z1[j:j+len2] -= tmp
            i += 1
            j -= 1
        # Middle coefficient (i == j) handled once.
        r = z1[i]
        quo[i] = r
        tmp = r*z2
        z1[i:i+len2] -= tmp
        # Undo the divisor normalization.
        quo /= scl
        rem = z1[i+1:i-1+len2].copy()
        return quo, rem
def _zseries_der(zs):
    """Differentiate a z-series.

    The derivative is with respect to x, not z. This is achieved using the
    chain rule and the value of dx/dz given in the module notes.

    Parameters
    ----------
    zs : z-series
        The z-series to differentiate.

    Returns
    -------
    derivative : z-series
        The derivative

    Notes
    -----
    The zseries for x (ns) has been multiplied by two in order to avoid
    using floats that are incompatible with Decimal and likely other
    specialized scalar types. This scaling has been compensated by
    multiplying the value of zs by two also so that the two cancels in the
    division.

    """
    n = len(zs)//2
    # ns is the z-series z - 1/z, i.e. 2*z*dx/dz (see Notes for the
    # factor of two).
    ns = np.array([-1, 0, 1], dtype=zs.dtype)
    # Term-by-term d/dz scaled by z: exponent k multiplies coefficient
    # k, where exponents run from -n to n; the extra *2 cancels the
    # factor built into ns.
    zs *= np.arange(-n, n+1)*2
    # Chain rule: divide by 2*z*dx/dz; remainder is discarded.
    d, r = _zseries_div(zs, ns)
    return d
def _zseries_int(zs):
    """Integrate a z-series.

    The integral is with respect to x, not z. This is achieved by a change
    of variable using dx/dz given in the module notes.

    Parameters
    ----------
    zs : z-series
        The z-series to integrate

    Returns
    -------
    integral : z-series
        The indefinite integral

    Notes
    -----
    The zseries for x (ns) has been multiplied by two in order to avoid
    using floats that are incompatible with Decimal and likely other
    specialized scalar types. This scaling has been compensated by
    dividing the resulting zs by two.

    """
    n = 1 + len(zs)//2
    # ns is the z-series z - 1/z, i.e. 2*z*dx/dz (see Notes).
    ns = np.array([-1, 0, 1], dtype=zs.dtype)
    zs = _zseries_mul(zs, ns)
    # Integrate term-by-term: divide the coefficient of z**k by 2*k;
    # exponents run from -n to n.
    div = np.arange(-n, n+1)*2
    zs[:n] /= div[:n]
    zs[n+1:] /= div[n+1:]
    # The constant of integration is arbitrary; set it to zero here.
    zs[n] = 0
    return zs
#
# Chebyshev series functions
#
def poly2cheb(pol):
    """
    Convert a polynomial to a Chebyshev series.

    Convert an array representing the coefficients of a polynomial (relative
    to the "standard" basis) ordered from lowest degree to highest, to an
    array of the coefficients of the equivalent Chebyshev series, ordered
    from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Chebyshev
        series.

    See Also
    --------
    cheb2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> p = P.Polynomial(range(4))
    >>> p
    Polynomial([ 0.,  1.,  2.,  3.], [-1.,  1.])
    >>> c = p.convert(kind=P.Chebyshev)
    >>> c
    Chebyshev([ 1.  ,  3.25,  1.  ,  0.75], [-1.,  1.])
    >>> P.poly2cheb(range(4))
    array([ 1.  ,  3.25,  1.  ,  0.75])

    """
    [pol] = pu.as_series([pol])
    deg = len(pol) - 1
    res = 0
    # Horner's scheme in the Chebyshev basis: repeatedly multiply the
    # accumulated series by x (chebmulx) and add the next lower-degree
    # polynomial coefficient.
    for i in range(deg, -1, -1):
        res = chebadd(chebmulx(res), pol[i])
    return res
def cheb2poly(c):
    """
    Convert a Chebyshev series to a polynomial.

    Convert an array representing the coefficients of a Chebyshev series,
    ordered from lowest degree to highest, to an array of the coefficients
    of the equivalent polynomial (relative to the "standard" basis) ordered
    from lowest to highest degree.

    Parameters
    ----------
    c : array_like
        1-D array containing the Chebyshev series coefficients, ordered
        from lowest order term to highest.

    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.

    See Also
    --------
    poly2cheb

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> c = P.Chebyshev(range(4))
    >>> c
    Chebyshev([ 0.,  1.,  2.,  3.], [-1.,  1.])
    >>> p = c.convert(kind=P.Polynomial)
    >>> p
    Polynomial([ -2.,  -8.,   4.,  12.], [-1.,  1.])
    >>> P.cheb2poly(range(4))
    array([ -2.,  -8.,   4.,  12.])

    """
    from .polynomial import polyadd, polysub, polymulx

    [c] = pu.as_series([c])
    n = len(c)
    if n < 3:
        # T_0 = 1 and T_1 = x, so series of degree < 2 are already
        # polynomials in the standard basis.
        return c
    else:
        c0 = c[-2]
        c1 = c[-1]
        # i is the current degree of c1
        # Backward recurrence based on T_{k+1} = 2*x*T_k - T_{k-1},
        # performed with polynomial (standard-basis) arithmetic.
        for i in range(n - 1, 1, -1):
            tmp = c0
            c0 = polysub(c[i - 2], c1)
            c1 = polyadd(tmp, polymulx(c1)*2)
        return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#

# Chebyshev default domain.
chebdomain = np.array([-1, 1])

# Chebyshev coefficients representing zero.
chebzero = np.array([0])

# Chebyshev coefficients representing one.
chebone = np.array([1])

# Chebyshev coefficients representing the identity x.
chebx = np.array([0, 1])
def chebline(off, scl):
    """
    Chebyshev series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Chebyshev series for
        ``off + scl*x``.

    See Also
    --------
    polyline

    Examples
    --------
    >>> import numpy.polynomial.chebyshev as C
    >>> C.chebline(3,2)
    array([3, 2])
    >>> C.chebval(-3, C.chebline(3,2)) # should be -3
    -3.0

    """
    # A zero slope collapses the series to the constant term alone.
    if scl == 0:
        return np.array([off])
    return np.array([off, scl])
def chebfromroots(roots):
    """
    Generate a Chebyshev series with given roots.

    The function returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    in Chebyshev form, where the `r_n` are the roots specified in `roots`.
    If a zero has multiplicity n, then it must appear in `roots` n times.
    For instance, if 2 is a root of multiplicity three and 3 is a root of
    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
    roots can appear in any order.

    If the returned coefficients are `c`, then

    .. math:: p(x) = c_0 + c_1 * T_1(x) + ... +  c_n * T_n(x)

    The coefficient of the last term is not generally 1 for monic
    polynomials in Chebyshev form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    polyfromroots, legfromroots, lagfromroots, hermfromroots,
    hermefromroots.

    Examples
    --------
    >>> import numpy.polynomial.chebyshev as C
    >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
    array([ 0.  , -0.25,  0.  ,  0.25])
    >>> j = complex(0,1)
    >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
    array([ 1.5+0.j,  0.0+0.j,  0.5+0.j])

    """
    if len(roots) == 0:
        # Empty product: the constant polynomial 1.
        return np.ones(1)
    else:
        [roots] = pu.as_series([roots], trim=False)
        roots.sort()
        # One linear factor (x - r) per root, in Chebyshev form.
        p = [chebline(-r, 1) for r in roots]
        n = len(p)
        # Multiply the factors together pairwise; this balanced product
        # halves the list each pass instead of growing one running
        # product.
        while n > 1:
            m, r = divmod(n, 2)
            tmp = [chebmul(p[i], p[i+m]) for i in range(m)]
            if r:
                # Odd count: fold the leftover factor into the first
                # product.
                tmp[0] = chebmul(tmp[0], p[-1])
            p = tmp
            n = m
        return p[0]
def chebadd(c1, c2):
    """
    Add one Chebyshev series to another.

    Returns the Chebyshev series of the sum ``c1 + c2``.  Both arguments
    are sequences of coefficients ordered from lowest to highest degree,
    e.g., ``[1, 2, 3]`` stands for ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Chebyshev series of their sum.

    See Also
    --------
    chebsub, chebmul, chebdiv, chebpow

    Notes
    -----
    Unlike multiplication or division, the sum of two Chebyshev series is
    again a Chebyshev series without any reprojection onto the basis, so
    addition is simply component-wise, just as for ordinary polynomials.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> C.chebadd(c1,c2)
    array([ 4.,  4.,  4.])

    """
    # as_series returns trimmed, writable copies safe to modify in place.
    [c1, c2] = pu.as_series([c1, c2])
    # Arrange for c1 to be the longer array, then fold c2 into it.
    if len(c2) > len(c1):
        c1, c2 = c2, c1
    c1[:c2.size] += c2
    return pu.trimseq(c1)
def chebsub(c1, c2):
    """
    Subtract one Chebyshev series from another.

    Returns the Chebyshev series of the difference ``c1 - c2``.  Both
    arguments are sequences of coefficients ordered from lowest to
    highest degree, e.g., ``[1, 2, 3]`` stands for ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Chebyshev series coefficients representing their difference.

    See Also
    --------
    chebadd, chebmul, chebdiv, chebpow

    Notes
    -----
    Unlike multiplication or division, the difference of two Chebyshev
    series is again a Chebyshev series without any reprojection onto the
    basis, so subtraction is simply component-wise, just as for ordinary
    polynomials.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> C.chebsub(c1,c2)
    array([-2.,  0.,  2.])
    >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
    array([ 2.,  0., -2.])

    """
    # as_series returns trimmed, writable copies safe to modify in place.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c2) >= len(c1):
        # Negate the (not shorter) subtrahend and fold c1 into it.
        diff = -c2
        diff[:c1.size] += c1
    else:
        diff = c1
        diff[:c2.size] -= c2
    return pu.trimseq(diff)
def chebmulx(c):
    """Multiply a Chebyshev series by x.

    Multiply the polynomial `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    .. versionadded:: 1.5.0

    """
    # as_series returns a trimmed copy.
    [c] = pu.as_series([c])
    # The zero series maps to itself.
    if len(c) == 1 and c[0] == 0:
        return c

    n = len(c)
    out = np.empty(n + 1, dtype=c.dtype)
    # x*T_0 = T_1; the c[0]*0 trick keeps exotic dtypes (e.g. Decimal)
    # intact.
    out[0] = c[0]*0
    out[1] = c[0]
    if n > 1:
        # x*T_k = (T_{k+1} + T_{k-1})/2 for k >= 1.
        halved = c[1:]/2
        out[2:] = halved
        out[0:-2] += halved
    return out
def chebmul(c1, c2):
    """
    Multiply one Chebyshev series by another.

    Returns the product of two Chebyshev series ``c1 * c2``.  Both
    arguments are sequences of coefficients from lowest to highest
    degree, e.g., ``[1, 2, 3]`` stands for ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Chebyshev series coefficients representing their product.

    See Also
    --------
    chebadd, chebsub, chebdiv, chebpow

    Notes
    -----
    The polynomial product of two C-series generally contains terms that
    are not in the Chebyshev basis, so the product must be reprojected
    onto the basis; the resulting coefficients can look "unintuitive"
    (but are correct), as in the example below.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> C.chebmul(c1,c2) # multiplication requires "reprojection"
    array([  6.5,  12. ,  12. ,   4. ,   1.5])

    """
    # as_series returns trimmed copies.
    [c1, c2] = pu.as_series([c1, c2])
    # Multiply in the z-series representation, where the product is a
    # plain convolution, then convert back to Chebyshev form.
    prod = _zseries_mul(_cseries_to_zseries(c1), _cseries_to_zseries(c2))
    return pu.trimseq(_zseries_to_cseries(prod))
def chebdiv(c1, c2):
    """
    Divide one Chebyshev series by another.

    Returns the quotient-with-remainder of two Chebyshev series
    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``T_0 + 2*T_1 + 3*T_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Chebyshev series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of Chebyshev series coefficients representing the quotient and
        remainder.

    See Also
    --------
    chebadd, chebsub, chebmul, chebpow

    Notes
    -----
    In general, the (polynomial) division of one C-series by another
    results in quotient and remainder terms that are not in the Chebyshev
    polynomial basis set.  Thus, to express these results as C-series, it
    is typically necessary to "reproject" the results onto said basis
    set, which typically produces "unintuitive" (but correct) results;
    see Examples section below.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
    (array([ 3.]), array([-8., -4.]))
    >>> c2 = (0,1,2,3)
    >>> C.chebdiv(c2,c1) # neither "intuitive"
    (array([ 0.,  2.]), array([-2., -4.]))

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()

    lc1 = len(c1)
    lc2 = len(c2)
    if lc1 < lc2:
        # Dividend degree below divisor degree: quotient is zero.
        return c1[:1]*0, c1
    elif lc2 == 1:
        # Scalar divisor: exact division, zero remainder.
        return c1/c2[-1], c1[:1]*0
    else:
        # General case: divide in the z-series representation and
        # convert quotient and remainder back to Chebyshev form.
        z1 = _cseries_to_zseries(c1)
        z2 = _cseries_to_zseries(c2)
        quo, rem = _zseries_div(z1, z2)
        quo = pu.trimseq(_zseries_to_cseries(quo))
        rem = pu.trimseq(_zseries_to_cseries(rem))
        return quo, rem
def chebpow(c, pow, maxpower=16):
    """Raise a Chebyshev series to a power.

    Returns the Chebyshev series `c` raised to the power `pow`.  The
    argument `c` is a sequence of coefficients ordered from low to high,
    i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Chebyshev series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16

    Returns
    -------
    coef : ndarray
        Chebyshev series of power.

    See Also
    --------
    chebadd, chebsub, chebmul, chebdiv

    Examples
    --------

    """
    # as_series returns a trimmed copy.
    [c] = pu.as_series([c])
    power = int(pow)
    # Guard clauses: reject non-integer or negative powers and powers
    # beyond the configured limit; handle the trivial exponents directly.
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    if power == 0:
        return np.array([1], dtype=c.dtype)
    if power == 1:
        return c
    # Repeated convolution in the z-series domain.  Binary (square-and-
    # multiply) exponentiation would be faster but is left as-is here.
    zs = _cseries_to_zseries(c)
    acc = zs
    for _ in range(power - 1):
        acc = np.convolve(acc, zs)
    return _zseries_to_cseries(acc)
def chebder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Chebyshev series.

    Returns the Chebyshev series coefficients `c` differentiated `m` times
    along `axis`.  At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable). The argument
    `c` is an array of coefficients from low to high degree along each
    axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
    while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
    2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
    ``y``.

    Parameters
    ----------
    c : array_like
        Array of Chebyshev series coefficients. If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change of
        variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Chebyshev series of the derivative.

    See Also
    --------
    chebint

    Notes
    -----
    In general, the result of differentiating a C-series needs to be
    "reprojected" onto the C-series basis set. Thus, typically, the
    result of this function is "unintuitive," albeit correct; see Examples
    section below.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c = (1,2,3,4)
    >>> C.chebder(c)
    array([ 14.,  12.,  24.])
    >>> C.chebder(c,3)
    array([ 96.])
    >>> C.chebder(c,scl=-1)
    array([-14., -12., -24.])
    >>> C.chebder(c,2,-1)
    array([ 12.,  96.])

    """
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Boolean/integer input cannot hold the result; promote to double.
        c = c.astype(np.double)
    cnt, iaxis = [int(t) for t in [m, axis]]

    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim

    if cnt == 0:
        return c

    # Roll the working axis to the front so the recurrence below can
    # index it directly.
    c = np.rollaxis(c, iaxis)
    n = len(c)
    if cnt >= n:
        # Differentiating at least `degree + 1` times yields zero.
        c = c[:1]*0
    else:
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # Backward recurrence: the derivative coefficient of degree
            # j-1 is 2*j*c[j], with a correction folded back into the
            # degree j-2 coefficient of the source series.
            for j in range(n, 2, -1):
                der[j - 1] = (2*j)*c[j]
                c[j - 2] += (j*c[j])/(j - 2)
            if n > 1:
                der[1] = 4*c[2]
            der[0] = c[1]
            c = der
    # Restore the original axis order.
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Chebyshev series.

    Returns the Chebyshev series coefficients `c` integrated `m` times from
    `lbnd` along `axis`. At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable.  ("Buyer
    beware": note that, depending on what one is doing, one may want `scl`
    to be the reciprocal of what one might expect; for more information,
    see the Notes section below.)  The argument `c` is an array of
    coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
    represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
    2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.

    Parameters
    ----------
    c : array_like
        Array of Chebyshev series coefficients. If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at zero
        is the first value in the list, the value of the second integral
        at zero is the second value, etc.  If ``k == []`` (the default),
        all constants are set to zero.  If ``m == 1``, a single scalar can
        be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        C-series coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
        ``np.isscalar(scl) == False``.

    See Also
    --------
    chebder

    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note?  Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`.  Then
    .. math::`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a`- perhaps not what one would have first thought.

    Also note that, in general, the result of integrating a C-series needs
    to be "reprojected" onto the C-series basis set.  Thus, typically,
    the result of this function is "unintuitive," albeit correct; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial import chebyshev as C
    >>> c = (1,2,3)
    >>> C.chebint(c)
    array([ 0.5, -0.5,  0.5,  0.5])
    >>> C.chebint(c,3)
    array([ 0.03125   , -0.1875    ,  0.04166667, -0.05208333,  0.01041667,
            0.00625   ])
    >>> C.chebint(c, k=3)
    array([ 3.5, -0.5,  0.5,  0.5])
    >>> C.chebint(c,lbnd=-2)
    array([ 8.5, -0.5,  0.5,  0.5])
    >>> C.chebint(c,scl=-2)
    array([-1.,  1., -1., -1.])

    """
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Boolean/integer input cannot hold the result; promote to double.
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]
    cnt, iaxis = [int(t) for t in [m, axis]]

    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim

    if cnt == 0:
        return c

    # Roll the working axis to the front; pad the constants so there is
    # one per integration pass.
    c = np.rollaxis(c, iaxis)
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # Integral of the zero series is just the constant.
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            # Integral of T_0 is T_1; the c[0]*0 trick keeps exotic
            # dtypes (e.g. Decimal) intact.
            tmp[0] = c[0]*0
            tmp[1] = c[0]
            if n > 1:
                # Integral of T_1 is T_2/4.
                tmp[2] = c[1]/4
            for j in range(2, n):
                # Integral of T_j is T_{j+1}/(2*(j+1)) - T_{j-1}/(2*(j-1)).
                # (A dead local `t = c[j]/(2*j + 1)` was removed here;
                # it was never read.)
                tmp[j + 1] = c[j]/(2*(j + 1))
                tmp[j - 1] -= c[j]/(2*(j - 1))
            # Choose the constant so the integral equals k[i] at lbnd.
            tmp[0] += k[i] - chebval(lbnd, tmp)
            c = tmp
    # Restore the original axis order.
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def chebval(x, c, tensor=True):
    """
    Evaluate a Chebyshev series at points x.

    If `c` is of length `n + 1`, this function returns the value:

    .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)

    The parameter `x` is converted to an array only if it is a tuple or a
    list; otherwise it is treated as a scalar.  In either case, either `x`
    or its elements must support multiplication and addition both with
    themselves and with the elements of `c`.

    If `c` is a 1-D array, then ``p(x)`` has the same shape as `x`.  If
    `c` is multidimensional and `tensor` is true the result has shape
    ``c.shape[1:] + x.shape``; if `tensor` is false it has shape
    ``c.shape[1:]``.  Trailing zeros in the coefficients are used in the
    evaluation, so they should be avoided if efficiency is a concern.

    Parameters
    ----------
    x : array_like, compatible object
        If `x` is a list or tuple, it is converted to an ndarray, otherwise
        it is left unchanged and treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree n are contained in c[n].  If `c` is multidimensional the
        remaining indices enumerate multiple polynomials.
    tensor : boolean, optional
        If True, the shape of the coefficient array is extended with ones
        on the right, one for each dimension of `x`, so that every column
        of coefficients is evaluated for every element of `x`.  If False,
        `x` is broadcast over the columns of `c` for the evaluation.
        The default value is True.

        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        The shape of the return value is described above.

    See Also
    --------
    chebval2d, chebgrid2d, chebval3d, chebgrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.

    Examples
    --------

    """
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Boolean/integer coefficients cannot hold the result.
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # Append one axis per dimension of x so c broadcasts over it.
        c = c.reshape(c.shape + (1,)*x.ndim)

    # Clenshaw recursion: maintain the two running terms b0, b1 such
    # that the final value is b0 + b1*x.
    if len(c) == 1:
        b0, b1 = c[0], 0
    elif len(c) == 2:
        b0, b1 = c[0], c[1]
    else:
        x2 = 2*x
        b0, b1 = c[-2], c[-1]
        for k in range(3, len(c) + 1):
            prev = b0
            b0 = c[-k] - b1
            b1 = prev + b1*x2
    return b0 + b1*x
def chebval2d(x, y, c):
    """
    Evaluate a 2-D Chebyshev series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars and they
    must have the same shape after conversion. In either case, either `x`
    and `y` or their elements must support multiplication and addition both
    with themselves and with the elements of `c`.

    If `c` is a 1-D array a one is implicitly appended to its shape to make
    it 2-D. The shape of the result will be c.shape[2:] + x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points `(x, y)`,
        where `x` and `y` must have the same shape. If `x` or `y` is a list
        or tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and if it isn't an ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than 2 the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional Chebyshev series at points formed
        from pairs of corresponding values from `x` and `y`.

    See Also
    --------
    chebval, chebgrid2d, chebval3d, chebgrid3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    try:
        x, y = np.array((x, y), copy=0)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine conversion failures should be
        # translated into ValueError.
        raise ValueError('x, y are incompatible')

    # Evaluate along x, then collapse the intermediate result along y
    # (tensor=False broadcasts y over the columns produced by the first call).
    c = chebval(x, c)
    c = chebval(y, c, tensor=False)
    return c
def chebgrid2d(x, y, c):
    """
    Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),

    where the points `(a, b)` consist of all pairs formed by taking `a`
    from `x` and `b` from `y`. The resulting points form a grid with `x`
    in the first dimension and `y` in the second.

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars. In either
    case, either `x` and `y` or their elements must support multiplication
    and addition both with themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape + y.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`. If `x` or `y` is a list or
        tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and, if it isn't an ndarray, it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional Chebyshev series at points in the
        Cartesian product of `x` and `y`.

    See Also
    --------
    chebval, chebval2d, chebval3d, chebgrid3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    # Two successive 1-D evaluations (both in tensor mode) produce the
    # full outer-product grid: first over x, then over y.
    return chebval(y, chebval(x, c))
def chebval3d(x, y, z, c):
    """
    Evaluate a 3-D Chebyshev series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)

    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or a lists, otherwise they are treated as a scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than 3 the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.

    See Also
    --------
    chebval, chebval2d, chebgrid2d, chebgrid3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    try:
        x, y, z = np.array((x, y, z), copy=0)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine conversion failures should be
        # translated into ValueError.
        raise ValueError('x, y, z are incompatible')

    # Evaluate along x, then collapse the result along y and z in turn.
    c = chebval(x, c)
    c = chebval(y, c, tensor=False)
    c = chebval(z, c, tensor=False)
    return c
def chebgrid3d(x, y, z, c):
    """
    Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)

    where the points `(a, b, c)` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
    a grid with `x` in the first dimension, `y` in the second, and `z` in
    the third.

    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or a lists, otherwise they are treated as a scalars. In
    either case, either `x`, `y`, and `z` or their elements must support
    multiplication and addition both with themselves and with the elements
    of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape + y.shape + z.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
        list or tuple, it is first converted to an ndarray, otherwise it is
        left unchanged and, if it isn't an ndarray, it is treated as a
        scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has
        dimension greater than three the remaining indices enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the three dimensional Chebyshev series at points in
        the Cartesian product of `x`, `y`, and `z`.

    See Also
    --------
    chebval, chebval2d, chebgrid2d, chebval3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    # Three successive tensor-mode evaluations build the full outer-product
    # grid over x, y, and z. (Docstring previously described the 2-D case;
    # the code was always 3-D.)
    c = chebval(x, c)
    c = chebval(y, c)
    c = chebval(z, c)
    return c
def chebvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Return the matrix ``V`` defined by ``V[..., i] = T_i(x)`` for
    ``0 <= i <= deg``. The leading indices of `V` index the elements of
    `x` and the last index is the degree of the Chebyshev polynomial.

    If `c` is a 1-D array of coefficients of length ``deg + 1`` and
    ``V = chebvander(x, deg)``, then ``np.dot(V, c)`` and ``chebval(x, c)``
    agree up to roundoff, which makes this matrix useful both for least
    squares fitting and for evaluating many series of the same degree at
    the same sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo Vandermonde matrix of shape ``x.shape + (deg + 1,)``.
        The dtype is the same as the converted `x`.
    """
    order = int(deg)
    if order != deg:
        raise ValueError("deg must be integer")
    if order < 0:
        raise ValueError("deg must be non-negative")

    # Promote to (at least) a 1-D floating array; '+ 0.0' forces the
    # float64/complex128 upcast.
    pts = np.array(x, copy=0, ndmin=1) + 0.0
    vmat = np.empty((order + 1,) + pts.shape, dtype=pts.dtype)

    # Forward recursion: T_0 = 1, T_1 = x, T_k = 2*x*T_{k-1} - T_{k-2}.
    vmat[0] = pts*0 + 1
    if order > 0:
        twice = 2*pts
        vmat[1] = pts
        for k in range(2, order + 1):
            vmat[k] = vmat[k-1]*twice - vmat[k-2]

    # Move the degree axis to the end.
    return np.rollaxis(vmat, 0, vmat.ndim)
def chebvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y)`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., deg[1]*i + j] = T_i(x) * T_j(y),

    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
    `V` index the points `(x, y)` and the last index encodes the degrees of
    the Chebyshev polynomials.

    If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D Chebyshev
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    chebvander, chebvander3d, chebval2d, chebval3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    ideg = [int(d) for d in deg]
    # Avoid shadowing the ``id`` builtin (as the old comprehension did) and
    # compare like with like instead of booleans against [1, 1].
    if len(ideg) != 2 or any(i != d or i < 0 for i, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg
    x, y = np.array((x, y), copy=0) + 0.0

    vx = chebvander(x, degx)
    vy = chebvander(y, degy)
    # Outer product over the degree axes, then flatten them into one.
    v = vx[..., None]*vy[..., None, :]
    return v.reshape(v.shape[:-2] + (-1,))
def chebvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),

    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
    indices of `V` index the points `(x, y, z)` and the last index encodes
    the degrees of the Chebyshev polynomials.

    If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D Chebyshev
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes will
        be converted to either float64 or complex128 depending on whether
        any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    chebvander, chebvander2d, chebval2d, chebval3d

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    ideg = [int(d) for d in deg]
    # Avoid shadowing the ``id`` builtin (as the old comprehension did) and
    # compare like with like instead of booleans against [1, 1, 1].
    if len(ideg) != 3 or any(i != d or i < 0 for i, d in zip(ideg, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg
    x, y, z = np.array((x, y, z), copy=0) + 0.0

    vx = chebvander(x, degx)
    vy = chebvander(y, degy)
    vz = chebvander(z, degz)
    # Outer product over the three degree axes, then flatten them into one.
    v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
    return v.reshape(v.shape[:-3] + (-1,))
def chebfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Chebyshev series to data.
    Return the coefficients of a Chebyshev series of degree `deg` that is the
    least squares fit to the data values `y` given at points `x`. If `y` is
    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
    fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form
    .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
    where `n` is `deg`.
    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting series
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products ``w[i]*y[i]``
        all have the same variance. The default value is None.
        .. versionadded:: 1.5.0
    Returns
    -------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
        Chebyshev coefficients ordered from low to high. If `y` was 2-D,
        the coefficients for the data in column k of `y` are in column
        `k`.
    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True
        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.
        For more details, see `linalg.lstsq`.
    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False. The
        warnings can be turned off by
        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)
    See Also
    --------
    polyfit, legfit, lagfit, hermfit, hermefit
    chebval : Evaluates a Chebyshev series.
    chebvander : Vandermonde matrix of Chebyshev series.
    chebweight : Chebyshev weight function.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.
    Notes
    -----
    The solution is the coefficients of the Chebyshev series `p` that
    minimizes the sum of the weighted squared errors
    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
    where :math:`w_j` are the weights. This problem is solved by setting up
    as the (typically) overdetermined matrix equation
    .. math:: V(x) * c = w * y,
    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
    coefficients to be solved for, `w` are the weights, and `y` are the
    observed values. This equation is then solved using the singular value
    decomposition of `V`.
    If some of the singular values of `V` are so small that they are
    neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
    will usually get rid of the warning. The `rcond` parameter can also be
    set to a value smaller than its default, but the resulting fit may be
    spurious and have large contributions from roundoff error.
    Fits using Chebyshev series are usually better conditioned than fits
    using power series, but much can depend on the distribution of the
    sample points and the smoothness of the data. If the quality of the fit
    is inadequate splines may be a good alternative.
    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
    http://en.wikipedia.org/wiki/Curve_fitting
    Examples
    --------
    """
    # Number of coefficients to solve for (degree + constant term).
    order = int(deg) + 1
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")
    # set up the least squares matrices in transposed form
    lhs = chebvander(x, deg).T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w
    # set rcond
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps
    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    # A zero norm means the column is identically zero; leave it unscaled
    # to avoid division by zero below.
    scl[scl == 0] = 1
    # Solve the least squares problem. Scaling the columns to unit norm
    # improves the conditioning of the system; the scaling is undone on c.
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T
    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning)
    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
def chebcompanion(c):
    """Return the scaled companion matrix of c.
    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is a Chebyshev basis polynomial. This provides
    better eigenvalue estimates than the unscaled case and for basis
    polynomials the eigenvalues are guaranteed to be real if
    `numpy.linalg.eigvalsh` is used to obtain them.
    Parameters
    ----------
    c : array_like
        1-D array of Chebyshev series coefficients ordered from low to high
        degree.
    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).
    Notes
    -----
    .. versionadded:: 1.7.0
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # Linear series: the 1x1 companion matrix holds the single root.
        return np.array([[-c[0]/c[1]]])
    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Per-basis scaling [1, sqrt(1/2), sqrt(1/2), ...] that symmetrizes the
    # companion matrix for pure basis polynomials.
    scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
    # top/bot are strided views into mat's flat storage, selecting the
    # super- and sub-diagonal; assigning to them writes into mat in place.
    top = mat.reshape(-1)[1::n+1]
    bot = mat.reshape(-1)[n::n+1]
    top[0] = np.sqrt(.5)
    top[1:] = 1/2
    bot[...] = top
    # Fold the normalized coefficients into the last column.
    mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
    return mat
def chebroots(c):
    """
    Compute the roots of a Chebyshev series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * T_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, lagroots, hermroots, hermeroots

    Notes
    -----
    The roots are obtained as the eigenvalues of the companion matrix.
    Roots far from the origin of the complex plane may have large errors
    due to the numerical instability of the series for such values, and
    roots with multiplicity greater than 1 will also show larger errors
    because the series is relatively insensitive to errors in the roots
    there. Isolated roots near the origin can be improved by a few
    iterations of Newton's method. Since the Chebyshev basis polynomials
    aren't powers of `x`, the results may seem unintuitive.

    Examples
    --------
    >>> import numpy.polynomial.chebyshev as cheb
    >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
    array([ -5.00000000e-01,   2.60860684e-17,   1.00000000e+00])

    """
    # Normalise to a trimmed 1-D series.
    [c] = pu.as_series([c])
    if len(c) <= 1:
        # A constant (or empty) series has no roots.
        return np.array([], dtype=c.dtype)
    if len(c) == 2:
        # Linear series: single root of c[0] + c[1]*T_1(x).
        return np.array([-c[0]/c[1]])
    # General case: eigenvalues of the scaled companion matrix.
    roots = la.eigvals(chebcompanion(c))
    roots.sort()
    return roots
def chebgauss(deg):
    """
    Gauss-Chebyshev quadrature.

    Computes the sample points and weights for Gauss-Chebyshev quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
    the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----
    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. For Gauss-Chebyshev there are closed form solutions for
    the sample points and weights. If n = `deg`, then

    .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))

    .. math:: w_i = \\pi / n

    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        # deg == 0 is rejected too, so the old "non-negative" wording was
        # wrong: at least one sample point is required.
        raise ValueError("deg must be a positive integer")

    # Closed-form nodes and weights (see Notes).
    x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
    w = np.ones(ideg)*(np.pi/ideg)
    return x, w
def chebweight(x):
    """
    The weight function of the Chebyshev polynomials.

    The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of
    integration is :math:`[-1, 1]`. The Chebyshev polynomials are
    orthogonal, but not normalized, with respect to this weight function.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----
    .. versionadded:: 1.7.0

    """
    # Computed as the product sqrt(1+x)*sqrt(1-x) rather than sqrt(1-x*x),
    # matching the module's convention for evaluating near the endpoints.
    return 1.0 / (np.sqrt(1.0 + x) * np.sqrt(1.0 - x))
def chebpts1(npts):
    """
    Chebyshev points of the first kind.

    These are the ``npts`` values ``cos(pi*(k + .5)/npts)`` for
    ``k = npts - 1, ..., 0`` -- the zeros of T_npts -- returned in
    increasing order.

    Parameters
    ----------
    npts : int
        Number of sample points desired; must be at least 1.

    Returns
    -------
    pts : ndarray
        The Chebyshev points of the first kind.

    See Also
    --------
    chebpts2

    Notes
    -----
    .. versionadded:: 1.5.0

    """
    count = int(npts)
    if count != npts:
        raise ValueError("npts must be integer")
    if count < 1:
        raise ValueError("npts must be >= 1")
    # Angles from -pi towards 0, shifted half a step so the points are the
    # midpoints of a uniform grid; taking cos gives increasing points.
    angles = np.linspace(-np.pi, 0, count, endpoint=False) + np.pi/(2*count)
    return np.cos(angles)
def chebpts2(npts):
    """
    Chebyshev points of the second kind.

    These are the ``npts`` values ``cos(pi*k/(npts - 1))`` for
    ``k = npts - 1, ..., 0`` -- the extrema of T_{npts-1} together with the
    interval endpoints -- returned in increasing order.

    Parameters
    ----------
    npts : int
        Number of sample points desired; must be at least 2.

    Returns
    -------
    pts : ndarray
        The Chebyshev points of the second kind.

    Notes
    -----
    .. versionadded:: 1.5.0

    """
    count = int(npts)
    if count != npts:
        raise ValueError("npts must be integer")
    if count < 2:
        raise ValueError("npts must be >= 2")
    # Uniform angles from -pi to 0 inclusive; cos maps them to [-1, 1].
    angles = np.linspace(-np.pi, 0, count)
    return np.cos(angles)
#
# Chebyshev series class
#
class Chebyshev(ABCPolyBase):
    """A Chebyshev series class.

    The Chebyshev class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    methods listed below.

    Parameters
    ----------
    coef : array_like
        Chebyshev coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].

        .. versionadded:: 1.6.0

    """
    # Virtual Functions
    # Hook the module-level Chebyshev routines into the ABCPolyBase
    # machinery; the base class implements the arithmetic operators and
    # methods in terms of these static callables.
    _add = staticmethod(chebadd)
    _sub = staticmethod(chebsub)
    _mul = staticmethod(chebmul)
    _div = staticmethod(chebdiv)
    _pow = staticmethod(chebpow)
    _val = staticmethod(chebval)
    _int = staticmethod(chebint)
    _der = staticmethod(chebder)
    _fit = staticmethod(chebfit)
    _line = staticmethod(chebline)
    _roots = staticmethod(chebroots)
    _fromroots = staticmethod(chebfromroots)
    # Virtual properties
    # Short name used in string representations.
    nickname = 'cheb'
    # Default domain and window are both the natural Chebyshev interval,
    # so both are initialized from ``chebdomain``.
    domain = np.array(chebdomain)
    window = np.array(chebdomain)
| gpl-2.0 |
bbmt-bbmt/MagretUtilQT | Privilege.py | 1 | 1654 | # -*- coding: utf-8 -*-
import win32con
import sys
import os
import ctypes
from ProcessWithLogon import CreateProcessWithLogonW
from ProcessWithLogon import STARTUPINFO
from win32com.shell.shell import ShellExecuteEx
from win32com.shell import shellcon
import logging
log = logging.getLogger(__name__)
def get_privilege(login, password, domaine=None, uac=False):
    """Relaunch the current script under the given Windows account.

    Builds a STARTUPINFO structure requesting a hidden window, then calls
    CreateProcessWithLogonW to start this script again with *login*'s
    credentials. When *uac* is True, the literal argument 'pass_uac' is
    appended to the command line so the child knows it must elevate.

    NOTE(review): assumes sys.argv[0] is an executable/associated file the
    child process can run from the original working directory -- confirm.
    """
    log.info("get privilege lancé")
    # Zero-fill the Win32 STARTUPINFO structure by hand.
    lpStartupInfo = STARTUPINFO()
    lpStartupInfo.cb = ctypes.sizeof(STARTUPINFO)
    lpStartupInfo.lpReserved = 0
    lpStartupInfo.lpDesktop = 0
    lpStartupInfo.lpTitle = 0  # could be e.g. ctypes.c_wchar_p('window title')
    lpStartupInfo.dwFlags = 0  # win32con.STARTF_USESHOWWINDOW
    lpStartupInfo.cbReserved2 = 0
    lpStartupInfo.lpReserved2 = 0
    # wShowWindow is only honored when STARTF_USESHOWWINDOW is set below.
    lpStartupInfo.wShowWindow = win32con.SW_HIDE
    pass_uac_str = ''
    if uac:
        pass_uac_str = 'pass_uac'
        lpStartupInfo.dwFlags = win32con.STARTF_USESHOWWINDOW
    # cmd = 'cmd.exe /C "cd /D \"%s\" & \"%s\" %s"' % (os.getcwd(), sys.argv[0], pass_uac)
    name = sys.argv[0].split('\\')[-1]
    path = os.getcwd()
    # Changing directory first lets 'pushd' run without error.
    os.chdir("c:\\")
    # new_cmd = 'cmd /C "pushd %s && \"%s\" %s"' % (path, name, pass_uac)
    # Command line: script name, original working directory, optional flag.
    new_cmd = name + " " + path + " " + pass_uac_str
    log.debug("get privilege cmd %s" % new_cmd)
    CreateProcessWithLogonW(login, domaine, password, 0, None,
                            new_cmd, lpStartupInfo=lpStartupInfo)
def pass_uac():
    """Re-run the current script elevated via the Windows 'runas' verb.

    Triggers the UAC consent prompt; SEE_MASK_NOCLOSEPROCESS keeps the
    child's process handle available to the caller of ShellExecuteEx.
    """
    log.info("pass uac lancé")
    ShellExecuteEx(nShow=win32con.SW_SHOWNORMAL, fMask=shellcon.SEE_MASK_NOCLOSEPROCESS, lpVerb='runas', lpFile=sys.argv[0])
    return
| gpl-3.0 |
thomasgilgenast/spqr-nonrel | django/contrib/gis/gdal/base.py | 398 | 1143 | from ctypes import c_void_p
from types import NoneType
from django.contrib.gis.gdal.error import GDALException
class GDALBase(object):
    """
    Base object for GDAL objects that has a pointer access property
    that controls access to the underlying C pointer.

    Subclasses store the ctypes pointer in ``_ptr`` and read/write it
    through the ``ptr`` property, which validates on every access.
    """
    # Initially the pointer is NULL.
    _ptr = None
    # Default allowed pointer type.
    ptr_type = c_void_p
    # Pointer access property.
    def _get_ptr(self):
        # Raise an exception if the pointer isn't valid don't
        # want to be passing NULL pointers to routines --
        # that's very bad.
        if self._ptr: return self._ptr
        else: raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__)
    def _set_ptr(self, ptr):
        # Only allow the pointer to be set with pointers of the
        # compatible type or None (NULL).
        # NOTE: ``long`` and ``NoneType`` (imported from ``types``) are
        # Python 2 constructs; this module targets Python 2-era Django.
        if isinstance(ptr, (int, long)):
            # Raw integer addresses are wrapped in the allowed ctypes type.
            self._ptr = self.ptr_type(ptr)
        elif isinstance(ptr, (self.ptr_type, NoneType)):
            self._ptr = ptr
        else:
            raise TypeError('Incompatible pointer type')
    ptr = property(_get_ptr, _set_ptr)
| bsd-3-clause |
electricimp/otp | lib/asn1/test/asn1_SUITE_data/BitStr.py | 97 | 1813 | BitStr DEFINITIONS ::=
BEGIN
-- F.2.5.1
-- Use a bit string type to model binary data whose format and
-- length are unspecified,
-- or specified elsewhere, and whose length in bits is not necessarily
-- a multiple of eight.
-- EXAMPLE
G3FacsimilePage ::= BIT STRING
-- a sequence of bits conforming to Recommendation T.4.
image G3FacsimilePage ::= '100110100100001110110'B
trailer BIT STRING ::= '0123456789ABCDEF'H
body1 G3FacsimilePage ::= '1101'B
body2 G3FacsimilePage ::= '1101000'B
-- F.2.5.2
-- Use a bit string type with a size constraint to model the
-- values of a fixed sized bit field.
-- EXAMPLE
BitField ::= BIT STRING (SIZE (12))
map1 BitField ::= '100110100100'B
map2 BitField ::= '9A4'H
map3 BitField ::= '1001101001'B -- Illegal - violates size constraint
-- F.2.5.3
-- Use a bit string type to model the values of a bit map, an
-- ordered collection of logical variables
-- indicating whether a particular condition holds for each of a
-- correspondingly ordered collection of objects.
DaysOfTheWeek ::= BIT STRING {
sunday(0), monday (1), tuesday(2),
wednesday(3), thursday(4), friday(5),
saturday(6) } (SIZE (0..7))
sunnyDaysLastWeek1 DaysOfTheWeek ::= {sunday, monday, wednesday}
sunnyDaysLastWeek2 DaysOfTheWeek ::= '1101'B
sunnyDaysLastWeek3 DaysOfTheWeek ::= '1101000'B
sunnyDaysLastWeek4 DaysOfTheWeek ::= '11010000'B -- Illegal - violates size constraint
-- F.2.5.5
-- Use a bit string type with named bits to model the values of a
-- collection of related logical variables.
-- EXAMPLE
PersonalStatus ::= BIT STRING
{married(0), employed(1), veteran(2), collegeGraduate(3)}
billClinton PersonalStatus ::= {married, employed, collegeGraduate}
hillaryClinton PersonalStatus ::= '110100'B
END
| apache-2.0 |
naparuba/shinken | test/test_srv_badhost.py | 18 | 1321 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestServiceWhithBadHost(ShinkenTest):
    # NOTE: the "Whith"/"Whit" spellings are kept -- the class and test
    # method names are the test runner's public interface.
    def setUp(self):
        """Load a configuration whose service references a missing host."""
        try:
            self.setup_with_file('etc/shinken_srv_badhost.cfg')
        except AttributeError:
            # NOTE(review): silently tolerating AttributeError here looks
            # deliberate (setup may legitimately fail for this bad config),
            # but it could also mask a broken test harness -- confirm.
            pass
    # Nagios allow service with no host to exist, it will just drop them
    def test_ServiceWhitNoHost(self):
        """The parsed configuration must be flagged as not correct."""
        self.assertEqual(False, self.conf.conf_is_correct)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
2ndQuadrant/ansible | lib/ansible/modules/network/fortimanager/fmgr_fwobj_vip.py | 39 | 125192 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
# Python 2 compatibility: make every class defined in this module
# new-style by default.
__metaclass__ = type
# Module metadata consumed by ansible-doc and the Ansible sanity checks.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community',
}
DOCUMENTATION = '''
---
module: fmgr_fwobj_vip
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Manages Virtual IPs objects in FortiManager
description:
- Manages Virtual IP objects in FortiManager for IPv4
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
mode:
description:
- Sets one of three modes for managing the object.
- Allows use of soft-adds instead of overwriting existing values
choices: ['add', 'set', 'delete', 'update']
required: false
default: add
websphere_server:
description:
- Enable to add an HTTP header to indicate SSL offloading for a WebSphere server.
- choice | disable | Do not add HTTP header indicating SSL offload for WebSphere server.
- choice | enable | Add HTTP header indicating SSL offload for WebSphere server.
required: false
choices: ["disable", "enable"]
weblogic_server:
description:
- Enable to add an HTTP header to indicate SSL offloading for a WebLogic server.
- choice | disable | Do not add HTTP header indicating SSL offload for WebLogic server.
- choice | enable | Add HTTP header indicating SSL offload for WebLogic server.
required: false
choices: ["disable", "enable"]
type:
description:
- Configure a static NAT, load balance, server load balance, DNS translation, or FQDN VIP.
- choice | static-nat | Static NAT.
- choice | load-balance | Load balance.
- choice | server-load-balance | Server load balance.
- choice | dns-translation | DNS translation.
- choice | fqdn | FQDN Translation
required: false
choices: ["static-nat", "load-balance", "server-load-balance", "dns-translation", "fqdn"]
ssl_server_session_state_type:
description:
- How to expire SSL sessions for the segment of the SSL connection between the server and the FortiGate.
- choice | disable | Do not keep session states.
- choice | time | Expire session states after this many minutes.
- choice | count | Expire session states when this maximum is reached.
- choice | both | Expire session states based on time or count, whichever occurs first.
required: false
choices: ["disable", "time", "count", "both"]
ssl_server_session_state_timeout:
description:
- Number of minutes to keep FortiGate to Server SSL session state.
required: false
ssl_server_session_state_max:
description:
- Maximum number of FortiGate to Server SSL session states to keep.
required: false
ssl_server_min_version:
description:
- Lowest SSL/TLS version acceptable from a server. Use the client setting by default.
- choice | ssl-3.0 | SSL 3.0.
- choice | tls-1.0 | TLS 1.0.
- choice | tls-1.1 | TLS 1.1.
- choice | tls-1.2 | TLS 1.2.
- choice | client | Use same value as client configuration.
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]
ssl_server_max_version:
description:
- Highest SSL/TLS version acceptable from a server. Use the client setting by default.
- choice | ssl-3.0 | SSL 3.0.
- choice | tls-1.0 | TLS 1.0.
- choice | tls-1.1 | TLS 1.1.
- choice | tls-1.2 | TLS 1.2.
- choice | client | Use same value as client configuration.
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]
ssl_server_algorithm:
description:
- Permitted encryption algorithms for the server side of SSL full mode sessions according to encryption strength
- choice | high | High encryption. Allow only AES and ChaCha.
- choice | low | Low encryption. Allow AES, ChaCha, 3DES, RC4, and DES.
- choice | medium | Medium encryption. Allow AES, ChaCha, 3DES, and RC4.
- choice | custom | Custom encryption. Use ssl-server-cipher-suites to select the cipher suites that are allowed.
- choice | client | Use the same encryption algorithms for both client and server sessions.
required: false
choices: ["high", "low", "medium", "custom", "client"]
ssl_send_empty_frags:
description:
- Enable/disable sending empty fragments to avoid CBC IV attacks (SSL 3.0 & TLS 1.0 only).
- choice | disable | Do not send empty fragments.
- choice | enable | Send empty fragments.
required: false
choices: ["disable", "enable"]
ssl_pfs:
description:
- Select the cipher suites that can be used for SSL perfect forward secrecy (PFS).
- choice | require | Allow only Diffie-Hellman cipher-suites, so PFS is applied.
- choice | deny | Allow only non-Diffie-Hellman cipher-suites, so PFS is not applied.
- choice | allow | Allow use of any cipher suite so PFS may or may not be used depending on the cipher suite
required: false
choices: ["require", "deny", "allow"]
ssl_mode:
description:
- Apply SSL offloading mode
- choice | half | Client to FortiGate SSL.
- choice | full | Client to FortiGate and FortiGate to Server SSL.
required: false
choices: ["half", "full"]
ssl_min_version:
description:
- Lowest SSL/TLS version acceptable from a client.
- choice | ssl-3.0 | SSL 3.0.
- choice | tls-1.0 | TLS 1.0.
- choice | tls-1.1 | TLS 1.1.
- choice | tls-1.2 | TLS 1.2.
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
ssl_max_version:
description:
- Highest SSL/TLS version acceptable from a client.
- choice | ssl-3.0 | SSL 3.0.
- choice | tls-1.0 | TLS 1.0.
- choice | tls-1.1 | TLS 1.1.
- choice | tls-1.2 | TLS 1.2.
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
ssl_http_match_host:
description:
- Enable/disable HTTP host matching for location conversion.
- choice | disable | Do not match HTTP host.
- choice | enable | Match HTTP host in response header.
required: false
choices: ["disable", "enable"]
ssl_http_location_conversion:
description:
- Enable to replace HTTP with HTTPS in the reply's Location HTTP header field.
- choice | disable | Disable HTTP location conversion.
- choice | enable | Enable HTTP location conversion.
required: false
choices: ["disable", "enable"]
ssl_hsts_include_subdomains:
description:
- Indicate that HSTS header applies to all subdomains.
- choice | disable | HSTS header does not apply to subdomains.
- choice | enable | HSTS header applies to subdomains.
required: false
choices: ["disable", "enable"]
ssl_hsts_age:
description:
- Number of seconds the client should honour the HSTS setting.
required: false
ssl_hsts:
description:
- Enable/disable including HSTS header in response.
- choice | disable | Do not add a HSTS header to each a HTTP response.
- choice | enable | Add a HSTS header to each HTTP response.
required: false
choices: ["disable", "enable"]
ssl_hpkp_report_uri:
description:
- URL to report HPKP violations to.
required: false
ssl_hpkp_primary:
description:
- Certificate to generate primary HPKP pin from.
required: false
ssl_hpkp_include_subdomains:
description:
- Indicate that HPKP header applies to all subdomains.
- choice | disable | HPKP header does not apply to subdomains.
- choice | enable | HPKP header applies to subdomains.
required: false
choices: ["disable", "enable"]
ssl_hpkp_backup:
description:
- Certificate to generate backup HPKP pin from.
required: false
ssl_hpkp_age:
description:
- Number of seconds the client should honour the HPKP setting.
required: false
ssl_hpkp:
description:
- Enable/disable including HPKP header in response.
- choice | disable | Do not add a HPKP header to each HTTP response.
- choice | enable | Add a HPKP header to each a HTTP response.
- choice | report-only | Add a HPKP Report-Only header to each HTTP response.
required: false
choices: ["disable", "enable", "report-only"]
ssl_dh_bits:
description:
- Number of bits to use in the Diffie-Hellman exchange for RSA encryption of SSL sessions.
- choice | 768 | 768-bit Diffie-Hellman prime.
- choice | 1024 | 1024-bit Diffie-Hellman prime.
- choice | 1536 | 1536-bit Diffie-Hellman prime.
- choice | 2048 | 2048-bit Diffie-Hellman prime.
- choice | 3072 | 3072-bit Diffie-Hellman prime.
- choice | 4096 | 4096-bit Diffie-Hellman prime.
required: false
choices: ["768", "1024", "1536", "2048", "3072", "4096"]
ssl_client_session_state_type:
description:
- How to expire SSL sessions for the segment of the SSL connection between the client and the FortiGate.
- choice | disable | Do not keep session states.
- choice | time | Expire session states after this many minutes.
- choice | count | Expire session states when this maximum is reached.
- choice | both | Expire session states based on time or count, whichever occurs first.
required: false
choices: ["disable", "time", "count", "both"]
ssl_client_session_state_timeout:
description:
- Number of minutes to keep client to FortiGate SSL session state.
required: false
ssl_client_session_state_max:
description:
- Maximum number of client to FortiGate SSL session states to keep.
required: false
ssl_client_renegotiation:
description:
- Allow, deny, or require secure renegotiation of client sessions to comply with RFC 5746.
- choice | deny | Abort any client initiated SSL re-negotiation attempt.
- choice | allow | Allow a SSL client to renegotiate.
- choice | secure | Abort any client initiated SSL re-negotiation attempt that does not use RFC 5746.
required: false
choices: ["deny", "allow", "secure"]
ssl_client_fallback:
description:
- Enable/disable support for preventing Downgrade Attacks on client connections (RFC 7507).
- choice | disable | Disable.
- choice | enable | Enable.
required: false
choices: ["disable", "enable"]
ssl_certificate:
description:
- The name of the SSL certificate to use for SSL acceleration.
required: false
ssl_algorithm:
description:
- Permitted encryption algorithms for SSL sessions according to encryption strength.
- choice | high | High encryption. Allow only AES and ChaCha.
- choice | medium | Medium encryption. Allow AES, ChaCha, 3DES, and RC4.
- choice | low | Low encryption. Allow AES, ChaCha, 3DES, RC4, and DES.
- choice | custom | Custom encryption. Use config ssl-cipher-suites to select the cipher suites that are allowed.
required: false
choices: ["high", "medium", "low", "custom"]
srcintf_filter:
description:
- Interfaces to which the VIP applies. Separate the names with spaces.
required: false
src_filter:
description:
- Source address filter. Each address must be either an IP/subnet (x.x.x.x/n) or a range (x.x.x.x-y.y.y.y).
- Separate addresses with spaces.
required: false
service:
description:
- Service name.
required: false
server_type:
description:
- Protocol to be load balanced by the virtual server (also called the server load balance virtual IP).
- choice | http | HTTP
- choice | https | HTTPS
- choice | ssl | SSL
- choice | tcp | TCP
- choice | udp | UDP
- choice | ip | IP
- choice | imaps | IMAPS
- choice | pop3s | POP3S
- choice | smtps | SMTPS
required: false
choices: ["http", "https", "ssl", "tcp", "udp", "ip", "imaps", "pop3s", "smtps"]
protocol:
description:
- Protocol to use when forwarding packets.
- choice | tcp | TCP.
- choice | udp | UDP.
- choice | sctp | SCTP.
- choice | icmp | ICMP.
required: false
choices: ["tcp", "udp", "sctp", "icmp"]
portmapping_type:
description:
- Port mapping type.
- choice | 1-to-1 | One to one.
- choice | m-to-n | Many to many.
required: false
choices: ["1-to-1", "m-to-n"]
portforward:
description:
- Enable/disable port forwarding.
- choice | disable | Disable port forward.
- choice | enable | Enable port forward.
required: false
choices: ["disable", "enable"]
persistence:
description:
- Configure how to make sure that clients connect to the same server every time they make a request that is part
- of the same session.
- choice | none | None.
- choice | http-cookie | HTTP cookie.
- choice | ssl-session-id | SSL session ID.
required: false
choices: ["none", "http-cookie", "ssl-session-id"]
outlook_web_access:
description:
- Enable to add the Front-End-Https header for Microsoft Outlook Web Access.
- choice | disable | Disable Outlook Web Access support.
- choice | enable | Enable Outlook Web Access support.
required: false
choices: ["disable", "enable"]
nat_source_vip:
description:
- Enable to prevent unintended servers from using a virtual IP.
- Disable to use the actual IP address of the server as the source address.
- choice | disable | Do not force to NAT as VIP.
- choice | enable | Force to NAT as VIP.
required: false
choices: ["disable", "enable"]
name:
description:
- Virtual IP name.
required: false
monitor:
description:
- Name of the health check monitor to use when polling to determine a virtual server's connectivity status.
required: false
max_embryonic_connections:
description:
- Maximum number of incomplete connections.
required: false
mappedport:
description:
- Port number range on the destination network to which the external port number range is mapped.
required: false
mappedip:
description:
- IP address or address range on the destination network to which the external IP address is mapped.
required: false
mapped_addr:
description:
- Mapped FQDN address name.
required: false
ldb_method:
description:
- Method used to distribute sessions to real servers.
- choice | static | Distribute to server based on source IP.
- choice | round-robin | Distribute to server based round robin order.
- choice | weighted | Distribute to server based on weight.
- choice | least-session | Distribute to server with lowest session count.
- choice | least-rtt | Distribute to server with lowest Round-Trip-Time.
- choice | first-alive | Distribute to the first server that is alive.
- choice | http-host | Distribute to server based on host field in HTTP header.
required: false
choices: ["static", "round-robin", "weighted", "least-session", "least-rtt", "first-alive", "http-host"]
https_cookie_secure:
description:
- Enable/disable verification that inserted HTTPS cookies are secure.
- choice | disable | Do not mark cookie as secure, allow sharing between an HTTP and HTTPS connection.
- choice | enable | Mark inserted cookie as secure, cookie can only be used for HTTPS a connection.
required: false
choices: ["disable", "enable"]
http_multiplex:
description:
- Enable/disable HTTP multiplexing.
- choice | disable | Disable HTTP session multiplexing.
- choice | enable | Enable HTTP session multiplexing.
required: false
choices: ["disable", "enable"]
http_ip_header_name:
description:
- For HTTP multiplexing, enter a custom HTTPS header name. The orig client IP address is added to this header.
- If empty, X-Forwarded-For is used.
required: false
http_ip_header:
description:
- For HTTP multiplexing, enable to add the original client IP address in the XForwarded-For HTTP header.
- choice | disable | Disable adding HTTP header.
- choice | enable | Enable adding HTTP header.
required: false
choices: ["disable", "enable"]
http_cookie_share:
description:
- Control sharing of cookies across virtual servers. same-ip means a cookie from one virtual server can be used
- by another. Disable stops cookie sharing.
- choice | disable | Only allow HTTP cookie to match this virtual server.
- choice | same-ip | Allow HTTP cookie to match any virtual server with same IP.
required: false
choices: ["disable", "same-ip"]
http_cookie_path:
description:
- Limit HTTP cookie persistence to the specified path.
required: false
http_cookie_generation:
description:
- Generation of HTTP cookie to be accepted. Changing invalidates all existing cookies.
required: false
http_cookie_domain_from_host:
description:
- Enable/disable use of HTTP cookie domain from host field in HTTP.
- choice | disable | Disable use of HTTP cookie domain from host field in HTTP (use http-cooke-domain setting).
- choice | enable | Enable use of HTTP cookie domain from host field in HTTP.
required: false
choices: ["disable", "enable"]
http_cookie_domain:
description:
- Domain that HTTP cookie persistence should apply to.
required: false
http_cookie_age:
description:
- Time in minutes that client web browsers should keep a cookie. Default is 60 seconds. 0 = no time limit.
required: false
gratuitous_arp_interval:
description:
- Enable to have the VIP send gratuitous ARPs. 0=disabled. Set from 5 up to 8640000 seconds to enable.
required: false
extport:
description:
- Incoming port number range that you want to map to a port number range on the destination network.
required: false
extip:
description:
- IP address or address range on the external interface that you want to map to an address or address range on t
- he destination network.
required: false
extintf:
description:
- Interface connected to the source network that receives the packets that will be forwarded to the destination
- network.
required: false
extaddr:
description:
- External FQDN address name.
required: false
dns_mapping_ttl:
description:
- DNS mapping TTL (Set to zero to use TTL in DNS response, default = 0).
required: false
comment:
description:
- Comment.
required: false
color:
description:
- Color of icon on the GUI.
required: false
arp_reply:
description:
- Enable to respond to ARP requests for this virtual IP address. Enabled by default.
- choice | disable | Disable ARP reply.
- choice | enable | Enable ARP reply.
required: false
choices: ["disable", "enable"]
dynamic_mapping:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
dynamic_mapping_arp_reply:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_color:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_comment:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_dns_mapping_ttl:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_extaddr:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_extintf:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_extip:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_extport:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_gratuitous_arp_interval:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_http_cookie_age:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_http_cookie_domain:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_http_cookie_domain_from_host:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_http_cookie_generation:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_http_cookie_path:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_http_cookie_share:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | same-ip |
required: false
choices: ["disable", "same-ip"]
dynamic_mapping_http_ip_header:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_http_ip_header_name:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_http_multiplex:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_https_cookie_secure:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_ldb_method:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | static |
- choice | round-robin |
- choice | weighted |
- choice | least-session |
- choice | least-rtt |
- choice | first-alive |
- choice | http-host |
required: false
choices: ["static", "round-robin", "weighted", "least-session", "least-rtt", "first-alive", "http-host"]
dynamic_mapping_mapped_addr:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_mappedip:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_mappedport:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_max_embryonic_connections:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_monitor:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_nat_source_vip:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_outlook_web_access:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_persistence:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | none |
- choice | http-cookie |
- choice | ssl-session-id |
required: false
choices: ["none", "http-cookie", "ssl-session-id"]
dynamic_mapping_portforward:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_portmapping_type:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | 1-to-1 |
- choice | m-to-n |
required: false
choices: ["1-to-1", "m-to-n"]
dynamic_mapping_protocol:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | tcp |
- choice | udp |
- choice | sctp |
- choice | icmp |
required: false
choices: ["tcp", "udp", "sctp", "icmp"]
dynamic_mapping_server_type:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | http |
- choice | https |
- choice | ssl |
- choice | tcp |
- choice | udp |
- choice | ip |
- choice | imaps |
- choice | pop3s |
- choice | smtps |
required: false
choices: ["http", "https", "ssl", "tcp", "udp", "ip", "imaps", "pop3s", "smtps"]
dynamic_mapping_service:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_src_filter:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_srcintf_filter:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_algorithm:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | high |
- choice | medium |
- choice | low |
- choice | custom |
required: false
choices: ["high", "medium", "low", "custom"]
dynamic_mapping_ssl_certificate:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_client_fallback:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_ssl_client_renegotiation:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | deny |
- choice | allow |
- choice | secure |
required: false
choices: ["deny", "allow", "secure"]
dynamic_mapping_ssl_client_session_state_max:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_client_session_state_timeout:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_client_session_state_type:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | time |
- choice | count |
- choice | both |
required: false
choices: ["disable", "time", "count", "both"]
dynamic_mapping_ssl_dh_bits:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | 768 |
- choice | 1024 |
- choice | 1536 |
- choice | 2048 |
- choice | 3072 |
- choice | 4096 |
required: false
choices: ["768", "1024", "1536", "2048", "3072", "4096"]
dynamic_mapping_ssl_hpkp:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
- choice | report-only |
required: false
choices: ["disable", "enable", "report-only"]
dynamic_mapping_ssl_hpkp_age:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_hpkp_backup:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_hpkp_include_subdomains:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_ssl_hpkp_primary:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_hpkp_report_uri:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_hsts:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_ssl_hsts_age:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_hsts_include_subdomains:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_ssl_http_location_conversion:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_ssl_http_match_host:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_ssl_max_version:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | ssl-3.0 |
- choice | tls-1.0 |
- choice | tls-1.1 |
- choice | tls-1.2 |
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
dynamic_mapping_ssl_min_version:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | ssl-3.0 |
- choice | tls-1.0 |
- choice | tls-1.1 |
- choice | tls-1.2 |
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
dynamic_mapping_ssl_mode:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | half |
- choice | full |
required: false
choices: ["half", "full"]
dynamic_mapping_ssl_pfs:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | require |
- choice | deny |
- choice | allow |
required: false
choices: ["require", "deny", "allow"]
dynamic_mapping_ssl_send_empty_frags:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_ssl_server_algorithm:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | high |
- choice | low |
- choice | medium |
- choice | custom |
- choice | client |
required: false
choices: ["high", "low", "medium", "custom", "client"]
dynamic_mapping_ssl_server_max_version:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | ssl-3.0 |
- choice | tls-1.0 |
- choice | tls-1.1 |
- choice | tls-1.2 |
- choice | client |
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]
dynamic_mapping_ssl_server_min_version:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | ssl-3.0 |
- choice | tls-1.0 |
- choice | tls-1.1 |
- choice | tls-1.2 |
- choice | client |
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]
dynamic_mapping_ssl_server_session_state_max:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_server_session_state_timeout:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_server_session_state_type:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | time |
- choice | count |
- choice | both |
required: false
choices: ["disable", "time", "count", "both"]
dynamic_mapping_type:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | static-nat |
- choice | load-balance |
- choice | server-load-balance |
- choice | dns-translation |
- choice | fqdn |
required: false
choices: ["static-nat", "load-balance", "server-load-balance", "dns-translation", "fqdn"]
dynamic_mapping_weblogic_server:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_websphere_server:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
required: false
choices: ["disable", "enable"]
dynamic_mapping_realservers_client_ip:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_realservers_healthcheck:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | disable |
- choice | enable |
- choice | vip |
required: false
choices: ["disable", "enable", "vip"]
dynamic_mapping_realservers_holddown_interval:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_realservers_http_host:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_realservers_ip:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_realservers_max_connections:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_realservers_monitor:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_realservers_port:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_realservers_seq:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_realservers_status:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | active |
- choice | standby |
- choice | disable |
required: false
choices: ["active", "standby", "disable"]
dynamic_mapping_realservers_weight:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
required: false
dynamic_mapping_ssl_cipher_suites_cipher:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- choice | TLS-RSA-WITH-RC4-128-MD5 |
- choice | TLS-RSA-WITH-RC4-128-SHA |
- choice | TLS-RSA-WITH-DES-CBC-SHA |
- choice | TLS-RSA-WITH-3DES-EDE-CBC-SHA |
- choice | TLS-RSA-WITH-AES-128-CBC-SHA |
- choice | TLS-RSA-WITH-AES-256-CBC-SHA |
- choice | TLS-RSA-WITH-AES-128-CBC-SHA256 |
- choice | TLS-RSA-WITH-AES-256-CBC-SHA256 |
- choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA |
- choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA |
- choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256 |
- choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256 |
- choice | TLS-RSA-WITH-SEED-CBC-SHA |
- choice | TLS-RSA-WITH-ARIA-128-CBC-SHA256 |
- choice | TLS-RSA-WITH-ARIA-256-CBC-SHA384 |
- choice | TLS-DHE-RSA-WITH-DES-CBC-SHA |
- choice | TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA |
- choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA |
- choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA |
- choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA256 |
- choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA256 |
- choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA |
- choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA |
- choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256 |
- choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256 |
- choice | TLS-DHE-RSA-WITH-SEED-CBC-SHA |
- choice | TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256 |
- choice | TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384 |
- choice | TLS-ECDHE-RSA-WITH-RC4-128-SHA |
- choice | TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA |
- choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA |
- choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA |
- choice | TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256 |
- choice | TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256 |
- choice | TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256 |
- choice | TLS-DHE-RSA-WITH-AES-128-GCM-SHA256 |
- choice | TLS-DHE-RSA-WITH-AES-256-GCM-SHA384 |
- choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA |
- choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA |
- choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA256 |
- choice | TLS-DHE-DSS-WITH-AES-128-GCM-SHA256 |
- choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA256 |
- choice | TLS-DHE-DSS-WITH-AES-256-GCM-SHA384 |
- choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256 |
- choice | TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256 |
- choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384 |
- choice | TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384 |
- choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA |
- choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256 |
- choice | TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256 |
- choice | TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384 |
- choice | TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384 |
- choice | TLS-RSA-WITH-AES-128-GCM-SHA256 |
- choice | TLS-RSA-WITH-AES-256-GCM-SHA384 |
- choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA |
- choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA |
- choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256 |
- choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256 |
- choice | TLS-DHE-DSS-WITH-SEED-CBC-SHA |
- choice | TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256 |
- choice | TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384 |
- choice | TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256 |
- choice | TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384 |
- choice | TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256 |
- choice | TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384 |
- choice | TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA |
- choice | TLS-DHE-DSS-WITH-DES-CBC-SHA |
required: false
choices: ["TLS-RSA-WITH-RC4-128-MD5",
"TLS-RSA-WITH-RC4-128-SHA",
"TLS-RSA-WITH-DES-CBC-SHA",
"TLS-RSA-WITH-3DES-EDE-CBC-SHA",
"TLS-RSA-WITH-AES-128-CBC-SHA",
"TLS-RSA-WITH-AES-256-CBC-SHA",
"TLS-RSA-WITH-AES-128-CBC-SHA256",
"TLS-RSA-WITH-AES-256-CBC-SHA256",
"TLS-RSA-WITH-CAMELLIA-128-CBC-SHA",
"TLS-RSA-WITH-CAMELLIA-256-CBC-SHA",
"TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256",
"TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256",
"TLS-RSA-WITH-SEED-CBC-SHA",
"TLS-RSA-WITH-ARIA-128-CBC-SHA256",
"TLS-RSA-WITH-ARIA-256-CBC-SHA384",
"TLS-DHE-RSA-WITH-DES-CBC-SHA",
"TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA",
"TLS-DHE-RSA-WITH-AES-128-CBC-SHA",
"TLS-DHE-RSA-WITH-AES-256-CBC-SHA",
"TLS-DHE-RSA-WITH-AES-128-CBC-SHA256",
"TLS-DHE-RSA-WITH-AES-256-CBC-SHA256",
"TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA",
"TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA",
"TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256",
"TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256",
"TLS-DHE-RSA-WITH-SEED-CBC-SHA",
"TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256",
"TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384",
"TLS-ECDHE-RSA-WITH-RC4-128-SHA",
"TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA",
"TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA",
"TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA",
"TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
"TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256",
"TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
"TLS-DHE-RSA-WITH-AES-128-GCM-SHA256",
"TLS-DHE-RSA-WITH-AES-256-GCM-SHA384",
"TLS-DHE-DSS-WITH-AES-128-CBC-SHA",
"TLS-DHE-DSS-WITH-AES-256-CBC-SHA",
"TLS-DHE-DSS-WITH-AES-128-CBC-SHA256",
"TLS-DHE-DSS-WITH-AES-128-GCM-SHA256",
"TLS-DHE-DSS-WITH-AES-256-CBC-SHA256",
"TLS-DHE-DSS-WITH-AES-256-GCM-SHA384",
"TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256",
"TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256",
"TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384",
"TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384",
"TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA",
"TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256",
"TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256",
"TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384",
"TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384",
"TLS-RSA-WITH-AES-128-GCM-SHA256",
"TLS-RSA-WITH-AES-256-GCM-SHA384",
"TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA",
"TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA",
"TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256",
"TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256",
"TLS-DHE-DSS-WITH-SEED-CBC-SHA",
"TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256",
"TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384",
"TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256",
"TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384",
"TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256",
"TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384",
"TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA",
"TLS-DHE-DSS-WITH-DES-CBC-SHA"]
dynamic_mapping_ssl_cipher_suites_versions:
description:
- Dynamic Mapping Version of Suffixed Option Name. Sub-Table. Same Descriptions as Parent.
- FLAG Based Options. Specify multiple in list form.
- flag | ssl-3.0 |
- flag | tls-1.0 |
- flag | tls-1.1 |
- flag | tls-1.2 |
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
realservers:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
realservers_client_ip:
description:
- Only clients in this IP range can connect to this real server.
required: false
realservers_healthcheck:
description:
- Enable to check the responsiveness of the real server before forwarding traffic.
- choice | disable | Disable per server health check.
- choice | enable | Enable per server health check.
- choice | vip | Use health check defined in VIP.
required: false
choices: ["disable", "enable", "vip"]
realservers_holddown_interval:
description:
- Time in seconds that the health check monitor monitors an unresponsive server that should be active.
required: false
realservers_http_host:
description:
- HTTP server domain name in HTTP header.
required: false
realservers_ip:
description:
- IP address of the real server.
required: false
realservers_max_connections:
description:
- Max number of active connections that can be directed to the real server. When reached, sessions are sent to
- their real servers.
required: false
realservers_monitor:
description:
- Name of the health check monitor to use when polling to determine a virtual server's connectivity status.
required: false
realservers_port:
description:
- Port for communicating with the real server. Required if port forwarding is enabled.
required: false
realservers_seq:
description:
- Real Server Sequence Number
required: false
realservers_status:
description:
- Set the status of the real server to active so that it can accept traffic.
- Or on standby or disabled so no traffic is sent.
- choice | active | Server status active.
- choice | standby | Server status standby.
- choice | disable | Server status disable.
required: false
choices: ["active", "standby", "disable"]
realservers_weight:
description:
- Weight of the real server. If weighted load balancing is enabled, the server with the highest weight gets more
- connections.
required: false
ssl_cipher_suites:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
ssl_cipher_suites_cipher:
description:
- Cipher suite name.
- choice | TLS-RSA-WITH-RC4-128-MD5 | Cipher suite TLS-RSA-WITH-RC4-128-MD5.
- choice | TLS-RSA-WITH-RC4-128-SHA | Cipher suite TLS-RSA-WITH-RC4-128-SHA.
- choice | TLS-RSA-WITH-DES-CBC-SHA | Cipher suite TLS-RSA-WITH-DES-CBC-SHA.
- choice | TLS-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-RSA-WITH-3DES-EDE-CBC-SHA.
- choice | TLS-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-RSA-WITH-AES-128-CBC-SHA.
- choice | TLS-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-RSA-WITH-AES-256-CBC-SHA.
- choice | TLS-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-AES-128-CBC-SHA256.
- choice | TLS-RSA-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-RSA-WITH-AES-256-CBC-SHA256.
- choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-RSA-WITH-CAMELLIA-128-CBC-SHA.
- choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-RSA-WITH-CAMELLIA-256-CBC-SHA.
- choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256.
- choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256.
- choice | TLS-RSA-WITH-SEED-CBC-SHA | Cipher suite TLS-RSA-WITH-SEED-CBC-SHA.
- choice | TLS-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-RSA-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-DHE-RSA-WITH-DES-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-DES-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-AES-128-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-AES-256-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-128-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-256-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-SEED-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-SEED-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-ECDHE-RSA-WITH-RC4-128-SHA | Cipher suite TLS-ECDHE-RSA-WITH-RC4-128-SHA.
- choice | TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA.
- choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA.
- choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA.
- choice | TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256.
- choice | TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256.
- choice | TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256.
- choice | TLS-DHE-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-128-GCM-SHA256.
- choice | TLS-DHE-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-DHE-RSA-WITH-AES-256-GCM-SHA384.
- choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-AES-128-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-AES-256-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-128-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-128-GCM-SHA256.
- choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-256-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-DHE-DSS-WITH-AES-256-GCM-SHA384.
- choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256.
- choice | TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256.
- choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384.
- choice | TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384.
- choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA.
- choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256.
- choice | TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256.
- choice | TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384.
- choice | TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384.
- choice | TLS-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-RSA-WITH-AES-128-GCM-SHA256.
- choice | TLS-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-RSA-WITH-AES-256-GCM-SHA384.
- choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-SEED-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-SEED-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-DES-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-DES-CBC-SHA.
required: false
choices: ["TLS-RSA-WITH-RC4-128-MD5",
"TLS-RSA-WITH-RC4-128-SHA",
"TLS-RSA-WITH-DES-CBC-SHA",
"TLS-RSA-WITH-3DES-EDE-CBC-SHA",
"TLS-RSA-WITH-AES-128-CBC-SHA",
"TLS-RSA-WITH-AES-256-CBC-SHA",
"TLS-RSA-WITH-AES-128-CBC-SHA256",
"TLS-RSA-WITH-AES-256-CBC-SHA256",
"TLS-RSA-WITH-CAMELLIA-128-CBC-SHA",
"TLS-RSA-WITH-CAMELLIA-256-CBC-SHA",
"TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256",
"TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256",
"TLS-RSA-WITH-SEED-CBC-SHA",
"TLS-RSA-WITH-ARIA-128-CBC-SHA256",
"TLS-RSA-WITH-ARIA-256-CBC-SHA384",
"TLS-DHE-RSA-WITH-DES-CBC-SHA",
"TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA",
"TLS-DHE-RSA-WITH-AES-128-CBC-SHA",
"TLS-DHE-RSA-WITH-AES-256-CBC-SHA",
"TLS-DHE-RSA-WITH-AES-128-CBC-SHA256",
"TLS-DHE-RSA-WITH-AES-256-CBC-SHA256",
"TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA",
"TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA",
"TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256",
"TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256",
"TLS-DHE-RSA-WITH-SEED-CBC-SHA",
"TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256",
"TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384",
"TLS-ECDHE-RSA-WITH-RC4-128-SHA",
"TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA",
"TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA",
"TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA",
"TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
"TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256",
"TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
"TLS-DHE-RSA-WITH-AES-128-GCM-SHA256",
"TLS-DHE-RSA-WITH-AES-256-GCM-SHA384",
"TLS-DHE-DSS-WITH-AES-128-CBC-SHA",
"TLS-DHE-DSS-WITH-AES-256-CBC-SHA",
"TLS-DHE-DSS-WITH-AES-128-CBC-SHA256",
"TLS-DHE-DSS-WITH-AES-128-GCM-SHA256",
"TLS-DHE-DSS-WITH-AES-256-CBC-SHA256",
"TLS-DHE-DSS-WITH-AES-256-GCM-SHA384",
"TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256",
"TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256",
"TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384",
"TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384",
"TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA",
"TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256",
"TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256",
"TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384",
"TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384",
"TLS-RSA-WITH-AES-128-GCM-SHA256",
"TLS-RSA-WITH-AES-256-GCM-SHA384",
"TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA",
"TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA",
"TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256",
"TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256",
"TLS-DHE-DSS-WITH-SEED-CBC-SHA",
"TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256",
"TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384",
"TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256",
"TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384",
"TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256",
"TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384",
"TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA",
"TLS-DHE-DSS-WITH-DES-CBC-SHA"]
ssl_cipher_suites_versions:
description:
- SSL/TLS versions that the cipher suite can be used with.
- FLAG Based Options. Specify multiple in list form.
- flag | ssl-3.0 | SSL 3.0.
- flag | tls-1.0 | TLS 1.0.
- flag | tls-1.1 | TLS 1.1.
- flag | tls-1.2 | TLS 1.2.
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
ssl_server_cipher_suites:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
ssl_server_cipher_suites_cipher:
description:
- Cipher suite name.
- choice | TLS-RSA-WITH-RC4-128-MD5 | Cipher suite TLS-RSA-WITH-RC4-128-MD5.
- choice | TLS-RSA-WITH-RC4-128-SHA | Cipher suite TLS-RSA-WITH-RC4-128-SHA.
- choice | TLS-RSA-WITH-DES-CBC-SHA | Cipher suite TLS-RSA-WITH-DES-CBC-SHA.
- choice | TLS-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-RSA-WITH-3DES-EDE-CBC-SHA.
- choice | TLS-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-RSA-WITH-AES-128-CBC-SHA.
- choice | TLS-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-RSA-WITH-AES-256-CBC-SHA.
- choice | TLS-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-AES-128-CBC-SHA256.
- choice | TLS-RSA-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-RSA-WITH-AES-256-CBC-SHA256.
- choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-RSA-WITH-CAMELLIA-128-CBC-SHA.
- choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-RSA-WITH-CAMELLIA-256-CBC-SHA.
- choice | TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256.
- choice | TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256.
- choice | TLS-RSA-WITH-SEED-CBC-SHA | Cipher suite TLS-RSA-WITH-SEED-CBC-SHA.
- choice | TLS-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-RSA-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-RSA-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-DHE-RSA-WITH-DES-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-DES-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-AES-128-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-AES-256-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-128-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-256-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-SEED-CBC-SHA | Cipher suite TLS-DHE-RSA-WITH-SEED-CBC-SHA.
- choice | TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-ECDHE-RSA-WITH-RC4-128-SHA | Cipher suite TLS-ECDHE-RSA-WITH-RC4-128-SHA.
- choice | TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA.
- choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA.
- choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA.
- choice | TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256.
- choice | TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256 | Suite TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256.
- choice | TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256 | Cipher suite TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256.
- choice | TLS-DHE-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-DHE-RSA-WITH-AES-128-GCM-SHA256.
- choice | TLS-DHE-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-DHE-RSA-WITH-AES-256-GCM-SHA384.
- choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-AES-128-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-AES-256-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-128-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-128-GCM-SHA256.
- choice | TLS-DHE-DSS-WITH-AES-256-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-AES-256-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-DHE-DSS-WITH-AES-256-GCM-SHA384.
- choice | TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256.
- choice | TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256.
- choice | TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384.
- choice | TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384.
- choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA.
- choice | TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256.
- choice | TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256.
- choice | TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384.
- choice | TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384.
- choice | TLS-RSA-WITH-AES-128-GCM-SHA256 | Cipher suite TLS-RSA-WITH-AES-128-GCM-SHA256.
- choice | TLS-RSA-WITH-AES-256-GCM-SHA384 | Cipher suite TLS-RSA-WITH-AES-256-GCM-SHA384.
- choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-SEED-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-SEED-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256 | Cipher suite TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256.
- choice | TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384 | Cipher suite TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384.
- choice | TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA.
- choice | TLS-DHE-DSS-WITH-DES-CBC-SHA | Cipher suite TLS-DHE-DSS-WITH-DES-CBC-SHA.
required: false
choices: ["TLS-RSA-WITH-RC4-128-MD5",
"TLS-RSA-WITH-RC4-128-SHA",
"TLS-RSA-WITH-DES-CBC-SHA",
"TLS-RSA-WITH-3DES-EDE-CBC-SHA",
"TLS-RSA-WITH-AES-128-CBC-SHA",
"TLS-RSA-WITH-AES-256-CBC-SHA",
"TLS-RSA-WITH-AES-128-CBC-SHA256",
"TLS-RSA-WITH-AES-256-CBC-SHA256",
"TLS-RSA-WITH-CAMELLIA-128-CBC-SHA",
"TLS-RSA-WITH-CAMELLIA-256-CBC-SHA",
"TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256",
"TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256",
"TLS-RSA-WITH-SEED-CBC-SHA",
"TLS-RSA-WITH-ARIA-128-CBC-SHA256",
"TLS-RSA-WITH-ARIA-256-CBC-SHA384",
"TLS-DHE-RSA-WITH-DES-CBC-SHA",
"TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA",
"TLS-DHE-RSA-WITH-AES-128-CBC-SHA",
"TLS-DHE-RSA-WITH-AES-256-CBC-SHA",
"TLS-DHE-RSA-WITH-AES-128-CBC-SHA256",
"TLS-DHE-RSA-WITH-AES-256-CBC-SHA256",
"TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA",
"TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA",
"TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256",
"TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256",
"TLS-DHE-RSA-WITH-SEED-CBC-SHA",
"TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256",
"TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384",
"TLS-ECDHE-RSA-WITH-RC4-128-SHA",
"TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA",
"TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA",
"TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA",
"TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
"TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256",
"TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
"TLS-DHE-RSA-WITH-AES-128-GCM-SHA256",
"TLS-DHE-RSA-WITH-AES-256-GCM-SHA384",
"TLS-DHE-DSS-WITH-AES-128-CBC-SHA",
"TLS-DHE-DSS-WITH-AES-256-CBC-SHA",
"TLS-DHE-DSS-WITH-AES-128-CBC-SHA256",
"TLS-DHE-DSS-WITH-AES-128-GCM-SHA256",
"TLS-DHE-DSS-WITH-AES-256-CBC-SHA256",
"TLS-DHE-DSS-WITH-AES-256-GCM-SHA384",
"TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256",
"TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256",
"TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384",
"TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384",
"TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA",
"TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256",
"TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256",
"TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384",
"TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384",
"TLS-RSA-WITH-AES-128-GCM-SHA256",
"TLS-RSA-WITH-AES-256-GCM-SHA384",
"TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA",
"TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA",
"TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256",
"TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256",
"TLS-DHE-DSS-WITH-SEED-CBC-SHA",
"TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256",
"TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384",
"TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256",
"TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384",
"TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256",
"TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384",
"TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA",
"TLS-DHE-DSS-WITH-DES-CBC-SHA"]
ssl_server_cipher_suites_priority:
description:
- SSL/TLS cipher suites priority.
required: false
ssl_server_cipher_suites_versions:
description:
- SSL/TLS versions that the cipher suite can be used with.
- FLAG Based Options. Specify multiple in list form.
- flag | ssl-3.0 | SSL 3.0.
- flag | tls-1.0 | TLS 1.0.
- flag | tls-1.1 | TLS 1.1.
- flag | tls-1.2 | TLS 1.2.
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
'''
EXAMPLES = '''
# BASIC FULL STATIC NAT MAPPING
- name: EDIT FMGR_FIREWALL_VIP SNAT
fmgr_fwobj_vip:
name: "Basic StaticNAT Map"
mode: "set"
adom: "ansible"
type: "static-nat"
extip: "82.72.192.185"
extintf: "any"
mappedip: "10.7.220.25"
comment: "Created by Ansible"
color: "17"
# BASIC PORT PNAT MAPPING
- name: EDIT FMGR_FIREWALL_VIP PNAT
fmgr_fwobj_vip:
name: "Basic PNAT Map Port 10443"
mode: "set"
adom: "ansible"
type: "static-nat"
extip: "82.72.192.185"
extport: "10443"
extintf: "any"
portforward: "enable"
protocol: "tcp"
mappedip: "10.7.220.25"
mappedport: "443"
comment: "Created by Ansible"
color: "17"
# BASIC DNS TRANSLATION NAT
- name: EDIT FMGR_FIREWALL_DNST
fmgr_fwobj_vip:
name: "Basic DNS Translation"
mode: "set"
adom: "ansible"
type: "dns-translation"
extip: "192.168.0.1-192.168.0.100"
extintf: "dmz"
mappedip: "3.3.3.0/24, 4.0.0.0/24"
comment: "Created by Ansible"
color: "12"
# BASIC FQDN NAT
- name: EDIT FMGR_FIREWALL_FQDN
fmgr_fwobj_vip:
name: "Basic FQDN Translation"
mode: "set"
adom: "ansible"
type: "fqdn"
mapped_addr: "google-play"
comment: "Created by Ansible"
color: "5"
# DELETE AN ENTRY
- name: DELETE FMGR_FIREWALL_VIP PNAT
fmgr_fwobj_vip:
name: "Basic PNAT Map Port 10443"
mode: "delete"
adom: "ansible"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import FMGRMethods
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
from ansible.module_utils.network.fortimanager.common import prepare_dict
from ansible.module_utils.network.fortimanager.common import scrub_dict
def fmgr_firewall_vip_modify(fmgr, paramgram):
    """Issue an add/set/update/delete request for a firewall VIP object.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict
    :return: The response from the FortiManager
    :rtype: dict
    """
    adom = paramgram["adom"]
    mode = paramgram["mode"]

    # Default to an empty request; the branches below fill in the URL
    # (and, for writes, the payload).
    url = ""
    datagram = {}

    if mode == "delete":
        # Deletes address the named object directly and carry no payload.
        url = '/pm/config/adom/{adom}/obj/firewall/vip/{name}'.format(adom=adom, name=paramgram["name"])
    elif mode in ("set", "add", "update"):
        # Writes go to the collection URL with the cleaned-up paramgram as body.
        url = '/pm/config/adom/{adom}/obj/firewall/vip'.format(adom=adom)
        datagram = scrub_dict(prepare_dict(paramgram))

    return fmgr.process_request(url, datagram, paramgram["mode"])
#############
# END METHODS
#############
def main():
    """Ansible module entry point.

    Declares the flat Ansible argument spec for a FortiManager firewall VIP
    object, translates it into the hyphen-keyed nested "paramgram" structure
    that the FortiManager API expects, then dispatches the requested
    add/set/update/delete operation over the persistent connection.
    """
    # Every FortiManager VIP option is exposed as a flat, underscore-named
    # module parameter; "dynamic_mapping_*" parameters feed the nested
    # per-VDOM mapping, and "realservers_*" / "ssl_*_cipher_suites_*"
    # parameters feed their respective child tables.
    argument_spec = dict(
        adom=dict(type="str", default="root"),
        mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
        websphere_server=dict(required=False, type="str", choices=["disable", "enable"]),
        weblogic_server=dict(required=False, type="str", choices=["disable", "enable"]),
        type=dict(required=False, type="str",
                  choices=["static-nat", "load-balance", "server-load-balance", "dns-translation", "fqdn"]),
        ssl_server_session_state_type=dict(required=False, type="str", choices=["disable", "time", "count", "both"]),
        ssl_server_session_state_timeout=dict(required=False, type="int"),
        ssl_server_session_state_max=dict(required=False, type="int"),
        ssl_server_min_version=dict(required=False, type="str",
                                    choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]),
        ssl_server_max_version=dict(required=False, type="str",
                                    choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]),
        ssl_server_algorithm=dict(required=False, type="str", choices=["high", "low", "medium", "custom", "client"]),
        ssl_send_empty_frags=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_pfs=dict(required=False, type="str", choices=["require", "deny", "allow"]),
        ssl_mode=dict(required=False, type="str", choices=["half", "full"]),
        ssl_min_version=dict(required=False, type="str", choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]),
        ssl_max_version=dict(required=False, type="str", choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]),
        ssl_http_match_host=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_http_location_conversion=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_hsts_include_subdomains=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_hsts_age=dict(required=False, type="int"),
        ssl_hsts=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_hpkp_report_uri=dict(required=False, type="str"),
        ssl_hpkp_primary=dict(required=False, type="str"),
        ssl_hpkp_include_subdomains=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_hpkp_backup=dict(required=False, type="str"),
        ssl_hpkp_age=dict(required=False, type="int"),
        ssl_hpkp=dict(required=False, type="str", choices=["disable", "enable", "report-only"]),
        ssl_dh_bits=dict(required=False, type="str", choices=["768", "1024", "1536", "2048", "3072", "4096"]),
        ssl_client_session_state_type=dict(required=False, type="str", choices=["disable", "time", "count", "both"]),
        ssl_client_session_state_timeout=dict(required=False, type="int"),
        ssl_client_session_state_max=dict(required=False, type="int"),
        ssl_client_renegotiation=dict(required=False, type="str", choices=["deny", "allow", "secure"]),
        ssl_client_fallback=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_certificate=dict(required=False, type="str"),
        ssl_algorithm=dict(required=False, type="str", choices=["high", "medium", "low", "custom"]),
        srcintf_filter=dict(required=False, type="str"),
        src_filter=dict(required=False, type="str"),
        service=dict(required=False, type="str"),
        server_type=dict(required=False, type="str",
                         choices=["http", "https", "ssl", "tcp", "udp", "ip", "imaps", "pop3s", "smtps"]),
        protocol=dict(required=False, type="str", choices=["tcp", "udp", "sctp", "icmp"]),
        portmapping_type=dict(required=False, type="str", choices=["1-to-1", "m-to-n"]),
        portforward=dict(required=False, type="str", choices=["disable", "enable"]),
        persistence=dict(required=False, type="str", choices=["none", "http-cookie", "ssl-session-id"]),
        outlook_web_access=dict(required=False, type="str", choices=["disable", "enable"]),
        nat_source_vip=dict(required=False, type="str", choices=["disable", "enable"]),
        name=dict(required=False, type="str"),
        monitor=dict(required=False, type="str"),
        max_embryonic_connections=dict(required=False, type="int"),
        mappedport=dict(required=False, type="str"),
        mappedip=dict(required=False, type="str"),
        mapped_addr=dict(required=False, type="str"),
        ldb_method=dict(required=False, type="str",
                        choices=["static", "round-robin", "weighted", "least-session", "least-rtt", "first-alive",
                                 "http-host"]),
        https_cookie_secure=dict(required=False, type="str", choices=["disable", "enable"]),
        http_multiplex=dict(required=False, type="str", choices=["disable", "enable"]),
        http_ip_header_name=dict(required=False, type="str"),
        http_ip_header=dict(required=False, type="str", choices=["disable", "enable"]),
        http_cookie_share=dict(required=False, type="str", choices=["disable", "same-ip"]),
        http_cookie_path=dict(required=False, type="str"),
        http_cookie_generation=dict(required=False, type="int"),
        http_cookie_domain_from_host=dict(required=False, type="str", choices=["disable", "enable"]),
        http_cookie_domain=dict(required=False, type="str"),
        http_cookie_age=dict(required=False, type="int"),
        gratuitous_arp_interval=dict(required=False, type="int"),
        extport=dict(required=False, type="str"),
        extip=dict(required=False, type="str"),
        extintf=dict(required=False, type="str"),
        extaddr=dict(required=False, type="str"),
        dns_mapping_ttl=dict(required=False, type="int"),
        comment=dict(required=False, type="str"),
        color=dict(required=False, type="int"),
        arp_reply=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping=dict(required=False, type="list"),
        dynamic_mapping_arp_reply=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_color=dict(required=False, type="int"),
        dynamic_mapping_comment=dict(required=False, type="str"),
        dynamic_mapping_dns_mapping_ttl=dict(required=False, type="int"),
        dynamic_mapping_extaddr=dict(required=False, type="str"),
        dynamic_mapping_extintf=dict(required=False, type="str"),
        dynamic_mapping_extip=dict(required=False, type="str"),
        dynamic_mapping_extport=dict(required=False, type="str"),
        dynamic_mapping_gratuitous_arp_interval=dict(required=False, type="int"),
        dynamic_mapping_http_cookie_age=dict(required=False, type="int"),
        dynamic_mapping_http_cookie_domain=dict(required=False, type="str"),
        dynamic_mapping_http_cookie_domain_from_host=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_http_cookie_generation=dict(required=False, type="int"),
        dynamic_mapping_http_cookie_path=dict(required=False, type="str"),
        dynamic_mapping_http_cookie_share=dict(required=False, type="str", choices=["disable", "same-ip"]),
        dynamic_mapping_http_ip_header=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_http_ip_header_name=dict(required=False, type="str"),
        dynamic_mapping_http_multiplex=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_https_cookie_secure=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_ldb_method=dict(required=False, type="str", choices=["static",
                                                                             "round-robin",
                                                                             "weighted",
                                                                             "least-session",
                                                                             "least-rtt",
                                                                             "first-alive",
                                                                             "http-host"]),
        dynamic_mapping_mapped_addr=dict(required=False, type="str"),
        dynamic_mapping_mappedip=dict(required=False, type="str"),
        dynamic_mapping_mappedport=dict(required=False, type="str"),
        dynamic_mapping_max_embryonic_connections=dict(required=False, type="int"),
        dynamic_mapping_monitor=dict(required=False, type="str"),
        dynamic_mapping_nat_source_vip=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_outlook_web_access=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_persistence=dict(required=False, type="str", choices=["none", "http-cookie", "ssl-session-id"]),
        dynamic_mapping_portforward=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_portmapping_type=dict(required=False, type="str", choices=["1-to-1", "m-to-n"]),
        dynamic_mapping_protocol=dict(required=False, type="str", choices=["tcp", "udp", "sctp", "icmp"]),
        dynamic_mapping_server_type=dict(required=False, type="str",
                                         choices=["http", "https", "ssl", "tcp", "udp", "ip", "imaps", "pop3s",
                                                  "smtps"]),
        dynamic_mapping_service=dict(required=False, type="str"),
        dynamic_mapping_src_filter=dict(required=False, type="str"),
        dynamic_mapping_srcintf_filter=dict(required=False, type="str"),
        dynamic_mapping_ssl_algorithm=dict(required=False, type="str", choices=["high", "medium", "low", "custom"]),
        dynamic_mapping_ssl_certificate=dict(required=False, type="str"),
        dynamic_mapping_ssl_client_fallback=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_ssl_client_renegotiation=dict(required=False, type="str", choices=["deny", "allow", "secure"]),
        dynamic_mapping_ssl_client_session_state_max=dict(required=False, type="int"),
        dynamic_mapping_ssl_client_session_state_timeout=dict(required=False, type="int"),
        dynamic_mapping_ssl_client_session_state_type=dict(required=False, type="str",
                                                           choices=["disable", "time", "count", "both"]),
        dynamic_mapping_ssl_dh_bits=dict(required=False, type="str",
                                         choices=["768", "1024", "1536", "2048", "3072", "4096"]),
        dynamic_mapping_ssl_hpkp=dict(required=False, type="str", choices=["disable", "enable", "report-only"]),
        dynamic_mapping_ssl_hpkp_age=dict(required=False, type="int"),
        dynamic_mapping_ssl_hpkp_backup=dict(required=False, type="str"),
        dynamic_mapping_ssl_hpkp_include_subdomains=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_ssl_hpkp_primary=dict(required=False, type="str"),
        dynamic_mapping_ssl_hpkp_report_uri=dict(required=False, type="str"),
        dynamic_mapping_ssl_hsts=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_ssl_hsts_age=dict(required=False, type="int"),
        dynamic_mapping_ssl_hsts_include_subdomains=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_ssl_http_location_conversion=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_ssl_http_match_host=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_ssl_max_version=dict(required=False, type="str",
                                             choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]),
        dynamic_mapping_ssl_min_version=dict(required=False, type="str",
                                             choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]),
        dynamic_mapping_ssl_mode=dict(required=False, type="str", choices=["half", "full"]),
        dynamic_mapping_ssl_pfs=dict(required=False, type="str", choices=["require", "deny", "allow"]),
        dynamic_mapping_ssl_send_empty_frags=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_ssl_server_algorithm=dict(required=False, type="str",
                                                  choices=["high", "low", "medium", "custom", "client"]),
        dynamic_mapping_ssl_server_max_version=dict(required=False, type="str",
                                                    choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]),
        dynamic_mapping_ssl_server_min_version=dict(required=False, type="str",
                                                    choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2", "client"]),
        dynamic_mapping_ssl_server_session_state_max=dict(required=False, type="int"),
        dynamic_mapping_ssl_server_session_state_timeout=dict(required=False, type="int"),
        dynamic_mapping_ssl_server_session_state_type=dict(required=False, type="str",
                                                           choices=["disable", "time", "count", "both"]),
        dynamic_mapping_type=dict(required=False, type="str",
                                  choices=["static-nat", "load-balance", "server-load-balance", "dns-translation",
                                           "fqdn"]),
        dynamic_mapping_weblogic_server=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_websphere_server=dict(required=False, type="str", choices=["disable", "enable"]),
        dynamic_mapping_realservers_client_ip=dict(required=False, type="str"),
        dynamic_mapping_realservers_healthcheck=dict(required=False, type="str", choices=["disable", "enable", "vip"]),
        dynamic_mapping_realservers_holddown_interval=dict(required=False, type="int"),
        dynamic_mapping_realservers_http_host=dict(required=False, type="str"),
        dynamic_mapping_realservers_ip=dict(required=False, type="str"),
        dynamic_mapping_realservers_max_connections=dict(required=False, type="int"),
        dynamic_mapping_realservers_monitor=dict(required=False, type="str"),
        dynamic_mapping_realservers_port=dict(required=False, type="int"),
        dynamic_mapping_realservers_seq=dict(required=False, type="str"),
        dynamic_mapping_realservers_status=dict(required=False, type="str", choices=["active", "standby", "disable"]),
        dynamic_mapping_realservers_weight=dict(required=False, type="int"),
        dynamic_mapping_ssl_cipher_suites_cipher=dict(required=False,
                                                      type="str",
                                                      choices=["TLS-RSA-WITH-RC4-128-MD5",
                                                               "TLS-RSA-WITH-RC4-128-SHA",
                                                               "TLS-RSA-WITH-DES-CBC-SHA",
                                                               "TLS-RSA-WITH-3DES-EDE-CBC-SHA",
                                                               "TLS-RSA-WITH-AES-128-CBC-SHA",
                                                               "TLS-RSA-WITH-AES-256-CBC-SHA",
                                                               "TLS-RSA-WITH-AES-128-CBC-SHA256",
                                                               "TLS-RSA-WITH-AES-256-CBC-SHA256",
                                                               "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA",
                                                               "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA",
                                                               "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256",
                                                               "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256",
                                                               "TLS-RSA-WITH-SEED-CBC-SHA",
                                                               "TLS-RSA-WITH-ARIA-128-CBC-SHA256",
                                                               "TLS-RSA-WITH-ARIA-256-CBC-SHA384",
                                                               "TLS-DHE-RSA-WITH-DES-CBC-SHA",
                                                               "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA",
                                                               "TLS-DHE-RSA-WITH-AES-128-CBC-SHA",
                                                               "TLS-DHE-RSA-WITH-AES-256-CBC-SHA",
                                                               "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256",
                                                               "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256",
                                                               "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA",
                                                               "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA",
                                                               "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256",
                                                               "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256",
                                                               "TLS-DHE-RSA-WITH-SEED-CBC-SHA",
                                                               "TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256",
                                                               "TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384",
                                                               "TLS-ECDHE-RSA-WITH-RC4-128-SHA",
                                                               "TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA",
                                                               "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA",
                                                               "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA",
                                                               "TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
                                                               "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256",
                                                               "TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
                                                               "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256",
                                                               "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384",
                                                               "TLS-DHE-DSS-WITH-AES-128-CBC-SHA",
                                                               "TLS-DHE-DSS-WITH-AES-256-CBC-SHA",
                                                               "TLS-DHE-DSS-WITH-AES-128-CBC-SHA256",
                                                               "TLS-DHE-DSS-WITH-AES-128-GCM-SHA256",
                                                               "TLS-DHE-DSS-WITH-AES-256-CBC-SHA256",
                                                               "TLS-DHE-DSS-WITH-AES-256-GCM-SHA384",
                                                               "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256",
                                                               "TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256",
                                                               "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384",
                                                               "TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384",
                                                               "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA",
                                                               "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256",
                                                               "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256",
                                                               "TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384",
                                                               "TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384",
                                                               "TLS-RSA-WITH-AES-128-GCM-SHA256",
                                                               "TLS-RSA-WITH-AES-256-GCM-SHA384",
                                                               "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA",
                                                               "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA",
                                                               "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256",
                                                               "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256",
                                                               "TLS-DHE-DSS-WITH-SEED-CBC-SHA",
                                                               "TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256",
                                                               "TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384",
                                                               "TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256",
                                                               "TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384",
                                                               "TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256",
                                                               "TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384",
                                                               "TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA",
                                                               "TLS-DHE-DSS-WITH-DES-CBC-SHA"]),
        dynamic_mapping_ssl_cipher_suites_versions=dict(required=False, type="str",
                                                        choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]),
        realservers=dict(required=False, type="list"),
        realservers_client_ip=dict(required=False, type="str"),
        realservers_healthcheck=dict(required=False, type="str", choices=["disable", "enable", "vip"]),
        realservers_holddown_interval=dict(required=False, type="int"),
        realservers_http_host=dict(required=False, type="str"),
        realservers_ip=dict(required=False, type="str"),
        realservers_max_connections=dict(required=False, type="int"),
        realservers_monitor=dict(required=False, type="str"),
        realservers_port=dict(required=False, type="int"),
        realservers_seq=dict(required=False, type="str"),
        realservers_status=dict(required=False, type="str", choices=["active", "standby", "disable"]),
        realservers_weight=dict(required=False, type="int"),
        ssl_cipher_suites=dict(required=False, type="list"),
        ssl_cipher_suites_cipher=dict(required=False,
                                      type="str",
                                      choices=["TLS-RSA-WITH-RC4-128-MD5",
                                               "TLS-RSA-WITH-RC4-128-SHA",
                                               "TLS-RSA-WITH-DES-CBC-SHA",
                                               "TLS-RSA-WITH-3DES-EDE-CBC-SHA",
                                               "TLS-RSA-WITH-AES-128-CBC-SHA",
                                               "TLS-RSA-WITH-AES-256-CBC-SHA",
                                               "TLS-RSA-WITH-AES-128-CBC-SHA256",
                                               "TLS-RSA-WITH-AES-256-CBC-SHA256",
                                               "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA",
                                               "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA",
                                               "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256",
                                               "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256",
                                               "TLS-RSA-WITH-SEED-CBC-SHA",
                                               "TLS-RSA-WITH-ARIA-128-CBC-SHA256",
                                               "TLS-RSA-WITH-ARIA-256-CBC-SHA384",
                                               "TLS-DHE-RSA-WITH-DES-CBC-SHA",
                                               "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA",
                                               "TLS-DHE-RSA-WITH-AES-128-CBC-SHA",
                                               "TLS-DHE-RSA-WITH-AES-256-CBC-SHA",
                                               "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256",
                                               "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256",
                                               "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA",
                                               "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA",
                                               "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256",
                                               "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256",
                                               "TLS-DHE-RSA-WITH-SEED-CBC-SHA",
                                               "TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256",
                                               "TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384",
                                               "TLS-ECDHE-RSA-WITH-RC4-128-SHA",
                                               "TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA",
                                               "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA",
                                               "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA",
                                               "TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
                                               "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256",
                                               "TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
                                               "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256",
                                               "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384",
                                               "TLS-DHE-DSS-WITH-AES-128-CBC-SHA",
                                               "TLS-DHE-DSS-WITH-AES-256-CBC-SHA",
                                               "TLS-DHE-DSS-WITH-AES-128-CBC-SHA256",
                                               "TLS-DHE-DSS-WITH-AES-128-GCM-SHA256",
                                               "TLS-DHE-DSS-WITH-AES-256-CBC-SHA256",
                                               "TLS-DHE-DSS-WITH-AES-256-GCM-SHA384",
                                               "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256",
                                               "TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256",
                                               "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384",
                                               "TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384",
                                               "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA",
                                               "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256",
                                               "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256",
                                               "TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384",
                                               "TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384",
                                               "TLS-RSA-WITH-AES-128-GCM-SHA256",
                                               "TLS-RSA-WITH-AES-256-GCM-SHA384",
                                               "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA",
                                               "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA",
                                               "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256",
                                               "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256",
                                               "TLS-DHE-DSS-WITH-SEED-CBC-SHA",
                                               "TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256",
                                               "TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384",
                                               "TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256",
                                               "TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384",
                                               "TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256",
                                               "TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384",
                                               "TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA",
                                               "TLS-DHE-DSS-WITH-DES-CBC-SHA"]),
        ssl_cipher_suites_versions=dict(required=False, type="str",
                                        choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]),
        ssl_server_cipher_suites=dict(required=False, type="list"),
        ssl_server_cipher_suites_cipher=dict(required=False,
                                             type="str",
                                             choices=["TLS-RSA-WITH-RC4-128-MD5",
                                                      "TLS-RSA-WITH-RC4-128-SHA",
                                                      "TLS-RSA-WITH-DES-CBC-SHA",
                                                      "TLS-RSA-WITH-3DES-EDE-CBC-SHA",
                                                      "TLS-RSA-WITH-AES-128-CBC-SHA",
                                                      "TLS-RSA-WITH-AES-256-CBC-SHA",
                                                      "TLS-RSA-WITH-AES-128-CBC-SHA256",
                                                      "TLS-RSA-WITH-AES-256-CBC-SHA256",
                                                      "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA",
                                                      "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA",
                                                      "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256",
                                                      "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256",
                                                      "TLS-RSA-WITH-SEED-CBC-SHA",
                                                      "TLS-RSA-WITH-ARIA-128-CBC-SHA256",
                                                      "TLS-RSA-WITH-ARIA-256-CBC-SHA384",
                                                      "TLS-DHE-RSA-WITH-DES-CBC-SHA",
                                                      "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA",
                                                      "TLS-DHE-RSA-WITH-AES-128-CBC-SHA",
                                                      "TLS-DHE-RSA-WITH-AES-256-CBC-SHA",
                                                      "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256",
                                                      "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256",
                                                      "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA",
                                                      "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA",
                                                      "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256",
                                                      "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256",
                                                      "TLS-DHE-RSA-WITH-SEED-CBC-SHA",
                                                      "TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256",
                                                      "TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384",
                                                      "TLS-ECDHE-RSA-WITH-RC4-128-SHA",
                                                      "TLS-ECDHE-RSA-WITH-3DES-EDE-CBC-SHA",
                                                      "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA",
                                                      "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA",
                                                      "TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
                                                      "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256",
                                                      "TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256",
                                                      "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256",
                                                      "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384",
                                                      "TLS-DHE-DSS-WITH-AES-128-CBC-SHA",
                                                      "TLS-DHE-DSS-WITH-AES-256-CBC-SHA",
                                                      "TLS-DHE-DSS-WITH-AES-128-CBC-SHA256",
                                                      "TLS-DHE-DSS-WITH-AES-128-GCM-SHA256",
                                                      "TLS-DHE-DSS-WITH-AES-256-CBC-SHA256",
                                                      "TLS-DHE-DSS-WITH-AES-256-GCM-SHA384",
                                                      "TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256",
                                                      "TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256",
                                                      "TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384",
                                                      "TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384",
                                                      "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA",
                                                      "TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256",
                                                      "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256",
                                                      "TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384",
                                                      "TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384",
                                                      "TLS-RSA-WITH-AES-128-GCM-SHA256",
                                                      "TLS-RSA-WITH-AES-256-GCM-SHA384",
                                                      "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA",
                                                      "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA",
                                                      "TLS-DHE-DSS-WITH-CAMELLIA-128-CBC-SHA256",
                                                      "TLS-DHE-DSS-WITH-CAMELLIA-256-CBC-SHA256",
                                                      "TLS-DHE-DSS-WITH-SEED-CBC-SHA",
                                                      "TLS-DHE-DSS-WITH-ARIA-128-CBC-SHA256",
                                                      "TLS-DHE-DSS-WITH-ARIA-256-CBC-SHA384",
                                                      "TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256",
                                                      "TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384",
                                                      "TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256",
                                                      "TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384",
                                                      "TLS-DHE-DSS-WITH-3DES-EDE-CBC-SHA",
                                                      "TLS-DHE-DSS-WITH-DES-CBC-SHA"]),
        ssl_server_cipher_suites_priority=dict(required=False, type="str"),
        ssl_server_cipher_suites_versions=dict(required=False, type="str",
                                               choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
    # MODULE PARAMGRAM
    # Keys below use FortiManager API naming (hyphenated); values come from the
    # underscore-named Ansible parameters declared above.
    paramgram = {
        "mode": module.params["mode"],
        "adom": module.params["adom"],
        "websphere-server": module.params["websphere_server"],
        "weblogic-server": module.params["weblogic_server"],
        "type": module.params["type"],
        "ssl-server-session-state-type": module.params["ssl_server_session_state_type"],
        "ssl-server-session-state-timeout": module.params["ssl_server_session_state_timeout"],
        "ssl-server-session-state-max": module.params["ssl_server_session_state_max"],
        "ssl-server-min-version": module.params["ssl_server_min_version"],
        "ssl-server-max-version": module.params["ssl_server_max_version"],
        "ssl-server-algorithm": module.params["ssl_server_algorithm"],
        "ssl-send-empty-frags": module.params["ssl_send_empty_frags"],
        "ssl-pfs": module.params["ssl_pfs"],
        "ssl-mode": module.params["ssl_mode"],
        "ssl-min-version": module.params["ssl_min_version"],
        "ssl-max-version": module.params["ssl_max_version"],
        "ssl-http-match-host": module.params["ssl_http_match_host"],
        "ssl-http-location-conversion": module.params["ssl_http_location_conversion"],
        "ssl-hsts-include-subdomains": module.params["ssl_hsts_include_subdomains"],
        "ssl-hsts-age": module.params["ssl_hsts_age"],
        "ssl-hsts": module.params["ssl_hsts"],
        "ssl-hpkp-report-uri": module.params["ssl_hpkp_report_uri"],
        "ssl-hpkp-primary": module.params["ssl_hpkp_primary"],
        "ssl-hpkp-include-subdomains": module.params["ssl_hpkp_include_subdomains"],
        "ssl-hpkp-backup": module.params["ssl_hpkp_backup"],
        "ssl-hpkp-age": module.params["ssl_hpkp_age"],
        "ssl-hpkp": module.params["ssl_hpkp"],
        "ssl-dh-bits": module.params["ssl_dh_bits"],
        "ssl-client-session-state-type": module.params["ssl_client_session_state_type"],
        "ssl-client-session-state-timeout": module.params["ssl_client_session_state_timeout"],
        "ssl-client-session-state-max": module.params["ssl_client_session_state_max"],
        "ssl-client-renegotiation": module.params["ssl_client_renegotiation"],
        "ssl-client-fallback": module.params["ssl_client_fallback"],
        "ssl-certificate": module.params["ssl_certificate"],
        "ssl-algorithm": module.params["ssl_algorithm"],
        "srcintf-filter": module.params["srcintf_filter"],
        "src-filter": module.params["src_filter"],
        "service": module.params["service"],
        "server-type": module.params["server_type"],
        "protocol": module.params["protocol"],
        "portmapping-type": module.params["portmapping_type"],
        "portforward": module.params["portforward"],
        "persistence": module.params["persistence"],
        "outlook-web-access": module.params["outlook_web_access"],
        "nat-source-vip": module.params["nat_source_vip"],
        "name": module.params["name"],
        "monitor": module.params["monitor"],
        "max-embryonic-connections": module.params["max_embryonic_connections"],
        "mappedport": module.params["mappedport"],
        "mappedip": module.params["mappedip"],
        "mapped-addr": module.params["mapped_addr"],
        "ldb-method": module.params["ldb_method"],
        "https-cookie-secure": module.params["https_cookie_secure"],
        "http-multiplex": module.params["http_multiplex"],
        "http-ip-header-name": module.params["http_ip_header_name"],
        "http-ip-header": module.params["http_ip_header"],
        "http-cookie-share": module.params["http_cookie_share"],
        "http-cookie-path": module.params["http_cookie_path"],
        "http-cookie-generation": module.params["http_cookie_generation"],
        "http-cookie-domain-from-host": module.params["http_cookie_domain_from_host"],
        "http-cookie-domain": module.params["http_cookie_domain"],
        "http-cookie-age": module.params["http_cookie_age"],
        "gratuitous-arp-interval": module.params["gratuitous_arp_interval"],
        "extport": module.params["extport"],
        "extip": module.params["extip"],
        "extintf": module.params["extintf"],
        "extaddr": module.params["extaddr"],
        "dns-mapping-ttl": module.params["dns_mapping_ttl"],
        "comment": module.params["comment"],
        "color": module.params["color"],
        "arp-reply": module.params["arp_reply"],
        # Nested per-VDOM mapping; converted to a list-of-dicts further below
        # by paramgram_child_list_override.
        "dynamic_mapping": {
            "arp-reply": module.params["dynamic_mapping_arp_reply"],
            "color": module.params["dynamic_mapping_color"],
            "comment": module.params["dynamic_mapping_comment"],
            "dns-mapping-ttl": module.params["dynamic_mapping_dns_mapping_ttl"],
            "extaddr": module.params["dynamic_mapping_extaddr"],
            "extintf": module.params["dynamic_mapping_extintf"],
            "extip": module.params["dynamic_mapping_extip"],
            "extport": module.params["dynamic_mapping_extport"],
            "gratuitous-arp-interval": module.params["dynamic_mapping_gratuitous_arp_interval"],
            "http-cookie-age": module.params["dynamic_mapping_http_cookie_age"],
            "http-cookie-domain": module.params["dynamic_mapping_http_cookie_domain"],
            "http-cookie-domain-from-host": module.params["dynamic_mapping_http_cookie_domain_from_host"],
            "http-cookie-generation": module.params["dynamic_mapping_http_cookie_generation"],
            "http-cookie-path": module.params["dynamic_mapping_http_cookie_path"],
            "http-cookie-share": module.params["dynamic_mapping_http_cookie_share"],
            "http-ip-header": module.params["dynamic_mapping_http_ip_header"],
            "http-ip-header-name": module.params["dynamic_mapping_http_ip_header_name"],
            "http-multiplex": module.params["dynamic_mapping_http_multiplex"],
            "https-cookie-secure": module.params["dynamic_mapping_https_cookie_secure"],
            "ldb-method": module.params["dynamic_mapping_ldb_method"],
            "mapped-addr": module.params["dynamic_mapping_mapped_addr"],
            "mappedip": module.params["dynamic_mapping_mappedip"],
            "mappedport": module.params["dynamic_mapping_mappedport"],
            "max-embryonic-connections": module.params["dynamic_mapping_max_embryonic_connections"],
            "monitor": module.params["dynamic_mapping_monitor"],
            "nat-source-vip": module.params["dynamic_mapping_nat_source_vip"],
            "outlook-web-access": module.params["dynamic_mapping_outlook_web_access"],
            "persistence": module.params["dynamic_mapping_persistence"],
            "portforward": module.params["dynamic_mapping_portforward"],
            "portmapping-type": module.params["dynamic_mapping_portmapping_type"],
            "protocol": module.params["dynamic_mapping_protocol"],
            "server-type": module.params["dynamic_mapping_server_type"],
            "service": module.params["dynamic_mapping_service"],
            "src-filter": module.params["dynamic_mapping_src_filter"],
            "srcintf-filter": module.params["dynamic_mapping_srcintf_filter"],
            "ssl-algorithm": module.params["dynamic_mapping_ssl_algorithm"],
            "ssl-certificate": module.params["dynamic_mapping_ssl_certificate"],
            "ssl-client-fallback": module.params["dynamic_mapping_ssl_client_fallback"],
            "ssl-client-renegotiation": module.params["dynamic_mapping_ssl_client_renegotiation"],
            "ssl-client-session-state-max": module.params["dynamic_mapping_ssl_client_session_state_max"],
            "ssl-client-session-state-timeout": module.params["dynamic_mapping_ssl_client_session_state_timeout"],
            "ssl-client-session-state-type": module.params["dynamic_mapping_ssl_client_session_state_type"],
            "ssl-dh-bits": module.params["dynamic_mapping_ssl_dh_bits"],
            "ssl-hpkp": module.params["dynamic_mapping_ssl_hpkp"],
            "ssl-hpkp-age": module.params["dynamic_mapping_ssl_hpkp_age"],
            "ssl-hpkp-backup": module.params["dynamic_mapping_ssl_hpkp_backup"],
            "ssl-hpkp-include-subdomains": module.params["dynamic_mapping_ssl_hpkp_include_subdomains"],
            "ssl-hpkp-primary": module.params["dynamic_mapping_ssl_hpkp_primary"],
            "ssl-hpkp-report-uri": module.params["dynamic_mapping_ssl_hpkp_report_uri"],
            "ssl-hsts": module.params["dynamic_mapping_ssl_hsts"],
            "ssl-hsts-age": module.params["dynamic_mapping_ssl_hsts_age"],
            "ssl-hsts-include-subdomains": module.params["dynamic_mapping_ssl_hsts_include_subdomains"],
            "ssl-http-location-conversion": module.params["dynamic_mapping_ssl_http_location_conversion"],
            "ssl-http-match-host": module.params["dynamic_mapping_ssl_http_match_host"],
            "ssl-max-version": module.params["dynamic_mapping_ssl_max_version"],
            "ssl-min-version": module.params["dynamic_mapping_ssl_min_version"],
            "ssl-mode": module.params["dynamic_mapping_ssl_mode"],
            "ssl-pfs": module.params["dynamic_mapping_ssl_pfs"],
            "ssl-send-empty-frags": module.params["dynamic_mapping_ssl_send_empty_frags"],
            "ssl-server-algorithm": module.params["dynamic_mapping_ssl_server_algorithm"],
            "ssl-server-max-version": module.params["dynamic_mapping_ssl_server_max_version"],
            "ssl-server-min-version": module.params["dynamic_mapping_ssl_server_min_version"],
            "ssl-server-session-state-max": module.params["dynamic_mapping_ssl_server_session_state_max"],
            "ssl-server-session-state-timeout": module.params["dynamic_mapping_ssl_server_session_state_timeout"],
            "ssl-server-session-state-type": module.params["dynamic_mapping_ssl_server_session_state_type"],
            "type": module.params["dynamic_mapping_type"],
            "weblogic-server": module.params["dynamic_mapping_weblogic_server"],
            "websphere-server": module.params["dynamic_mapping_websphere_server"],
            "realservers": {
                "client-ip": module.params["dynamic_mapping_realservers_client_ip"],
                "healthcheck": module.params["dynamic_mapping_realservers_healthcheck"],
                "holddown-interval": module.params["dynamic_mapping_realservers_holddown_interval"],
                "http-host": module.params["dynamic_mapping_realservers_http_host"],
                "ip": module.params["dynamic_mapping_realservers_ip"],
                "max-connections": module.params["dynamic_mapping_realservers_max_connections"],
                "monitor": module.params["dynamic_mapping_realservers_monitor"],
                "port": module.params["dynamic_mapping_realservers_port"],
                "seq": module.params["dynamic_mapping_realservers_seq"],
                "status": module.params["dynamic_mapping_realservers_status"],
                "weight": module.params["dynamic_mapping_realservers_weight"],
            },
            "ssl-cipher-suites": {
                "cipher": module.params["dynamic_mapping_ssl_cipher_suites_cipher"],
                "versions": module.params["dynamic_mapping_ssl_cipher_suites_versions"],
            },
        },
        "realservers": {
            "client-ip": module.params["realservers_client_ip"],
            "healthcheck": module.params["realservers_healthcheck"],
            "holddown-interval": module.params["realservers_holddown_interval"],
            "http-host": module.params["realservers_http_host"],
            "ip": module.params["realservers_ip"],
            "max-connections": module.params["realservers_max_connections"],
            "monitor": module.params["realservers_monitor"],
            "port": module.params["realservers_port"],
            "seq": module.params["realservers_seq"],
            "status": module.params["realservers_status"],
            "weight": module.params["realservers_weight"],
        },
        "ssl-cipher-suites": {
            "cipher": module.params["ssl_cipher_suites_cipher"],
            "versions": module.params["ssl_cipher_suites_versions"],
        },
        "ssl-server-cipher-suites": {
            "cipher": module.params["ssl_server_cipher_suites_cipher"],
            "priority": module.params["ssl_server_cipher_suites_priority"],
            "versions": module.params["ssl_server_cipher_suites_versions"],
        }
    }
    module.paramgram = paramgram
    fmgr = None
    if module._socket_path:
        # A persistent httpapi connection is available; build the handler.
        connection = Connection(module._socket_path)
        fmgr = FortiManagerHandler(connection, module)
        fmgr.tools = FMGRCommon()
    else:
        # No persistent connection socket -- fail fast with the shared message.
        module.fail_json(**FAIL_SOCKET_MSG)
    # Child dictionaries that the FortiManager API expects as lists of mappings.
    list_overrides = ['dynamic_mapping', 'realservers', 'ssl-cipher-suites', 'ssl-server-cipher-suites']
    paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
                                                         paramgram=paramgram, module=module)
    results = DEFAULT_RESULT_OBJ
    try:
        results = fmgr_firewall_vip_modify(fmgr, paramgram)
        # govern_response handles changed/failed reporting and fact registration.
        fmgr.govern_response(module=module, results=results,
                             ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
    except Exception as err:
        raise FMGBaseException(err)
    return module.exit_json(**results[1])
# Standard script entry-point guard: run the module only when executed directly.
if __name__ == "__main__":
    main()
| gpl-3.0 |
silentfuzzle/calibre | src/odf/odfmanifest.py | 8 | 3670 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
# This script lists the content of the manifest.xml file
import zipfile
from xml.sax import make_parser,handler
from xml.sax.xmlreader import InputSource
import xml.sax.saxutils
from cStringIO import StringIO
# XML namespace used by ODF manifest documents.
MANIFESTNS="urn:oasis:names:tc:opendocument:xmlns:manifest:1.0"

#-----------------------------------------------------------------------------
#
# ODFMANIFESTHANDLER
#
#-----------------------------------------------------------------------------

class ODFManifestHandler(handler.ContentHandler):
    """ SAX content handler that walks a manifest.xml document and records
        every <manifest:file-entry> in self.manifest, keyed by full path """

    def __init__(self):
        # Maps full-path -> {'media-type': ..., 'full-path': ...}
        self.manifest = {}

        # Dispatch table: (namespace, localname) -> (start handler, end handler)
        # FIXME: Also handle encryption data
        self.elements = {
            (MANIFESTNS, 'file-entry'): (self.s_file_entry, self.donothing),
        }

    def handle_starttag(self, tag, method, attrs):
        # Invoke the registered start-of-element handler.
        method(tag,attrs)

    def handle_endtag(self, tag, method):
        # Invoke the registered end-of-element handler.
        method(tag)

    def startElementNS(self, tag, qname, attrs):
        # Route to the registered handler, or to the catch-all for
        # elements we do not care about.
        start = self.elements.get(tag, (None, None))[0]
        if not start:
            self.unknown_starttag(tag, attrs)
        else:
            self.handle_starttag(tag, start, attrs)

    def endElementNS(self, tag, qname):
        end = self.elements.get(tag, (None, None))[1]
        if not end:
            self.unknown_endtag(tag)
        else:
            self.handle_endtag(tag, end)

    def unknown_starttag(self, tag, attrs):
        # Ignore elements without a registered handler.
        pass

    def unknown_endtag(self, tag):
        pass

    def donothing(self, tag, attrs=None):
        # Placeholder end handler for elements whose close needs no action.
        pass

    def s_file_entry(self, tag, attrs):
        # Record one manifest entry; media-type defaults to the generic
        # binary type when the attribute is absent.
        mediatype = attrs.get((MANIFESTNS, 'media-type'), "application/octet-stream")
        fullpath = attrs.get((MANIFESTNS, 'full-path'))
        self.manifest[fullpath] = { 'media-type':mediatype, 'full-path':fullpath }
#-----------------------------------------------------------------------------
#
# Reading the file
#
#-----------------------------------------------------------------------------
def manifestlist(manifestxml):
    """ Parse the given manifest.xml content and return a dict mapping
        full-path -> {'media-type': ..., 'full-path': ...} """
    collector = ODFManifestHandler()
    parser = make_parser()
    parser.setFeature(handler.feature_namespaces, 1)
    # Changed by Kovid to ignore external DTDs
    parser.setFeature(handler.feature_external_ges, False)
    parser.setContentHandler(collector)
    parser.setErrorHandler(handler.ErrorHandler())
    source = InputSource()
    source.setByteStream(StringIO(manifestxml))
    parser.parse(source)
    return collector.manifest
def odfmanifest(odtfile):
    """ Open the ODF package *odtfile* (path or file-like object) and return
        its manifest as a dict of full-path -> entry dict.

        The archive is always closed, even if reading the manifest fails
        (the original code leaked the handle when z.read() raised).
    """
    z = zipfile.ZipFile(odtfile)
    try:
        manifest = z.read('META-INF/manifest.xml')
    finally:
        z.close()
    return manifestlist(manifest)
if __name__ == "__main__":
    # Ad-hoc command-line use: print the media-type and full path of every
    # manifest entry of the ODF file named as the first argument.
    # NOTE: Python 2 print statement - this module predates Python 3.
    import sys
    result = odfmanifest(sys.argv[1])
    for file in result.values():
        print "%-40s %-40s" % (file['media-type'], file['full-path'])
| gpl-3.0 |
mcallaghan/tmv | BasicBrowser/tmv_app/urls.py | 1 | 3757 | from django.conf.urls import url, include
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path
from tmv_app.views import *
from django.contrib.auth.decorators import login_required
app_name = 'tmv_app'
# URL routes for the topic-model browser. Every view is wrapped in
# login_required, so the entire app requires authentication.
urlpatterns = [
    # Landing page: list of topic-model runs.
    url(r'^$', login_required(runs), name='index'),
    # Topic network views; csvtype selects a CSV export variant.
    path('network/<int:run_id>', login_required(network), name='network'),
    path('network/<int:run_id>/<str:csvtype>', login_required(network), name='network'),
    url(r'^network_wg/(?P<run_id>\d+)$', login_required(network_wg), name='network_wg'),
    url(r'^network_wg/(?P<run_id>\d+)/(?P<t>\d+)/(?P<f>\d+)/(?P<top>.+)$', login_required(network_wg), name='network_wg'),
    url(r'^return_corrs$', login_required(return_corrs), name='return_corrs'),
    # Topic-growth heatmap and its JSON data endpoint.
    url(r'^growth/(?P<run_id>\d+)$', login_required(growth_heatmap), name='growth_heatmap'),
    url(r'^growth_json/(?P<run_id>\d+)/(?P<v>.+)/$', login_required(growth_json), name='growth_json'),
    # topic page and topic doc loader
    url(r'^topic/(?P<topic_id>\d+)/$', login_required(topic_detail), name="topic_detail"),
    url(r'^topic/(?P<topic_id>\d+)/(?P<run_id>\d+)/$', login_required(topic_detail), name="topic_detail"),
    url(r'^get_topic_docs/(?P<topic_id>\d+)/$', login_required(get_topic_docs), name="get_topic_docs"),
    path('get_topicterms/<int:topic_id>', login_required(get_topicterms), name="get_topicterms"),
    url(r'^multi_topic/$', login_required(multi_topic), name="multi_topic"),
    url(r'^highlight_dtm_w$', login_required(highlight_dtm_w), name="highlight_dtm_w"),
    url(r'^dynamic_topic/(?P<topic_id>\d+)/$', login_required(dynamic_topic_detail), name="dynamic_topic_detail"),
    # Detail pages for terms, documents, authors and institutions.
    url(r'^term/(?P<run_id>\d+)/(?P<term_id>\d+)/$', login_required(term_detail), name="term_detail"),
    url(r'^doc/random/(?P<run_id>\d+)$', login_required(doc_random), name="random_doc"),
    url(r'^doc/(?P<doc_id>.+)/(?P<run_id>\d+)$', login_required(doc_detail), name="doc_detail"),
    url(r'^author/(?P<author_name>.+)/(?P<run_id>\d+)$', login_required(author_detail), name="author_detail"),
    url(r'^institution/(?P<run_id>\d+)/(?P<institution_name>.+)/$', login_required(institution_detail)),
    # Home page
    url(r'^topic_presence/(?P<run_id>\d+)$', login_required(topic_presence_detail),name="topics"),
    url(r'^topics_time/(?P<run_id>\d+)/(?P<stype>\d+)$', login_required(topics_time),name="topics_time"),
    url(r'^topics_time_csv/(?P<run_id>\d+)/$', login_required(get_yt_csv),name="topics_time_csv"),
    url(r'^stats/(?P<run_id>\d+)$', login_required(stats), name="stats"),
    # Run management: list, per-project list, thresholds, update, deletion.
    url(r'^runs$', login_required(runs), name='runs'),
    url(r'^runs/(?P<pid>\d+)/$', login_required(runs), name='runs'),
    url(r'^adjust_threshold/(?P<run_id>\d+)/(?P<what>.+)$', login_required(adjust_threshold), name='adjust_threshold'),
    url(r'^update/(?P<run_id>\d+)$', login_required(update_run), name='update'),
    url(r'^runs/delete/(?P<new_run_id>\d+)$', login_required(delete_run), name='delete_run'),
    url(r'^topic/random$', login_required(topic_random)),
    url(r'^term/random$', login_required(term_random)),
    url(r'^print_table/(?P<run_id>\d+)$', login_required(print_table), name="print_table"),
    url(r'^compare/(?P<a>\d+)/(?P<z>\d+)$', login_required(compare_runs), name="compare_runs")
]
# Example:
# (r'^BasicBrowser/', include('BasicBrowser.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
#only serve static content for development
#urlpatterns += static(settings.STATIC_URL,document_root='static')
| gpl-3.0 |
jchevin/MissionPlanner-master | Lib/encodings/cp864.py | 593 | 33919 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP864.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp864 codec built on the module's static translation tables."""

    def encode(self, input, errors='strict'):
        # Unicode -> cp864 bytes via the static encoding map.
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        # cp864 bytes -> Unicode via the static decoding table.
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; cp864 keeps no state between chunks."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length-consumed); only the bytes
        # are needed for the incremental API.
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; cp864 keeps no state between chunks."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, length-consumed); only the text
        # is needed for the incremental API.
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer for cp864; all behaviour is inherited."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader for cp864; all behaviour is inherited."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record under which this codec is registered."""
    # Codec is stateless, so one shared instance can supply both the
    # encode and decode entry points.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp864',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0025: 0x066a, # ARABIC PERCENT SIGN
0x0080: 0x00b0, # DEGREE SIGN
0x0081: 0x00b7, # MIDDLE DOT
0x0082: 0x2219, # BULLET OPERATOR
0x0083: 0x221a, # SQUARE ROOT
0x0084: 0x2592, # MEDIUM SHADE
0x0085: 0x2500, # FORMS LIGHT HORIZONTAL
0x0086: 0x2502, # FORMS LIGHT VERTICAL
0x0087: 0x253c, # FORMS LIGHT VERTICAL AND HORIZONTAL
0x0088: 0x2524, # FORMS LIGHT VERTICAL AND LEFT
0x0089: 0x252c, # FORMS LIGHT DOWN AND HORIZONTAL
0x008a: 0x251c, # FORMS LIGHT VERTICAL AND RIGHT
0x008b: 0x2534, # FORMS LIGHT UP AND HORIZONTAL
0x008c: 0x2510, # FORMS LIGHT DOWN AND LEFT
0x008d: 0x250c, # FORMS LIGHT DOWN AND RIGHT
0x008e: 0x2514, # FORMS LIGHT UP AND RIGHT
0x008f: 0x2518, # FORMS LIGHT UP AND LEFT
0x0090: 0x03b2, # GREEK SMALL BETA
0x0091: 0x221e, # INFINITY
0x0092: 0x03c6, # GREEK SMALL PHI
0x0093: 0x00b1, # PLUS-OR-MINUS SIGN
0x0094: 0x00bd, # FRACTION 1/2
0x0095: 0x00bc, # FRACTION 1/4
0x0096: 0x2248, # ALMOST EQUAL TO
0x0097: 0x00ab, # LEFT POINTING GUILLEMET
0x0098: 0x00bb, # RIGHT POINTING GUILLEMET
0x0099: 0xfef7, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
0x009a: 0xfef8, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
0x009b: None, # UNDEFINED
0x009c: None, # UNDEFINED
0x009d: 0xfefb, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
0x009e: 0xfefc, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
0x009f: None, # UNDEFINED
0x00a1: 0x00ad, # SOFT HYPHEN
0x00a2: 0xfe82, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
0x00a5: 0xfe84, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
0x00a6: None, # UNDEFINED
0x00a7: None, # UNDEFINED
0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM
0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM
0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM
0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM
0x00ac: 0x060c, # ARABIC COMMA
0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM
0x00ae: 0xfea1, # ARABIC LETTER HAH ISOLATED FORM
0x00af: 0xfea5, # ARABIC LETTER KHAH ISOLATED FORM
0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO
0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE
0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO
0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE
0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR
0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE
0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX
0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN
0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE
0x00ba: 0xfed1, # ARABIC LETTER FEH ISOLATED FORM
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: 0xfeb1, # ARABIC LETTER SEEN ISOLATED FORM
0x00bd: 0xfeb5, # ARABIC LETTER SHEEN ISOLATED FORM
0x00be: 0xfeb9, # ARABIC LETTER SAD ISOLATED FORM
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x00a2, # CENT SIGN
0x00c1: 0xfe80, # ARABIC LETTER HAMZA ISOLATED FORM
0x00c2: 0xfe81, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
0x00c3: 0xfe83, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
0x00c4: 0xfe85, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
0x00c5: 0xfeca, # ARABIC LETTER AIN FINAL FORM
0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM
0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM
0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM
0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM
0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM
0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM
0x00ce: 0xfea7, # ARABIC LETTER KHAH INITIAL FORM
0x00cf: 0xfea9, # ARABIC LETTER DAL ISOLATED FORM
0x00d0: 0xfeab, # ARABIC LETTER THAL ISOLATED FORM
0x00d1: 0xfead, # ARABIC LETTER REH ISOLATED FORM
0x00d2: 0xfeaf, # ARABIC LETTER ZAIN ISOLATED FORM
0x00d3: 0xfeb3, # ARABIC LETTER SEEN INITIAL FORM
0x00d4: 0xfeb7, # ARABIC LETTER SHEEN INITIAL FORM
0x00d5: 0xfebb, # ARABIC LETTER SAD INITIAL FORM
0x00d6: 0xfebf, # ARABIC LETTER DAD INITIAL FORM
0x00d7: 0xfec1, # ARABIC LETTER TAH ISOLATED FORM
0x00d8: 0xfec5, # ARABIC LETTER ZAH ISOLATED FORM
0x00d9: 0xfecb, # ARABIC LETTER AIN INITIAL FORM
0x00da: 0xfecf, # ARABIC LETTER GHAIN INITIAL FORM
0x00db: 0x00a6, # BROKEN VERTICAL BAR
0x00dc: 0x00ac, # NOT SIGN
0x00dd: 0x00f7, # DIVISION SIGN
0x00de: 0x00d7, # MULTIPLICATION SIGN
0x00df: 0xfec9, # ARABIC LETTER AIN ISOLATED FORM
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0xfed3, # ARABIC LETTER FEH INITIAL FORM
0x00e2: 0xfed7, # ARABIC LETTER QAF INITIAL FORM
0x00e3: 0xfedb, # ARABIC LETTER KAF INITIAL FORM
0x00e4: 0xfedf, # ARABIC LETTER LAM INITIAL FORM
0x00e5: 0xfee3, # ARABIC LETTER MEEM INITIAL FORM
0x00e6: 0xfee7, # ARABIC LETTER NOON INITIAL FORM
0x00e7: 0xfeeb, # ARABIC LETTER HEH INITIAL FORM
0x00e8: 0xfeed, # ARABIC LETTER WAW ISOLATED FORM
0x00e9: 0xfeef, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
0x00ea: 0xfef3, # ARABIC LETTER YEH INITIAL FORM
0x00eb: 0xfebd, # ARABIC LETTER DAD ISOLATED FORM
0x00ec: 0xfecc, # ARABIC LETTER AIN MEDIAL FORM
0x00ed: 0xfece, # ARABIC LETTER GHAIN FINAL FORM
0x00ee: 0xfecd, # ARABIC LETTER GHAIN ISOLATED FORM
0x00ef: 0xfee1, # ARABIC LETTER MEEM ISOLATED FORM
0x00f0: 0xfe7d, # ARABIC SHADDA MEDIAL FORM
0x00f1: 0x0651, # ARABIC SHADDAH
0x00f2: 0xfee5, # ARABIC LETTER NOON ISOLATED FORM
0x00f3: 0xfee9, # ARABIC LETTER HEH ISOLATED FORM
0x00f4: 0xfeec, # ARABIC LETTER HEH MEDIAL FORM
0x00f5: 0xfef0, # ARABIC LETTER ALEF MAKSURA FINAL FORM
0x00f6: 0xfef2, # ARABIC LETTER YEH FINAL FORM
0x00f7: 0xfed0, # ARABIC LETTER GHAIN MEDIAL FORM
0x00f8: 0xfed5, # ARABIC LETTER QAF ISOLATED FORM
0x00f9: 0xfef5, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
0x00fa: 0xfef6, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
0x00fb: 0xfedd, # ARABIC LETTER LAM ISOLATED FORM
0x00fc: 0xfed9, # ARABIC LETTER KAF ISOLATED FORM
0x00fd: 0xfef1, # ARABIC LETTER YEH ISOLATED FORM
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: None, # UNDEFINED
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'\u066a' # 0x0025 -> ARABIC PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xb0' # 0x0080 -> DEGREE SIGN
u'\xb7' # 0x0081 -> MIDDLE DOT
u'\u2219' # 0x0082 -> BULLET OPERATOR
u'\u221a' # 0x0083 -> SQUARE ROOT
u'\u2592' # 0x0084 -> MEDIUM SHADE
u'\u2500' # 0x0085 -> FORMS LIGHT HORIZONTAL
u'\u2502' # 0x0086 -> FORMS LIGHT VERTICAL
u'\u253c' # 0x0087 -> FORMS LIGHT VERTICAL AND HORIZONTAL
u'\u2524' # 0x0088 -> FORMS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x0089 -> FORMS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x008a -> FORMS LIGHT VERTICAL AND RIGHT
u'\u2534' # 0x008b -> FORMS LIGHT UP AND HORIZONTAL
u'\u2510' # 0x008c -> FORMS LIGHT DOWN AND LEFT
u'\u250c' # 0x008d -> FORMS LIGHT DOWN AND RIGHT
u'\u2514' # 0x008e -> FORMS LIGHT UP AND RIGHT
u'\u2518' # 0x008f -> FORMS LIGHT UP AND LEFT
u'\u03b2' # 0x0090 -> GREEK SMALL BETA
u'\u221e' # 0x0091 -> INFINITY
u'\u03c6' # 0x0092 -> GREEK SMALL PHI
u'\xb1' # 0x0093 -> PLUS-OR-MINUS SIGN
u'\xbd' # 0x0094 -> FRACTION 1/2
u'\xbc' # 0x0095 -> FRACTION 1/4
u'\u2248' # 0x0096 -> ALMOST EQUAL TO
u'\xab' # 0x0097 -> LEFT POINTING GUILLEMET
u'\xbb' # 0x0098 -> RIGHT POINTING GUILLEMET
u'\ufef7' # 0x0099 -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
u'\ufef8' # 0x009a -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
u'\ufffe' # 0x009b -> UNDEFINED
u'\ufffe' # 0x009c -> UNDEFINED
u'\ufefb' # 0x009d -> ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
u'\ufefc' # 0x009e -> ARABIC LIGATURE LAM WITH ALEF FINAL FORM
u'\ufffe' # 0x009f -> UNDEFINED
u'\xa0' # 0x00a0 -> NON-BREAKING SPACE
u'\xad' # 0x00a1 -> SOFT HYPHEN
u'\ufe82' # 0x00a2 -> ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
u'\xa3' # 0x00a3 -> POUND SIGN
u'\xa4' # 0x00a4 -> CURRENCY SIGN
u'\ufe84' # 0x00a5 -> ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
u'\ufffe' # 0x00a6 -> UNDEFINED
u'\ufffe' # 0x00a7 -> UNDEFINED
u'\ufe8e' # 0x00a8 -> ARABIC LETTER ALEF FINAL FORM
u'\ufe8f' # 0x00a9 -> ARABIC LETTER BEH ISOLATED FORM
u'\ufe95' # 0x00aa -> ARABIC LETTER TEH ISOLATED FORM
u'\ufe99' # 0x00ab -> ARABIC LETTER THEH ISOLATED FORM
u'\u060c' # 0x00ac -> ARABIC COMMA
u'\ufe9d' # 0x00ad -> ARABIC LETTER JEEM ISOLATED FORM
u'\ufea1' # 0x00ae -> ARABIC LETTER HAH ISOLATED FORM
u'\ufea5' # 0x00af -> ARABIC LETTER KHAH ISOLATED FORM
u'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO
u'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE
u'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO
u'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE
u'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR
u'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE
u'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX
u'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN
u'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT
u'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE
u'\ufed1' # 0x00ba -> ARABIC LETTER FEH ISOLATED FORM
u'\u061b' # 0x00bb -> ARABIC SEMICOLON
u'\ufeb1' # 0x00bc -> ARABIC LETTER SEEN ISOLATED FORM
u'\ufeb5' # 0x00bd -> ARABIC LETTER SHEEN ISOLATED FORM
u'\ufeb9' # 0x00be -> ARABIC LETTER SAD ISOLATED FORM
u'\u061f' # 0x00bf -> ARABIC QUESTION MARK
u'\xa2' # 0x00c0 -> CENT SIGN
u'\ufe80' # 0x00c1 -> ARABIC LETTER HAMZA ISOLATED FORM
u'\ufe81' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
u'\ufe83' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
u'\ufe85' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
u'\ufeca' # 0x00c5 -> ARABIC LETTER AIN FINAL FORM
u'\ufe8b' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
u'\ufe8d' # 0x00c7 -> ARABIC LETTER ALEF ISOLATED FORM
u'\ufe91' # 0x00c8 -> ARABIC LETTER BEH INITIAL FORM
u'\ufe93' # 0x00c9 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
u'\ufe97' # 0x00ca -> ARABIC LETTER TEH INITIAL FORM
u'\ufe9b' # 0x00cb -> ARABIC LETTER THEH INITIAL FORM
u'\ufe9f' # 0x00cc -> ARABIC LETTER JEEM INITIAL FORM
u'\ufea3' # 0x00cd -> ARABIC LETTER HAH INITIAL FORM
u'\ufea7' # 0x00ce -> ARABIC LETTER KHAH INITIAL FORM
u'\ufea9' # 0x00cf -> ARABIC LETTER DAL ISOLATED FORM
u'\ufeab' # 0x00d0 -> ARABIC LETTER THAL ISOLATED FORM
u'\ufead' # 0x00d1 -> ARABIC LETTER REH ISOLATED FORM
u'\ufeaf' # 0x00d2 -> ARABIC LETTER ZAIN ISOLATED FORM
u'\ufeb3' # 0x00d3 -> ARABIC LETTER SEEN INITIAL FORM
u'\ufeb7' # 0x00d4 -> ARABIC LETTER SHEEN INITIAL FORM
u'\ufebb' # 0x00d5 -> ARABIC LETTER SAD INITIAL FORM
u'\ufebf' # 0x00d6 -> ARABIC LETTER DAD INITIAL FORM
u'\ufec1' # 0x00d7 -> ARABIC LETTER TAH ISOLATED FORM
u'\ufec5' # 0x00d8 -> ARABIC LETTER ZAH ISOLATED FORM
u'\ufecb' # 0x00d9 -> ARABIC LETTER AIN INITIAL FORM
u'\ufecf' # 0x00da -> ARABIC LETTER GHAIN INITIAL FORM
u'\xa6' # 0x00db -> BROKEN VERTICAL BAR
u'\xac' # 0x00dc -> NOT SIGN
u'\xf7' # 0x00dd -> DIVISION SIGN
u'\xd7' # 0x00de -> MULTIPLICATION SIGN
u'\ufec9' # 0x00df -> ARABIC LETTER AIN ISOLATED FORM
u'\u0640' # 0x00e0 -> ARABIC TATWEEL
u'\ufed3' # 0x00e1 -> ARABIC LETTER FEH INITIAL FORM
u'\ufed7' # 0x00e2 -> ARABIC LETTER QAF INITIAL FORM
u'\ufedb' # 0x00e3 -> ARABIC LETTER KAF INITIAL FORM
u'\ufedf' # 0x00e4 -> ARABIC LETTER LAM INITIAL FORM
u'\ufee3' # 0x00e5 -> ARABIC LETTER MEEM INITIAL FORM
u'\ufee7' # 0x00e6 -> ARABIC LETTER NOON INITIAL FORM
u'\ufeeb' # 0x00e7 -> ARABIC LETTER HEH INITIAL FORM
u'\ufeed' # 0x00e8 -> ARABIC LETTER WAW ISOLATED FORM
u'\ufeef' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA ISOLATED FORM
u'\ufef3' # 0x00ea -> ARABIC LETTER YEH INITIAL FORM
u'\ufebd' # 0x00eb -> ARABIC LETTER DAD ISOLATED FORM
u'\ufecc' # 0x00ec -> ARABIC LETTER AIN MEDIAL FORM
u'\ufece' # 0x00ed -> ARABIC LETTER GHAIN FINAL FORM
u'\ufecd' # 0x00ee -> ARABIC LETTER GHAIN ISOLATED FORM
u'\ufee1' # 0x00ef -> ARABIC LETTER MEEM ISOLATED FORM
u'\ufe7d' # 0x00f0 -> ARABIC SHADDA MEDIAL FORM
u'\u0651' # 0x00f1 -> ARABIC SHADDAH
u'\ufee5' # 0x00f2 -> ARABIC LETTER NOON ISOLATED FORM
u'\ufee9' # 0x00f3 -> ARABIC LETTER HEH ISOLATED FORM
u'\ufeec' # 0x00f4 -> ARABIC LETTER HEH MEDIAL FORM
u'\ufef0' # 0x00f5 -> ARABIC LETTER ALEF MAKSURA FINAL FORM
u'\ufef2' # 0x00f6 -> ARABIC LETTER YEH FINAL FORM
u'\ufed0' # 0x00f7 -> ARABIC LETTER GHAIN MEDIAL FORM
u'\ufed5' # 0x00f8 -> ARABIC LETTER QAF ISOLATED FORM
u'\ufef5' # 0x00f9 -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
u'\ufef6' # 0x00fa -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
u'\ufedd' # 0x00fb -> ARABIC LETTER LAM ISOLATED FORM
u'\ufed9' # 0x00fc -> ARABIC LETTER KAF ISOLATED FORM
u'\ufef1' # 0x00fd -> ARABIC LETTER YEH ISOLATED FORM
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\ufffe' # 0x00ff -> UNDEFINED
)
### Encoding Map
# Derive the Unicode -> byte map by inverting decoding_map, exactly as
# gencodec.py computes the table it prints. This removes 250+ hand-listed
# entries that duplicate the decoding side and could silently drift out of
# sync with it. (make_encoding_map also inserts a None key for the
# undefined positions; codecs.charmap_encode never looks that key up.)
encoding_map = codecs.make_encoding_map(decoding_map)
| gpl-3.0 |
i386x/doit | doit/support/observer.py | 1 | 2549 | # -*- coding: utf-8 -*-
#! \file ./doit/support/observer.py
#! \author Jiří Kučera, <sanczes@gmail.com>
#! \stamp 2016-02-10 23:00:07 (UTC+01:00, DST+00:00)
#! \project DoIt!: Tools and Libraries for Building DSLs
#! \license MIT
#! \version 0.0.0
#! \fdesc @pyfile.docstr
#
"""\
Observer.\
"""
__license__ = """\
Copyright (c) 2014 - 2017 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
from doit.support.errors import not_implemented
class Observable(object):
    """Subject side of the observer pattern: keeps a listener list and
    pushes change notifications to every attached listener."""
    __slots__ = [ '__listeners' ]

    def __init__(self):
        """Start with no listeners attached."""
        self.__listeners = []
    #-def

    def attach(self, listener):
        """Register `listener`; a listener already present is not re-added."""
        if listener in self.__listeners:
            return
        self.__listeners.append(listener)
    #-def

    def detach(self, listener):
        """Remove every registered occurrence of `listener`."""
        while listener in self.__listeners:
            self.__listeners.remove(listener)
    #-def

    def notify(self):
        """Call ``update(self)`` on each listener, in attach order."""
        for observer in self.__listeners:
            observer.update(self)
    #-def

    def get_state(self):
        """Abstract: return this observable's state."""
        not_implemented()
    #-def
#-class
class Observer(object):
    """Listener side of the observer pattern; subclasses implement update()."""
    __slots__ = []

    def __init__(self):
        """No state to initialize."""
        pass
    #-def

    def update(self, notifier):
        """Abstract: react to a change reported by `notifier`."""
        not_implemented()
    #-def

    def __eq__(self, rhs):
        """An observer is equal only to itself (identity comparison)."""
        return self is rhs
    #-def
#-class
| mit |
dimagol/trex-core | scripts/external_libs/requests/requests/packages/urllib3/contrib/appengine.py | 224 | 10865 | """
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Example usage::
from urllib3 import PoolManager
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
if is_appengine_sandbox():
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
http = AppEngineManager()
else:
# PoolManager uses a socket-level API behind the scenes
http = PoolManager()
r = http.request('GET', 'https://google.com/')
There are `limitations <https://cloud.google.com/appengine/docs/python/\
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
the best choice for your application. There are three options for using
urllib3 on Google App Engine:
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
cost-effective in many circumstances as long as your usage is within the
limitations.
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
Sockets also have `limitations and restrictions
<https://cloud.google.com/appengine/docs/python/sockets/\
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
To use sockets, be sure to specify the following in your ``app.yaml``::
env_variables:
GAE_USE_SOCKETS_HTTPLIB : 'true'
3. If you are using `App Engine Flexible
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
:class:`PoolManager` without any configuration or special environment variables.
"""
from __future__ import absolute_import
import logging
import os
import warnings
from ..packages.six.moves.urllib.parse import urljoin
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
    # Warning category for URLFetch-specific limitations surfaced via warnings.warn().
    pass
class AppEnginePlatformError(HTTPError):
    # Raised when a request cannot be served by URLFetch at all
    # (unavailable service, oversized request/response, unsupported method).
    pass
class AppEngineManager(RequestMethods):
    """
    Connection manager for Google App Engine sandbox applications.

    This manager uses the URLFetch service directly instead of using the
    emulated httplib, and is subject to URLFetch limitations as described in
    the App Engine documentation `here
    <https://cloud.google.com/appengine/docs/python/urlfetch>`_.

    Notably it will raise an :class:`AppEnginePlatformError` if:
        * URLFetch is not available.
        * If you attempt to use this on App Engine Flexible, as full socket
          support is available.
        * If a request size is more than 10 megabytes.
        * If a response size is more than 32 megabytes.
        * If you use an unsupported request method such as OPTIONS.

    Beyond those cases, it will raise normal urllib3 errors.
    """

    def __init__(self, headers=None, retries=None, validate_certificate=True,
                 urlfetch_retries=True):
        """Set up the manager; refuses to run where URLFetch is absent or
        where real sockets are available (Managed VMs)."""
        if not urlfetch:
            raise AppEnginePlatformError(
                "URLFetch is not available in this environment.")

        if is_prod_appengine_mvms():
            raise AppEnginePlatformError(
                "Use normal urllib3.PoolManager instead of AppEngineManager"
                "on Managed VMs, as using URLFetch is not necessary in "
                "this environment.")

        # Always warn so users know they are on the URLFetch code path.
        warnings.warn(
            "urllib3 is using URLFetch on Google App Engine sandbox instead "
            "of sockets. To use sockets directly instead of URLFetch see "
            "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
            AppEnginePlatformWarning)

        RequestMethods.__init__(self, headers)
        self.validate_certificate = validate_certificate
        self.urlfetch_retries = urlfetch_retries

        self.retries = retries or Retry.DEFAULT

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Return False to re-raise any potential exceptions
        return False

    def urlopen(self, method, url, body=None, headers=None,
                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
                **response_kw):
        """Issue one request via urlfetch.fetch(), translating URLFetch
        exceptions into urllib3 ones, then handle redirects/retries by
        recursing with an incremented Retry object."""

        retries = self._get_retries(retries, redirect)

        try:
            # Only let URLFetch follow redirects itself when the retry
            # configuration permits redirects at all.
            follow_redirects = (
                redirect and
                retries.redirect != 0 and
                retries.total)
            response = urlfetch.fetch(
                url,
                payload=body,
                method=method,
                headers=headers or {},
                allow_truncated=False,
                follow_redirects=self.urlfetch_retries and follow_redirects,
                deadline=self._get_absolute_timeout(timeout),
                validate_certificate=self.validate_certificate,
            )
        except urlfetch.DeadlineExceededError as e:
            raise TimeoutError(self, e)

        except urlfetch.InvalidURLError as e:
            # URLFetch reuses InvalidURLError for oversized requests;
            # disambiguate by message.
            if 'too large' in str(e):
                raise AppEnginePlatformError(
                    "URLFetch request too large, URLFetch only "
                    "supports requests up to 10mb in size.", e)
            raise ProtocolError(e)

        except urlfetch.DownloadError as e:
            if 'Too many redirects' in str(e):
                raise MaxRetryError(self, url, reason=e)
            raise ProtocolError(e)

        except urlfetch.ResponseTooLargeError as e:
            raise AppEnginePlatformError(
                "URLFetch response too large, URLFetch only supports"
                "responses up to 32mb in size.", e)

        except urlfetch.SSLCertificateError as e:
            raise SSLError(e)

        except urlfetch.InvalidMethodError as e:
            raise AppEnginePlatformError(
                "URLFetch does not support method: %s" % method, e)

        http_response = self._urlfetch_response_to_http_response(
            response, retries=retries, **response_kw)

        # Handle redirect?
        redirect_location = redirect and http_response.get_redirect_location()
        if redirect_location:
            # Check for redirect response
            if (self.urlfetch_retries and retries.raise_on_redirect):
                raise MaxRetryError(self, url, "too many redirects")
            else:
                # 303 See Other must be re-requested as GET.
                if http_response.status == 303:
                    method = 'GET'

                try:
                    retries = retries.increment(method, url, response=http_response, _pool=self)
                except MaxRetryError:
                    if retries.raise_on_redirect:
                        raise MaxRetryError(self, url, "too many redirects")
                    return http_response

                retries.sleep_for_retry(http_response)
                log.debug("Redirecting %s -> %s", url, redirect_location)
                redirect_url = urljoin(url, redirect_location)
                return self.urlopen(
                    method, redirect_url, body, headers,
                    retries=retries, redirect=redirect,
                    timeout=timeout, **response_kw)

        # Check if we should retry the HTTP response.
        has_retry_after = bool(http_response.getheader('Retry-After'))
        if retries.is_retry(method, http_response.status, has_retry_after):
            retries = retries.increment(
                method, url, response=http_response, _pool=self)
            log.debug("Retry: %s", url)
            retries.sleep(http_response)
            return self.urlopen(
                method, url,
                body=body, headers=headers,
                retries=retries, redirect=redirect,
                timeout=timeout, **response_kw)

        return http_response

    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
        """Adapt a urlfetch response object into a urllib3 HTTPResponse."""

        if is_prod_appengine():
            # Production GAE handles deflate encoding automatically, but does
            # not remove the encoding header.
            content_encoding = urlfetch_resp.headers.get('content-encoding')

            if content_encoding == 'deflate':
                del urlfetch_resp.headers['content-encoding']

        transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
        # We have a full response's content,
        # so let's make sure we don't report ourselves as chunked data.
        if transfer_encoding == 'chunked':
            encodings = transfer_encoding.split(",")
            encodings.remove('chunked')
            urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)

        return HTTPResponse(
            # In order for decoding to work, we must present the content as
            # a file-like object.
            body=BytesIO(urlfetch_resp.content),
            headers=urlfetch_resp.headers,
            status=urlfetch_resp.status_code,
            **response_kw
        )

    def _get_absolute_timeout(self, timeout):
        """Collapse a urllib3 Timeout into the single deadline URLFetch supports."""
        if timeout is Timeout.DEFAULT_TIMEOUT:
            return None  # Defer to URLFetch's default.
        if isinstance(timeout, Timeout):
            # URLFetch has a single deadline; warn if per-phase timeouts were set.
            if timeout._read is not None or timeout._connect is not None:
                warnings.warn(
                    "URLFetch does not support granular timeout settings, "
                    "reverting to total or default URLFetch timeout.",
                    AppEnginePlatformWarning)
            return timeout.total
        return timeout

    def _get_retries(self, retries, redirect):
        """Normalize `retries` to a Retry object, warning about unsupported knobs."""
        if not isinstance(retries, Retry):
            retries = Retry.from_int(
                retries, redirect=redirect, default=self.retries)

        if retries.connect or retries.read or retries.redirect:
            warnings.warn(
                "URLFetch only supports total retries and does not "
                "recognize connect, read, or redirect retry parameters.",
                AppEnginePlatformWarning)

        return retries
def is_appengine():
    """True when running under any recognized App Engine environment."""
    if is_local_appengine():
        return True
    if is_prod_appengine():
        return True
    return is_prod_appengine_mvms()
def is_appengine_sandbox():
    """True on App Engine, excluding Managed VMs (which have real sockets)."""
    if not is_appengine():
        return False
    return not is_prod_appengine_mvms()
def is_local_appengine():
    """True when running under the local App Engine development server."""
    if 'APPENGINE_RUNTIME' not in os.environ:
        return False
    return 'Development/' in os.environ['SERVER_SOFTWARE']
def is_prod_appengine():
    """True on production App Engine (standard runtime, not Managed VMs)."""
    if 'APPENGINE_RUNTIME' not in os.environ:
        return False
    if 'Google App Engine/' not in os.environ['SERVER_SOFTWARE']:
        return False
    return not is_prod_appengine_mvms()
def is_prod_appengine_mvms():
    """True when running on an App Engine Managed VM (GAE_VM env flag)."""
    return os.environ.get('GAE_VM') == 'true'
| apache-2.0 |
nugget/home-assistant | homeassistant/components/sensor/pyload.py | 8 | 5399 | """
Support for monitoring pyLoad.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.pyload/
"""
from datetime import timedelta
import logging
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_SSL, CONF_HOST, CONF_NAME, CONF_PORT, CONF_PASSWORD, CONF_USERNAME,
CONTENT_TYPE_JSON, CONF_MONITORED_VARIABLES)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)

# Connection defaults for a local pyLoad instance.
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'pyLoad'
DEFAULT_PORT = 8000

# Rate limit for API polling (enforced by the Throttle on PyLoadAPI.update).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=15)

# sensor key -> [API status key, friendly name, unit of measurement]
SENSOR_TYPES = {
    'speed': ['speed', 'Speed', 'MB/s'],
}

# Voluptuous schema validating this platform's configuration entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_MONITORED_VARIABLES, default=['speed']):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_SSL, default=False): cv.boolean,
    vol.Optional(CONF_USERNAME): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the pyLoad sensors.

    Builds the API base URL from the validated config, verifies connectivity
    with an initial update, then registers one sensor per monitored variable.
    Returns False when the pyLoad API cannot be reached.
    """
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    ssl = 's' if config.get(CONF_SSL) else ''
    name = config.get(CONF_NAME)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    monitored_types = config.get(CONF_MONITORED_VARIABLES)
    url = "http{}://{}:{}/api/".format(ssl, host, port)

    try:
        # Logs in (if credentials were given) and performs a first update,
        # so connection problems are detected at setup time.
        pyloadapi = PyLoadAPI(
            api_url=url, username=username, password=password)
        pyloadapi.update()
    except (requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError) as conn_err:
        _LOGGER.error("Error setting up pyLoad API: %s", conn_err)
        return False

    devices = []
    for ng_type in monitored_types:
        new_sensor = PyLoadSensor(
            api=pyloadapi, sensor_type=SENSOR_TYPES.get(ng_type),
            client_name=name)
        devices.append(new_sensor)

    # True -> ask Home Assistant to call update() on each entity right away.
    add_entities(devices, True)
class PyLoadSensor(Entity):
    """Representation of a pyLoad sensor."""

    def __init__(self, api, sensor_type, client_name):
        """Initialize a new pyLoad sensor."""
        self.api = api
        self.type = sensor_type[0]
        self._name = '{} {}'.format(client_name, sensor_type[1])
        self._unit_of_measurement = sensor_type[2]
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    def update(self):
        """Update state of sensor."""
        try:
            self.api.update()
        except requests.exceptions.ConnectionError:
            # Connection failures are already logged by the API wrapper.
            return

        status = self.api.status
        if status is None:
            _LOGGER.debug("Update of %s requested, but no status is available",
                          self._name)
            return

        value = status.get(self.type)
        if value is None:
            _LOGGER.warning("Unable to locate value for %s", self.type)
            return

        if "speed" in self.type and value > 0:
            # Convert download rate from Bytes/s to MBytes/s
            self._state = round(value / 2**20, 2)
        else:
            self._state = value
class PyLoadAPI:
    """Simple wrapper for pyLoad's API.

    Holds the session cookies obtained at login and caches the last status
    response in ``self.status``.
    """

    def __init__(self, api_url, username=None, password=None):
        """Initialize pyLoad API and set headers needed later.

        Performs a login POST when credentials are supplied, then primes
        ``self.status`` with an initial update (network I/O at construction).
        """
        self.api_url = api_url
        self.status = None
        self.headers = {CONTENT_TYPE: CONTENT_TYPE_JSON}

        if username is not None and password is not None:
            self.payload = {'username': username, 'password': password}
            self.login = requests.post(
                '{}{}'.format(api_url, 'login'), data=self.payload, timeout=5)
        self.update()

    def post(self, method, params=None):
        """Send a POST request and return the response as a dict.

        NOTE(review): the URL is hard-coded to the 'statusServer' endpoint
        regardless of `method`; `method` only travels in the JSON payload.
        Confirm against the pyLoad API before reusing this for other calls.
        """
        payload = {'method': method}

        if params:
            payload['params'] = params

        try:
            response = requests.post(
                '{}{}'.format(self.api_url, 'statusServer'), json=payload,
                cookies=self.login.cookies, headers=self.headers, timeout=5)
            response.raise_for_status()
            _LOGGER.debug("JSON Response: %s", response.json())
            return response.json()

        except requests.exceptions.ConnectionError as conn_exc:
            _LOGGER.error("Failed to update pyLoad status. Error: %s",
                          conn_exc)
            raise

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Update cached response."""
        # Throttled: at most one real request per MIN_TIME_BETWEEN_UPDATES.
        self.status = self.post('speed')
| apache-2.0 |
mateor/pants | src/python/pants/backend/python/tasks/python_binary_create.py | 5 | 3592 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pex.pex_info import PexInfo
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.tasks.python_task import PythonTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.util.dirutil import safe_mkdir_for
from pants.util.fileutil import atomic_copy
class PythonBinaryCreate(PythonTask):
  """Builds an executable .pex archive for each python_binary target and
  registers it under the 'pex_archives' and 'deployable_archives' products."""

  @classmethod
  def product_types(cls):
    return ['pex_archives', 'deployable_archives']

  @classmethod
  def implementation_version(cls):
    return super(PythonBinaryCreate, cls).implementation_version() + [('PythonBinaryCreate', 1)]

  @property
  def cache_target_dirs(self):
    # Each target gets its own results dir so built pexes are cacheable.
    return True

  @staticmethod
  def is_binary(target):
    """Return True for targets this task should build."""
    return isinstance(target, PythonBinary)

  def __init__(self, *args, **kwargs):
    super(PythonBinaryCreate, self).__init__(*args, **kwargs)
    self._distdir = self.get_options().pants_distdir

  def execute(self):
    binaries = self.context.targets(self.is_binary)

    # Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex.
    names = {}
    for binary in binaries:
      name = binary.name
      if name in names:
        raise TaskError('Cannot build two binaries with the same name in a single invocation. '
                        '{} and {} both have the name {}.'.format(binary, names[name], name))
      names[name] = binary

    with self.invalidated(binaries, invalidate_dependents=True) as invalidation_check:
      python_deployable_archive = self.context.products.get('deployable_archives')
      python_pex_product = self.context.products.get('pex_archives')
      for vt in invalidation_check.all_vts:
        pex_path = os.path.join(vt.results_dir, '{}.pex'.format(vt.target.name))
        if not vt.valid:
          self.context.log.debug('cache for {} is invalid, rebuilding'.format(vt.target))
          self.create_binary(vt.target, vt.results_dir)
        else:
          self.context.log.debug('using cache for {}'.format(vt.target))

        # BUG FIX: register each pex under its own target (vt.target).  The
        # previous code used `binary`, the leaked loop variable from the
        # duplicate-name check above, which registered every pex under one
        # arbitrary (the last) binary target.
        python_pex_product.add(vt.target, os.path.dirname(pex_path)).append(os.path.basename(pex_path))
        python_deployable_archive.add(vt.target, os.path.dirname(pex_path)).append(os.path.basename(pex_path))
        self.context.log.debug('created {}'.format(os.path.relpath(pex_path, get_buildroot())))

        # Create a copy for pex.
        pex_copy = os.path.join(self._distdir, os.path.basename(pex_path))
        safe_mkdir_for(pex_copy)
        atomic_copy(pex_path, pex_copy)
        self.context.log.info('created pex {}'.format(os.path.relpath(pex_copy, get_buildroot())))

  def create_binary(self, binary, results_dir):
    """Build the .pex for `binary` into `results_dir`; returns the pex path."""
    interpreter = self.select_interpreter_for_targets(binary.closure())

    # Embed run metadata into the pex's build properties.
    run_info_dict = self.context.run_tracker.run_info.get_as_dict()
    build_properties = PexInfo.make_build_properties()
    build_properties.update(run_info_dict)

    pexinfo = binary.pexinfo.copy()
    pexinfo.build_properties = build_properties

    with self.temporary_chroot(interpreter=interpreter, pex_info=pexinfo, targets=[binary],
                               platforms=binary.platforms) as chroot:
      pex_path = os.path.join(results_dir, '{}.pex'.format(binary.name))
      chroot.package_pex(pex_path)
      return pex_path
| apache-2.0 |
tectronics/pyafipws | trazarenpre.py | 5 | 11808 | #!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Módulo para Trazabilidad de Precursores Químicos RENPRE Resolución 900/12"
# Información adicional y documentación:
# http://www.sistemasagiles.com.ar/trac/wiki/TrazabilidadPrecursoresQuimicos
__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "GPL 3.0+"
__version__ = "1.11a"
#http://renpre.servicios.pami.org.ar/portal_traza_renpre/paso5.html
import os
import socket
import sys
import datetime, time
import pysimplesoap.client
from pysimplesoap.client import SoapFault
from utils import BaseWS, inicializar_y_capturar_excepciones, get_install_dir
HOMO = False     # homologation (test) mode flag
TYPELIB = False  # whether a COM type library is used
# Homologation/test endpoint; the production endpoint is selectable in main().
WSDL = "https://servicios.pami.org.ar/trazamed.WebServiceSDRN?wsdl"
LOCATION = "https://servicios.pami.org.ar/trazamed.WebServiceSDRN?wsdl"
##WSDL = "https://trazabilidad.pami.org.ar:59050/trazamed.WebServiceSDRN?wsdl" # prod.
class TrazaRenpre(BaseWS):
    "Interface to the SEDRONAR SNT chemical precursor traceability web service"
    # Methods exposed through the COM interface (some inherited from BaseWS).
    _public_methods_ = ['SaveTransacciones',
                        'SendCancelacTransacc',
                        'Conectar', 'LeerError', 'LeerTransaccion',
                        'SetUsername',
                        'SetParametro', 'GetParametro',
                        'GetCodigoTransaccion', 'GetResultado', 'LoadTestXML']
    # Attributes exposed through the COM interface.
    _public_attrs_ = [
        'Username', 'Password',
        'CodigoTransaccion', 'Errores', 'Resultado',
        'XmlRequest', 'XmlResponse',
        'Version', 'InstallDir',
        'Traceback', 'Excepcion', 'LanzarExcepciones',
        ]
    _reg_progid_ = "TrazaRenpre"
    _reg_clsid_ = "{461298DB-0531-47CA-B3D9-B36FE6967209}"

    # Global variables for BaseWS:
    HOMO = HOMO
    WSDL = WSDL
    Version = "%s %s %s" % (__version__, HOMO and 'Homologación' or '',
                            pysimplesoap.client.__version__)

    def __init__(self, reintentos=1):
        # Credentials must be set (SetUsername/SetPassword) before Conectar().
        self.Username = self.Password = None
        BaseWS.__init__(self, reintentos)

    def inicializar(self):
        # Reset per-call result attributes (invoked by the base class).
        BaseWS.inicializar(self)
        self.CodigoTransaccion = self.Errores = self.Resultado = None

    def __analizar_errores(self, ret):
        "Check for and extract errors from the XML response, if any"
        self.Errores = ["%s: %s" % (it.get('_c_error', ""), it.get('_d_error', ""))
                        for it in ret.get('errores', [])]
        self.Resultado = ret.get('resultado')

    def Conectar(self, cache=None, wsdl=None, proxy="", wrapper=None, cacert=None, timeout=30):
        # Connect using the standard base-class method:
        ok = BaseWS.Conectar(self, cache, wsdl, proxy, wrapper, cacert, timeout,
                             soap_server="jetty")
        if ok:
            # if the WSDL is a local file, assume it is already corrected:
            if not self.wsdl.startswith("file"):
                # fix the service location (the WSDL advertises localhost:9050)
                location = self.wsdl[:-5]
                if 'IWebServiceSDRNService' in self.client.services:
                    ws = self.client.services['IWebServiceSDRNService']
                else:
                    ws = self.client.services['IWebServiceSDRN']
                ws['ports']['IWebServiceSDRNPort']['location'] = location
            # Set WS-Security credentials (UsernameToken header):
            self.client['wsse:Security'] = {
                'wsse:UsernameToken': {
                    'wsse:Username': self.Username,
                    'wsse:Password': self.Password,
                    }
                }
        return ok

    @inicializar_y_capturar_excepciones
    def SaveTransacciones(self, usuario, password,
                          gln_origen=None, gln_destino=None, f_operacion=None,
                          id_evento=None, cod_producto=None, n_cantidad=None,
                          n_documento_operacion=None, n_remito=None,
                          id_tipo_transporte=None,
                          id_paso_frontera_ingreso=None,
                          id_paso_frontera_egreso=None,
                          id_tipo_documento_operacion=None,
                          d_dominio_tractor=None,
                          d_dominio_semi=None,
                          n_serie=None, n_lote=None, doc_despacho_plaza=None,
                          djai=None, n_cert_impo_expo=None,
                          id_tipo_documento=None, n_documento=None,
                          m_calidad_analitica=None, m_entrega_parcial=None,
                          doc_permiso_embarque=None, gln_transportista=None,
                          operacion_excento_djai=None, control_duplicidad=None,
                          ):
        "Lets an agent report one or several transactions"
        # build the parameters for this call
        params = { 'gln_origen': gln_origen, 'gln_destino': gln_destino,
                   'f_operacion': f_operacion, 'id_evento': id_evento,
                   'cod_producto': cod_producto, 'n_cantidad': n_cantidad,
                   'n_documento_operacion': n_documento_operacion,
                   'n_remito': n_remito,
                   'id_tipo_transporte': id_tipo_transporte,
                   'id_paso_frontera_ingreso': id_paso_frontera_ingreso,
                   'id_paso_frontera_egreso': id_paso_frontera_egreso,
                   'id_tipo_documento_operacion': id_tipo_documento_operacion,
                   'd_dominio_tractor': d_dominio_tractor,
                   'd_dominio_semi': d_dominio_semi,
                   'n_serie': n_serie, 'n_lote': n_lote,
                   'doc_despacho_plaza': doc_despacho_plaza,
                   'djai': djai, 'n_cert_impo_expo': n_cert_impo_expo,
                   'id_tipo_documento': id_tipo_documento,
                   'n_documento': n_documento,
                   'm_calidad_analitica': m_calidad_analitica,
                   'm_entrega_parcial': m_entrega_parcial,
                   'doc_permiso_embarque': doc_permiso_embarque,
                   'gln_transportista': gln_transportista,
                   'operacion_excento_djai': operacion_excento_djai,
                   'control_duplicidad': control_duplicidad,
                   }
        # merge in general parameters (set via SetParametro):
        params.update(self.params_in)
        res = self.client.saveTransacciones(
            arg0=params,
            arg1=usuario,
            arg2=password,
            )
        ret = res['return']
        self.CodigoTransaccion = ret.get('codigoTransaccion')
        self.__analizar_errores(ret)
        return True

    @inicializar_y_capturar_excepciones
    def SendCancelacTransacc(self, usuario, password, codigo_transaccion):
        "Cancels a transaction"
        # NOTE: the remote operation name differs slightly ('sendCancelaTransac').
        res = self.client.sendCancelaTransac(
            arg0=codigo_transaccion,
            arg1=usuario,
            arg2=password,
            )
        ret = res['return']
        self.CodigoTransaccion = ret['codigoTransaccion']
        self.__analizar_errores(ret)
        return True

    @inicializar_y_capturar_excepciones
    def SendConfirmaTransacc(self, usuario, password, p_ids_transac, f_operacion):
        "Confirms the reception of a product"
        res = self.client.sendConfirmaTransacc(
            arg0=usuario,
            arg1=password,
            arg2={'p_ids_transac': p_ids_transac, 'f_operacion': f_operacion},
            )
        ret = res['return']
        self.CodigoTransaccion = ret.get('id_transac_asociada')
        self.__analizar_errores(ret)
        return True

    @inicializar_y_capturar_excepciones
    def SendAlertaTransacc(self, usuario, password, p_ids_transac_ws):
        "Flags a product; the opposite action to confirming the transaction."
        res = self.client.sendAlertaTransacc(
            arg0=usuario,
            arg1=password,
            arg2=p_ids_transac_ws,
            )
        ret = res['return']
        self.CodigoTransaccion = ret.get('id_transac_asociada')
        self.__analizar_errores(ret)
        return True

    def SetUsername(self, username):
        "Set the username"
        self.Username = username

    def SetPassword(self, password):
        "Set the password"
        self.Password = password

    def GetCodigoTransaccion(self):
        "Return the transaction code"
        return self.CodigoTransaccion

    def GetResultado(self):
        "Return the result"
        return self.Resultado
def main():
    "Main test routine (posts transactions!); Python 2 CLI entry point"
    import os, time, sys
    global WSDL, LOCATION

    DEBUG = '--debug' in sys.argv

    ws = TrazaRenpre()
    # Hard-coded homologation (test) credentials:
    ws.Username = 'testwservice'
    ws.Password = 'testwservicepsw'
    if '--prod' in sys.argv and not HOMO:
        # Switch to the production endpoint:
        WSDL = "https://trazabilidad.pami.org.ar:59050/trazamed.WebServiceSDRN?wsdl"
        print "Usando WSDL:", WSDL
        sys.argv.pop(0)

    ws.Conectar("", WSDL)

    if ws.Excepcion:
        print ws.Excepcion
        print ws.Traceback
        sys.exit(-1)

    #print ws.client.services
    #op = ws.client.get_operation("sendMedicamentos")
    #import pdb;pdb.set_trace()

    if '--test' in sys.argv:
        # Canned test transaction against the homologation service:
        ws.SaveTransacciones(
            usuario='pruebasws', password='pruebasws',
            gln_origen=8888888888888,
            gln_destino=8888888888888,
            f_operacion="20/05/2014",
            id_evento=44,
            cod_producto=88800000000035,        # sulfuric acid
            n_cantidad=1,
            n_documento_operacion=1,
            #m_entrega_parcial="",
            n_remito=123,
            n_serie=112,
            )
        print "Resultado", ws.Resultado
        print "CodigoTransaccion", ws.CodigoTransaccion
        print "Excepciones", ws.Excepcion
        print "Erroes", ws.Errores
    elif '--cancela' in sys.argv:
        # Forward the remaining CLI arguments as cancellation parameters.
        ws.SendCancelacTransacc(*sys.argv[sys.argv.index("--cancela")+1:])
    else:
        ws.SaveTransacciones(*sys.argv[1:])

    print "|Resultado %5s|CodigoTransaccion %10s|Errores|%s|" % (
            ws.Resultado,
            ws.CodigoTransaccion,
            '|'.join(ws.Errores),
            )

    if ws.Excepcion:
        print ws.Traceback
# find the install dir (kept global so it does not change if another dll is used)
INSTALL_DIR = TrazaRenpre.InstallDir = get_install_dir()


if __name__ == '__main__':
    # adjust the default encoding (in case the output is redirected)
    if sys.stdout.encoding is None:
        import codecs, locale
        sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout,"replace");
        sys.stderr = codecs.getwriter(locale.getpreferredencoding())(sys.stderr,"replace");

    if '--register' in sys.argv or '--unregister' in sys.argv:
        # (Un)register the COM server on Windows.
        import pythoncom
        import win32com.server.register
        win32com.server.register.UseCommandLine(TrazaRenpre)
    elif "/Automate" in sys.argv:
        # MS seems to like /automate to run the class factories.
        import win32com.server.localserver
        #win32com.server.localserver.main()
        # start the server.
        win32com.server.localserver.serve([TrazaRenpre._reg_clsid_])
    else:
        main()
| gpl-3.0 |
opensourcechipspark/platform_external_chromium_org | chrome/common/extensions/docs/server2/instance_servlet_test.py | 24 | 1988 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from empty_dir_file_system import EmptyDirFileSystem
from instance_servlet import InstanceServlet
from servlet import Request
from fail_on_access_file_system import FailOnAccessFileSystem
from test_branch_utility import TestBranchUtility
from test_util import DisableLogging
# XXX(kalman): what is this test supposed to be?
# Create a test host file system creator which failz?
# NOTE(kalman): The ObjectStore created by the InstanceServlet is backed onto
# our fake AppEngine memcache/datastore, so the tests aren't isolated.
class _TestDelegate(InstanceServlet.Delegate):
  # Test delegate: serves canned branch data and an empty app-samples file
  # system so the servlet never needs real backend data.
  def __init__(self, file_system_type):
    # Stored but unused here; see the XXX note above about this test's intent.
    self._file_system_type = file_system_type

  def CreateBranchUtility(self, object_store_creator):
    return TestBranchUtility.CreateWithCannedData()

  def CreateAppSamplesFileSystem(self, object_store_creator):
    return EmptyDirFileSystem()
class InstanceServletTest(unittest.TestCase):
  @DisableLogging('warning')
  def testHostFileSystemNotAccessed(self):
    # The delegate's FailOnAccessFileSystem raises on any host-file-system
    # access, so every request below must be answerable without it.
    delegate = _TestDelegate(FailOnAccessFileSystem)
    constructor = InstanceServlet.GetConstructor(delegate_for_test=delegate)
    def test_path(path, status=404):
      # Issue a request and check only the resulting HTTP status.
      response = constructor(Request.ForTest(path)).Get()
      self.assertEqual(status, response.status)
    # Plain paths: expected to 404.
    test_path('extensions/storage.html')
    test_path('apps/storage.html')
    test_path('extensions/examples/foo.zip')
    test_path('extensions/examples/foo.html')
    test_path('static/foo.css')
    # Channel-prefixed ('beta/') paths: expected to redirect (301).
    test_path('beta/extensions/storage.html', status=301)
    test_path('beta/apps/storage.html', status=301)
    test_path('beta/extensions/examples/foo.zip', status=301)
    test_path('beta/extensions/examples/foo.html', status=301)
    test_path('beta/static/foo.css', status=301)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
matija94/show-me-the-code | data-science/CollectiveIntelligence/com/machinelearning/inaction/knn.py | 1 | 3026 | '''
Created on Apr 4, 2017
kNN: k Nearest Neighbors
Input: inX: vector to compare to existing dataset (1xN)
dataSet: size m data set of known vectors (NxM)
labels: data set labels (1xM vector)
k: number of neighbors to use for comparison (should be an odd number)
Output: the most popular class label
@author: matija
k nearest neighbors
'''
import numpy as np
import operator
import matplotlib.pyplot as plt
def createDataSet():
    '''
    Return a tiny hard-coded data set and its labels (mock data for tests).
    '''
    points = [[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]]
    return np.array(points), ['A', 'A', 'B', 'B']
def file2matix(filename):
    '''
    Parse a tab-separated data file into a feature matrix and label list.

    Each line must contain three numeric feature columns followed by an
    integer class label.

    Returns:
        (numpy array of shape (n, 3), list of n int labels)
    '''
    # Read the file once inside a context manager; the original opened the
    # file twice (once just to count lines) and never closed either handle.
    with open(filename) as fr:
        lines = [line.strip() for line in fr]
    returnMat = np.zeros((len(lines), 3))
    classLabelVector = []
    for index, line in enumerate(lines):
        listFromLine = line.split('\t')
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))
    return returnMat, classLabelVector
def autoNorm(dataSet):
    '''
    Scale each feature column of `dataSet` linearly into [0, 1] so features
    measured in different ranges are treated equally:

        newVal = (oldVal - minVal) / (maxVal - minVal)

    Returns:
        (normalized data set, per-column ranges, per-column minimums)

    NOTE(review): a constant column yields a zero range and a division by
    zero, exactly as in the original implementation.
    '''
    minVals = dataSet.min(0)   # column-wise minima
    maxVals = dataSet.max(0)   # column-wise maxima
    ranges = maxVals - minVals
    # Broadcasting applies the per-column shift and scale to every row,
    # replacing the explicit np.tile copies and the dead np.zeros buffer
    # the original allocated and immediately overwrote.
    normDataSet = (dataSet - minVals) / ranges
    return normDataSet, ranges, minVals
def classify0(inX, dataSet, labels, k):
    '''
    Classify `inX` by majority vote among its k nearest neighbors.

    Input:  inX: vector to compare to existing dataset (1xN)
            dataSet: size m data set of known vectors (NxM)
            labels: data set labels (1xM vector)
            k: number of neighbors to use (an odd number avoids ties)
    Output: the most popular class label among the k nearest rows
    '''
    dataSetSize = dataSet.shape[0]
    # Euclidean distance from inX to every stored vector.
    diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat**2
    # Sum over columns (axis=1) to get per-row squared distances.
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances**0.5
    # Indices of training vectors sorted nearest-first.
    sortedDistIndicies = distances.argsort()
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # items() instead of the Python-2-only iteritems(), so the module also
    # runs under Python 3; sort semantics are unchanged.
    sortedClassCount = sorted(classCount.items(),
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
submergerock/avatar-hadoop | src/examples/python/pyAbacus/JythonAbacus.py | 123 | 2767 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from org.apache.hadoop.fs import Path
from org.apache.hadoop.io import *
from org.apache.hadoop.mapred import *
from org.apache.hadoop.abacus import *
from java.util import *;
import sys
class AbacusMapper(ValueAggregatorMapper):
    """Jython map task: delegates straight to Hadoop's ValueAggregatorMapper."""
    def map(self, key, value, output, reporter):
        ValueAggregatorMapper.map(self, key, value, output, reporter);
class AbacusReducer(ValueAggregatorReducer):
    """Jython reduce task: delegates straight to Hadoop's ValueAggregatorReducer."""
    def reduce(self, key, values, output, reporter):
        ValueAggregatorReducer.reduce(self, key, values, output, reporter);
class AbacusCombiner(ValueAggregatorCombiner):
    """Jython combiner: delegates straight to Hadoop's ValueAggregatorCombiner."""
    def reduce(self, key, values, output, reporter):
        ValueAggregatorCombiner.reduce(self, key, values, output, reporter);
def printUsage(code):
    # Print the command-line synopsis and terminate with the given exit code.
    print "Abacus <input> <output> <numOfReducers> <inputformat> <specfile>"
    sys.exit(code)
def main(args):
    """Configure and launch the Abacus MapReduce job (Jython entry point).

    Expects argv-style ``args``:
        args[1] input dir, args[2] output dir, args[3] number of reducers,
        args[4] input format name, args[5] aggregator spec file.
    """
    if len(args) < 6:
        printUsage(1);
    # NOTE(review): inDir/outDir are assigned but the job is configured from
    # args[1]/args[2] directly further down.
    inDir = args[1];
    outDir = args[2];
    numOfReducers = int(args[3]);
    theInputFormat = args[4];
    specFile = args[5];
    print "numOfReducers: ", numOfReducers, "theInputFormat: ", theInputFormat, "specFile: ", specFile
    conf = JobConf(AbacusMapper);
    conf.setJobName("recordcount");
    # The spec file defines which value aggregators to apply.
    conf.addDefaultResource(Path(specFile));
    if theInputFormat=="textinputformat":
        conf.setInputFormat(TextInputFormat);
    else:
        conf.setInputFormat(SequenceFileInputFormat);
    conf.setOutputFormat(TextOutputFormat);
    conf.setMapOutputKeyClass(Text);
    conf.setMapOutputValueClass(Text);
    conf.setOutputKeyClass(Text);
    conf.setOutputValueClass(Text);
    conf.setNumMapTasks(1);
    conf.setNumReduceTasks(numOfReducers);
    conf.setMapperClass(AbacusMapper);
    conf.setCombinerClass(AbacusCombiner);
    conf.setReducerClass(AbacusReducer);
    conf.setInputPath(Path(args[1]))
    conf.setOutputPath(Path(args[2]))
    # Blocks until the job completes.
    JobClient.runJob(conf);
# Script entry point: Jython invokes this with the full argv.
if __name__ == "__main__":
    main(sys.argv)
| apache-2.0 |
RyanSchuster/AzIncHost | controlboard.py | 1 | 3633 | # (C) 2013 Andy McCormick, Ryan Schuster
# MIT license
# see LICENSE.txt for details
import regtran
# control board interface module
# TODO:
# factor out HEX file parsing to a separate module
def wordToByteString(word):
    """Encode a 16-bit word as a 2-character little-endian byte string.

    The low byte comes first, matching the wire format expected by the
    control board's register protocol.
    """
    # Cleanup: build the string directly instead of eval()-ing a constructed
    # string literal; the result is byte-for-byte identical.
    return chr(word & 0xff) + chr((word & 0xff00) >> 8)
class ControlBoard:
    """Driver for the control board, spoken to over a RegTran register channel.

    Provides state control, Intel-HEX flash programming of the slave
    device, and EEPROM / motion-register access.
    """
    # One-character register identifiers understood by the board firmware.
    REG_STATE = 's'
    REG_ADDRESS = 'a'
    REG_FLASH = 'p'
    REG_EEPROM = 'e'
    REG_TEST = 't'
    REG_MOTION = 'm'
    def __init__(self):
        self.regtran = regtran.RegTran()
    def open(self, port, baud):
        # Third argument is a RegTran channel parameter
        # (NOTE(review): presumably retries or timeout -- confirm in regtran).
        self.regtran.channelOpen(port, baud, 2)
    def close(self):
        self.regtran.channelClose()
    def reset(self):
        # Resets the communication link itself (the slave is reset via resetSlave).
        self.regtran.reset()
    def protocolTest(self):
        """Read the test register to verify the link is alive."""
        return self.regtran.commandRead(ControlBoard.REG_TEST)
    def getState(self):
        return self.regtran.commandRead(ControlBoard.REG_STATE)
    # State commands are written as 32-byte space-padded ASCII payloads.
    def pmodeStart(self):
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'program'.ljust(32))
    def pmodeEnd(self):
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'idle'.ljust(32))
    def sampleStart(self):
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'sample'.ljust(32))
    def sampleEnd(self):
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'idle'.ljust(32))
    def resetSlave(self):
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'reset'.ljust(32));
    def erase(self):
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'erase'.ljust(32))
    def writeEeprom(self, address, data):
        # Latch the target address first, then write the payload.
        self.regtran.commandWrite(ControlBoard.REG_ADDRESS, wordToByteString(address).ljust(32))
        self.regtran.commandWrite(ControlBoard.REG_EEPROM, data)
    def readEeprom(self, address):
        self.regtran.commandWrite(ControlBoard.REG_ADDRESS, wordToByteString(address).ljust(32))
        return self.regtran.commandRead(ControlBoard.REG_EEPROM)
    def parseHexLine(self, line):
        """Parse one Intel-HEX record from ``line``.

        Returns a ``(word_address, data_string)`` tuple for a data record,
        or ``False`` for the end-of-file record / any malformed line.
        """
        line = line.rstrip()
        if len(line) < 11:
            print "invalid HEX line (too short)"
            return False
        if line[0] != ':':
            print "invalid HEX line (no start colon)"
            return False
        byteCount = int(line[1:3], 16)
        if len(line) != 2 * byteCount + 11:
            print "invalid HEX line (byte count mismatch, found ", len(line), " expected ", 2 * byteCount + 11, ")"
            return False
        address = int(line[3:7], 16)
        # NOTE(review): addressString is computed but never used.
        addressString = '"\\x' + line[5:7] + '\\x' + line[3:5] + '"'
        # TODO: check address
        recordType = int(line[7:9], 16)
        # Rebuild the record's data bytes by eval()-ing an escaped string literal.
        dataString = '"'
        for i in range(byteCount):
            dataString = dataString + '\\x' + line[9 + i * 2 : 11 + i * 2]
        dataString = dataString + '"'
        data = eval(dataString)
        checksum = int(line[9 + byteCount * 2 : 11 + byteCount * 2], 16)
        # TODO: check checksum
        if recordType == 0:
            # Data record. The HEX byte address is halved to a word address
            # (NOTE(review): assumes a word-addressed flash, AVR-style -- confirm).
            return (address >> 1, data)
        elif recordType == 1:
            # End-of-file record.
            return False
        else:
            print "invalid HEX line (bad record type: ", recordType, ")"
            return False
    def writeHexFile(self, filename):
        """Parse an Intel-HEX file and program it into the slave's flash,
        one 16-word page at a time."""
        f = open(filename, 'r')
        parsed = list();
        for line in f:
            new = self.parseHexLine(line)
            if new != False:
                parsed.append(new)
        # Group the records into pages keyed by the 16-word page base address.
        pages = dict();
        for line in parsed:
            key = line[0] & 0xfff0
            # NOTE(review): dict.has_key() is Python-2 only.
            if pages.has_key(key):
                if line[0] & 0x0f:
                    # Record sits after the data already collected for this page.
                    pages[key] = pages[key] + line[1]
                else:
                    pages[key] = line[1] + pages[key]
            else:
                # NOTE(review): both branches below are identical; this
                # if/else looks like a leftover and could be collapsed.
                if line[0] & 0x0f:
                    pages[key] = line[1]
                else:
                    pages[key] = line[1]
        for key in pages:
            # Little-endian page address, then the page data, each padded to
            # the 32-byte register frame with NUL bytes.
            addrStr = '"\\x' + hex(key & 0xff)[2:].rjust(2, '0') + '\\x' + hex((key & 0xff00) >> 8)[2:].rjust(2, '0') + '"'
            self.regtran.commandWrite(ControlBoard.REG_ADDRESS, eval(addrStr).ljust(32, '\0'))
            self.regtran.commandWrite(ControlBoard.REG_FLASH, pages[key].ljust(32, '\0'))
    def readMotion(self):
        return self.regtran.commandRead(ControlBoard.REG_MOTION)
| mit |
pigeonflight/strider-plone | docker/appengine/lib/django-1.3/django/conf/locale/nl/formats.py | 329 | 3056 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax), used when rendering values.
DATE_FORMAT = 'j F Y' # '20 januari 2009'
TIME_FORMAT = 'H:i' # '15:23'
DATETIME_FORMAT = 'j F Y H:i' # '20 januari 2009 15:23'
YEAR_MONTH_FORMAT = 'F Y' # 'januari 2009'
MONTH_DAY_FORMAT = 'j F' # '20 januari'
SHORT_DATE_FORMAT = 'j-n-Y' # '20-1-2009'
SHORT_DATETIME_FORMAT = 'j-n-Y H:i' # '20-1-2009 15:23'
FIRST_DAY_OF_WEEK = 1 # Monday (in Dutch 'maandag')
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Input formats are tried in order until one parses the user's input.
DATE_INPUT_FORMATS = (
    '%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20'
    # '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
    # '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 09'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '15:23:35'
    '%H.%M:%S', # '15.23:35'
    '%H.%M', # '15.23'
    '%H:%M', # '15:23'
)
DATETIME_INPUT_FORMATS = (
    # With time in %H:%M:%S :
    '%d-%m-%Y %H:%M:%S', '%d-%m-%y %H:%M:%S', '%Y-%m-%d %H:%M:%S', # '20-01-2009 15:23:35', '20-01-09 15:23:35', '2009-01-20 15:23:35'
    # '%d %b %Y %H:%M:%S', '%d %b %y %H:%M:%S', # '20 jan 2009 15:23:35', '20 jan 09 15:23:35'
    # '%d %B %Y %H:%M:%S', '%d %B %y %H:%M:%S', # '20 januari 2009 15:23:35', '20 januari 2009 15:23:35'
    # With time in %H.%M:%S :
    '%d-%m-%Y %H.%M:%S', '%d-%m-%y %H.%M:%S', # '20-01-2009 15.23:35', '20-01-09 15.23:35'
    # '%d %b %Y %H.%M:%S', '%d %b %y %H.%M:%S', # '20 jan 2009 15.23:35', '20 jan 09 15.23:35'
    # '%d %B %Y %H.%M:%S', '%d %B %y %H.%M:%S', # '20 januari 2009 15.23:35', '20 januari 2009 15.23:35'
    # With time in %H:%M :
    '%d-%m-%Y %H:%M', '%d-%m-%y %H:%M', '%Y-%m-%d %H:%M', # '20-01-2009 15:23', '20-01-09 15:23', '2009-01-20 15:23'
    # '%d %b %Y %H:%M', '%d %b %y %H:%M', # '20 jan 2009 15:23', '20 jan 09 15:23'
    # '%d %B %Y %H:%M', '%d %B %y %H:%M', # '20 januari 2009 15:23', '20 januari 2009 15:23'
    # With time in %H.%M :
    '%d-%m-%Y %H.%M', '%d-%m-%y %H.%M', # '20-01-2009 15.23', '20-01-09 15.23'
    # '%d %b %Y %H.%M', '%d %b %y %H.%M', # '20 jan 2009 15.23', '20 jan 09 15.23'
    # '%d %B %Y %H.%M', '%d %B %y %H.%M', # '20 januari 2009 15.23', '20 januari 2009 15.23'
    # Without time :
    '%d-%m-%Y', '%d-%m-%y', '%Y-%m-%d', # '20-01-2009', '20-01-09', '2009-01-20'
    # '%d %b %Y', '%d %b %y', # '20 jan 2009', '20 jan 09'
    # '%d %B %Y', '%d %B %y', # '20 januari 2009', '20 januari 2009'
)
# Number formatting (Dutch convention: comma decimal mark, dot grouping).
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit |
mattvonrocketstein/ymir | ymir/schema/validators.py | 1 | 2602 | # -*- coding: utf-8 -*-
""" ymir.schema.validators
"""
import os
from voluptuous import Invalid
def nested_vagrant_validator(dct, ):
    """Validate the `vagrant` sub-hash of a ymir service description.

    Requires a dict containing every expected vagrant sub-key; raises
    voluptuous.Invalid on the first problem found.
    """
    if not isinstance(dct, dict):
        raise Invalid("expected hash for key @ `vagrant`")
    required = 'name boot_timeout box box_check_update sync_disabled ram cpus'
    missing = [k for k in required.split() if k not in dct]
    if missing:
        template = 'key at `vagrant` would contain sub-key "{0}"'
        raise Invalid(template.format(missing[0]))
def filepath_validator(string, key='unknown'):
    """Validate that `string` names an existing regular file.

    Expands `~` and relative paths before checking; raises
    voluptuous.Invalid with a descriptive message on any failure.
    """
    if not isinstance(string, basestring):
        raise Invalid("expected string for key @ `{0}`".format(key))
    path = string.strip()
    if path.startswith("~"):
        path = os.path.expanduser(path)
    if not os.path.isabs(path):
        path = os.path.abspath(path)
    if not os.path.exists(path):
        raise Invalid(
            "filepath '{0}' at `{1}` does not exist".format(path, key))
    if not os.path.isfile(path):
        raise Invalid(
            "filepath '{0}' at `{1}` exists, but is not a file".format(path, key))
# `extends` must point at an existing file on disk.
_validate_extends_field = lambda val: filepath_validator(val, key="extends")
def list_of_dicts(lst, key=None):
    """Validate that ``lst`` is a list whose elements are all dicts.

    Parameters:
        lst: the candidate value.
        key: schema key name, used only to build error messages.

    Raises:
        Invalid: if ``lst`` is not a list, or any element is not a dict.
    """
    if not isinstance(lst, list):
        # BUGFIX: the message used to say "list of strings" (copy-pasted
        # from list_of_strings); this validator checks for a list of dicts.
        err = ("expected list of dicts for key @ `{0}`")
        err = err.format(key or 'unknown')
        raise Invalid(err)
    for i, x in enumerate(lst):
        if not isinstance(x, dict):
            err = ('expected JSON but top[{0}][{1}] is {2}')
            err = err.format(key, i, type(x))
            raise Invalid(err)
def list_of_strings(lst, key=None):
    """Validate that ``lst`` is a list whose elements are all strings.

    ``key`` is the schema key name, used only for error messages.
    Raises voluptuous.Invalid on the first non-string element.
    """
    if not isinstance(lst, list):
        err = ("expected list of strings for key @ `{0}`, got {1}")
        # NOTE(review): the second placeholder formats the `list` type
        # itself, not the offending value -- looks unintended.
        err = err.format(key or 'unknown', str(list))
        raise Invalid(err)
    for i, x in enumerate(lst):
        if not isinstance(x, basestring):
            # NOTE(review): stray debug print (Python-2 statement syntax).
            print lst
            err = (
                'expected string for key@`{0}`, but index {1} is "{3}" of type {2}')
            err = err.format(
                key, i, type(x).__name__, x)
            raise Invalid(err)
# True when x is unicode or int (fields that may be either; Python 2 only).
string_or_int = lambda x: isinstance(x, (unicode, int))
# Field-specific aliases that bind the schema key name into the error text.
_validate_sl_field = lambda lst: list_of_strings(lst, key='setup_list')
_validate_sg_field = lambda lst: list_of_strings(lst, key='security_groups')
_validate_pl_field = lambda lst: list_of_strings(lst, key='provision_list')
def _validate_puppet_parser(x):
""" """
if x != 'future':
err = "puppet_parser has only one acceptable value: 'future'"
raise Invalid(err)
| mit |
jk1/intellij-community | python/lib/Lib/site-packages/django/db/utils.py | 78 | 6129 | import inspect
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
DEFAULT_DB_ALIAS = 'default'
# Define some exceptions that mirror the PEP249 interface.
# We will rethrow any backend-specific errors using these
# common wrappers
class DatabaseError(Exception):
    """Common wrapper for backend-specific database errors (mirrors PEP 249)."""
    pass

class IntegrityError(DatabaseError):
    """Raised when the database reports a relational-integrity violation."""
    pass
def load_backend(backend_name):
    """Import and return the database backend module for ``backend_name``.

    Accepts either a deprecated short name (e.g. 'sqlite3') or a fully
    qualified module path (e.g. 'django.db.backends.sqlite3'). Raises
    ImproperlyConfigured with a list of built-in backends when the name
    cannot be resolved.
    """
    try:
        # First try the legacy short form rooted at django.db.backends.
        module = import_module('.base', 'django.db.backends.%s' % backend_name)
        import warnings
        warnings.warn(
            "Short names for DATABASE_ENGINE are deprecated; prepend with 'django.db.backends.'",
            DeprecationWarning
        )
        return module
    except ImportError, e:
        # Look for a fully qualified database backend name
        try:
            return import_module('.base', backend_name)
        except ImportError, e_user:
            # The database backend wasn't found. Display a helpful error message
            # listing all possible (built-in) database backends.
            backend_dir = os.path.join(os.path.dirname(__file__), 'backends')
            try:
                available_backends = [f for f in os.listdir(backend_dir)
                        if os.path.isdir(os.path.join(backend_dir, f))
                        and not f.startswith('.')]
            except EnvironmentError:
                available_backends = []
            available_backends.sort()
            if backend_name not in available_backends:
                error_msg = ("%r isn't an available database backend. \n" +
                    "Try using django.db.backends.XXX, where XXX is one of:\n    %s\n" +
                    "Error was: %s") % \
                    (backend_name, ", ".join(map(repr, available_backends)), e_user)
                raise ImproperlyConfigured(error_msg)
            else:
                raise # If there's some other error, this must be an error in Django itself.
class ConnectionDoesNotExist(Exception):
    """Raised when an unknown database alias is requested."""
    pass
class ConnectionHandler(object):
    """Lazily creates and caches one DatabaseWrapper per database alias."""

    def __init__(self, databases):
        self.databases = databases
        self._connections = {}

    def ensure_defaults(self, alias):
        """
        Puts the defaults into the settings dictionary for a given connection
        where no settings is provided.
        """
        try:
            options = self.databases[alias]
        except KeyError:
            raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
        # Missing or placeholder ENGINE values fall back to the dummy backend.
        engine = options.setdefault('ENGINE', 'django.db.backends.dummy')
        if not engine or engine == 'django.db.backends.':
            options['ENGINE'] = 'django.db.backends.dummy'
        options.setdefault('OPTIONS', {})
        for test_key in ('TEST_CHARSET', 'TEST_COLLATION', 'TEST_NAME', 'TEST_MIRROR'):
            options.setdefault(test_key, None)
        options.setdefault('TIME_ZONE', settings.TIME_ZONE)
        for blank_key in ('NAME', 'USER', 'PASSWORD', 'HOST', 'PORT'):
            options.setdefault(blank_key, '')

    def __getitem__(self, alias):
        # Serve a cached wrapper when one exists; otherwise build and cache it.
        try:
            return self._connections[alias]
        except KeyError:
            pass
        self.ensure_defaults(alias)
        settings_dict = self.databases[alias]
        backend = load_backend(settings_dict['ENGINE'])
        wrapper = backend.DatabaseWrapper(settings_dict, alias)
        self._connections[alias] = wrapper
        return wrapper

    def __iter__(self):
        return iter(self.databases)

    def all(self):
        """Return a wrapper for every configured alias (creating as needed)."""
        return [self[alias] for alias in self.databases]
class ConnectionRouter(object):
    """Chains user-supplied database routers, falling back to the default alias."""

    def __init__(self, routers):
        # Each entry may be a router instance or a dotted path to a router class.
        self.routers = []
        for r in routers:
            if isinstance(r, basestring):
                try:
                    module_name, klass_name = r.rsplit('.', 1)
                    module = import_module(module_name)
                except ImportError, e:
                    raise ImproperlyConfigured('Error importing database router %s: "%s"' % (klass_name, e))
                try:
                    router_class = getattr(module, klass_name)
                except AttributeError:
                    raise ImproperlyConfigured('Module "%s" does not define a database router name "%s"' % (module, klass_name))
                else:
                    router = router_class()
            else:
                router = r
            self.routers.append(router)

    # NOTE: called at class-definition time (not a method) to stamp out the
    # db_for_read / db_for_write dispatchers below.
    def _router_func(action):
        def _route_db(self, model, **hints):
            # Ask each router in turn; the first non-empty answer wins.
            chosen_db = None
            for router in self.routers:
                try:
                    method = getattr(router, action)
                except AttributeError:
                    # If the router doesn't have a method, skip to the next one.
                    pass
                else:
                    chosen_db = method(model, **hints)
                    if chosen_db:
                        return chosen_db
            # No router decided: prefer the instance's current db, else default.
            try:
                return hints['instance']._state.db or DEFAULT_DB_ALIAS
            except KeyError:
                return DEFAULT_DB_ALIAS
        return _route_db

    db_for_read = _router_func('db_for_read')
    db_for_write = _router_func('db_for_write')

    def allow_relation(self, obj1, obj2, **hints):
        # First router returning a non-None verdict decides; otherwise
        # relations are allowed only within the same database.
        for router in self.routers:
            try:
                method = router.allow_relation
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                pass
            else:
                allow = method(obj1, obj2, **hints)
                if allow is not None:
                    return allow
        return obj1._state.db == obj2._state.db

    def allow_syncdb(self, db, model):
        # First router returning a non-None verdict decides; default is yes.
        for router in self.routers:
            try:
                method = router.allow_syncdb
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                pass
            else:
                allow = method(db, model)
                if allow is not None:
                    return allow
        return True
| apache-2.0 |
mohamed/trap-gen | cxx_writer/tests/testFileDumper.py | 2 | 15305 | # -*- coding: iso-8859-1 -*-
####################################################################################
# ___ ___ ___ ___
# / /\ / /\ / /\ / /\
# / /:/ / /::\ / /::\ / /::\
# / /:/ / /:/\:\ / /:/\:\ / /:/\:\
# / /:/ / /:/~/:/ / /:/~/::\ / /:/~/:/
# / /::\ /__/:/ /:/___ /__/:/ /:/\:\ /__/:/ /:/
# /__/:/\:\ \ \:\/:::::/ \ \:\/:/__\/ \ \:\/:/
# \__\/ \:\ \ \::/~~~~ \ \::/ \ \::/
# \ \:\ \ \:\ \ \:\ \ \:\
# \ \ \ \ \:\ \ \:\ \ \:\
# \__\/ \__\/ \__\/ \__\/
#
# This file is part of TRAP.
#
# TRAP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this TRAP; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# or see <http://www.gnu.org/licenses/>.
#
# (c) Luca Fossati, fossati@elet.polimi.it, fossati.l@gmail.com
#
####################################################################################
import unittest
import writer_code
import os
class TestFileDumper(unittest.TestCase):
    """Tests for writer_code.FileDumper and writer_code.Folder output.

    BUGFIX: two methods were both named ``testEmptyFolder``; the second
    definition shadowed the first, so the first never ran. The second has
    been renamed ``testFolderWithPath`` so that both now execute.
    """
    def testDumpVariablesImpl(self):
        tempType = writer_code.TemplateType('std::map', [writer_code.intType, writer_code.stringType], 'map')
        tempVar = writer_code.Variable('pippo', tempType)
        dumper = writer_code.FileDumper('prova.cpp', False)
        dumper.addMember(tempVar)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 4 + 15)
        self.assertEqual(lines[14], '#include <map>\n')
        self.assertEqual(lines[15], '#include <string>\n')
        self.assertEqual(lines[17], 'std::map< int, std::string > pippo;\n')
    def testDumpVariablesHeader(self):
        tempType = writer_code.TemplateType('std::map', [writer_code.intType, writer_code.stringType], 'map')
        tempVar = writer_code.Variable('pippo', tempType)
        dumper = writer_code.FileDumper('prova.cpp', True)
        dumper.addMember(tempVar)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 3 + 19)
        self.assertEqual(lines[16], '#include <map>\n')
        self.assertEqual(lines[17], '#include <string>\n')
        self.assertEqual(lines[18], '\n')
        self.assertEqual(lines[19], '\n')
        self.assertEqual(lines[20], '\n')
    def testDumpFunctionsHeader(self):
        tempType = writer_code.TemplateType('std::map', [writer_code.intType, writer_code.stringType], 'map')
        tempVar = writer_code.Function('pippo', writer_code.Code('std::map<int, std::string> myMap;\nmyMap[5] = \"ccc\";\nreturn myMap;'), tempType)
        dumper = writer_code.FileDumper('prova.cpp', True)
        dumper.addMember(tempVar)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 4 + 19)
        self.assertEqual(lines[16], '#include <map>\n')
        self.assertEqual(lines[17], '#include <string>\n')
        self.assertEqual(lines[19], 'std::map< int, std::string > pippo();\n')
    def testDumpFunctionsImpl(self):
        tempType = writer_code.TemplateType('std::map', [writer_code.intType, writer_code.stringType], 'map')
        tempVar = writer_code.Function('pippo', writer_code.Code('std::map<int, std::string> myMap;\nmyMap[5] = \"ccc\";\nreturn myMap;'), tempType)
        dumper = writer_code.FileDumper('prova.cpp', False)
        dumper.addMember(tempVar)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 8 + 15)
        self.assertEqual(lines[14], '#include <map>\n')
        self.assertEqual(lines[15], '#include <string>\n')
        self.assertEqual(lines[17], 'std::map< int, std::string > pippo(){\n')
        self.assertEqual(lines[18], ' std::map<int, std::string> myMap;\n')
        self.assertEqual(lines[19], ' myMap[5] = \"ccc\";\n')
        self.assertEqual(lines[20], ' return myMap;\n')
        self.assertEqual(lines[21], '}\n')
    def testTemplateFunctionsHeader(self):
        tempType = writer_code.TemplateType('std::map', [writer_code.intType, writer_code.stringType], 'map')
        tempVar = writer_code.Function('pippo', writer_code.Code('std::map<int, std::string> myMap;\nmyMap[5] = \"ccc\";\nreturn myMap;'), tempType, [], False, False, ['T'])
        dumper = writer_code.FileDumper('prova.cpp', True)
        dumper.addMember(tempVar)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 8 + 19)
        self.assertEqual(lines[16], '#include <map>\n')
        self.assertEqual(lines[17], '#include <string>\n')
        self.assertEqual(lines[19], 'template < typename T > std::map< int, std::string > pippo(){\n')
        self.assertEqual(lines[20], ' std::map<int, std::string> myMap;\n')
        self.assertEqual(lines[21], ' myMap[5] = \"ccc\";\n')
        self.assertEqual(lines[22], ' return myMap;\n')
        self.assertEqual(lines[23], '}\n')
    def testTemplateFunctionsImpl(self):
        # Template functions are emitted in the header only, so the
        # implementation file contains just the includes.
        tempType = writer_code.TemplateType('std::map', [writer_code.intType, writer_code.stringType], 'map')
        tempVar = writer_code.Function('pippo', writer_code.Code('std::map<int, std::string> myMap;\nmyMap[5] = \"ccc\";\nreturn myMap;'), tempType, [], False, ['T'])
        dumper = writer_code.FileDumper('prova.cpp', False)
        dumper.addMember(tempVar)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 3 + 15)
        self.assertEqual(lines[14], '#include <map>\n')
        self.assertEqual(lines[15], '#include <string>\n')
    def testDumpClassHeader(self):
        intDecl = writer_code.intType
        privateVar = writer_code.Attribute('pippo', intDecl, 'pri')
        emptyBody = writer_code.Code('')
        publicConstr = writer_code.Constructor(emptyBody, 'pu')
        classDecl = writer_code.ClassDeclaration('MyClass', [privateVar])
        classDecl.addConstructor(publicConstr)
        dumper = writer_code.FileDumper('prova.cpp', True)
        dumper.addMember(classDecl)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 8 + 19)
        self.assertEqual(lines[16], 'class MyClass{\n')
        self.assertEqual(lines[17], ' private:\n')
        self.assertEqual(lines[18], ' int pippo;\n')
        self.assertEqual(lines[19], '\n')
        self.assertEqual(lines[20], ' public:\n')
        self.assertEqual(lines[21], ' MyClass();\n')
        self.assertEqual(lines[22], '};\n')
    def testDumpClassImpl(self):
        intDecl = writer_code.intType
        privateVar = writer_code.Attribute('pippo', intDecl, 'pri')
        emptyBody = writer_code.Code('')
        publicConstr = writer_code.Constructor(emptyBody, 'pu')
        classDecl = writer_code.ClassDeclaration('MyClass', [privateVar])
        classDecl.addConstructor(publicConstr)
        dumper = writer_code.FileDumper('prova.cpp', False)
        dumper.addMember(classDecl)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 4 + 15)
        self.assertEqual(lines[14], 'MyClass::MyClass(){\n')
        self.assertEqual(lines[15], '\n')
        self.assertEqual(lines[16], '}\n')
    def testDumpTemplateClassHeader(self):
        intDecl = writer_code.intType
        stringDecl = writer_code.stringType
        privateVar = writer_code.Attribute('pippo', intDecl, 'pri')
        emptyBody = writer_code.Code('')
        publicConstr = writer_code.Constructor(emptyBody, 'pu', [], ['std::string()'])
        classDecl = writer_code.ClassDeclaration('MyClass', [privateVar], [stringDecl], ['T'])
        classDecl.addConstructor(publicConstr)
        dumper = writer_code.FileDumper('prova.cpp', True)
        dumper.addMember(classDecl)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 12 + 20)
        self.assertEqual(lines[18], 'template < typename T > class MyClass : public std::string{\n')
        self.assertEqual(lines[19], ' private:\n')
        self.assertEqual(lines[20], ' int pippo;\n')
        self.assertEqual(lines[21], '\n')
        self.assertEqual(lines[22], ' public:\n')
        self.assertEqual(lines[23], ' MyClass() : std::string(){\n')
        self.assertEqual(lines[24], '\n')
        self.assertEqual(lines[25], ' }\n')
        self.assertEqual(lines[26], '\n')
        self.assertEqual(lines[27], '};\n')
    def testDumpTemplateClassImpl(self):
        # Template classes are fully emitted in the header; the
        # implementation file stays essentially empty.
        intDecl = writer_code.intType
        stringDecl = writer_code.stringType
        privateVar = writer_code.Attribute('pippo', intDecl, 'pri')
        emptyBody = writer_code.Code('')
        publicConstr = writer_code.Constructor(emptyBody, 'pu', [], ['std::string()'])
        classDecl = writer_code.ClassDeclaration('MyClass', [privateVar], [stringDecl], ['T'])
        classDecl.addConstructor(publicConstr)
        dumper = writer_code.FileDumper('prova.cpp', False)
        dumper.addMember(classDecl)
        dumper.write()
        testFile = open('prova.cpp', 'rt')
        lines = testFile.readlines()
        testFile.close()
        os.remove('prova.cpp')
        self.assertEqual(len(lines), 1 + 16)
    def testEmptyFolder(self):
        folder = writer_code.Folder('')
        folder.create()
        os.remove('wscript')
    def testFolderWithPath(self):
        # Renamed from a duplicate ``testEmptyFolder`` definition that used
        # to shadow the method above.
        folder = writer_code.Folder('temp/try')
        folder.create()
        self.assert_(os.path.exists('temp/try/wscript'))
        os.remove('temp/try/wscript')
        import shutil
        shutil.rmtree('temp', True)
    def testDumpAll(self):
        folder = writer_code.Folder('temp')
        intDecl = writer_code.intType
        privateVar = writer_code.Attribute('pippo', intDecl, 'pri')
        emptyBody = writer_code.Code('')
        publicConstr = writer_code.Constructor(emptyBody, 'pu')
        classDecl = writer_code.ClassDeclaration('MyClass', [privateVar])
        classDecl.addConstructor(publicConstr)
        implFile = writer_code.FileDumper('prova.cpp', False)
        implFile.addMember(classDecl)
        headFile = writer_code.FileDumper('prova.hpp', True)
        headFile.addMember(classDecl)
        folder.addHeader(headFile)
        folder.addCode(implFile)
        folder.create()
        testImplFile = open('temp/prova.cpp', 'rt')
        lines = testImplFile.readlines()
        testImplFile.close()
        os.remove('temp/prova.cpp')
        self.assertEqual(len(lines), 19)
        self.assertEqual(lines[14], 'MyClass::MyClass(){\n')
        self.assertEqual(lines[15], '\n')
        self.assertEqual(lines[16], '}\n')
        testHeadFile = open('temp/prova.hpp', 'rt')
        lines = testHeadFile.readlines()
        testHeadFile.close()
        os.remove('temp/prova.hpp')
        self.assertEqual(len(lines), 27)
        self.assertEqual(lines[16], 'class MyClass{\n')
        self.assertEqual(lines[17], ' private:\n')
        self.assertEqual(lines[18], ' int pippo;\n')
        self.assertEqual(lines[19], '\n')
        self.assertEqual(lines[20], ' public:\n')
        self.assertEqual(lines[21], ' MyClass();\n')
        self.assertEqual(lines[22], '};\n')
        testWscriptFile = open('temp/wscript', 'rt')
        lines = testWscriptFile.readlines()
        testWscriptFile.close()
        os.remove('temp/wscript')
        self.assertEqual(len(lines), 16)
        self.assertEqual(lines[0], '#!/usr/bin/env python\n')
        self.assertEqual(lines[1], '# -*- coding: iso-8859-1 -*-\n')
        self.assertEqual(lines[2], '\n')
        self.assertEqual(lines[3], 'import os\n')
        self.assertEqual(lines[4], '\n')
        self.assertEqual(lines[5], '\n')
        self.assertEqual(lines[6], 'def build(bld):\n')
        self.assertEqual(lines[7], ' sources = \"\"\"\n')
        self.assertEqual(lines[8], ' prova.cpp\n')
        self.assertEqual(lines[9], ' \"\"\"\n')
        self.assertEqual(lines[10], ' uselib = \'BOOST BOOST_THREAD BOOST_FILESYSTEM BOOST_SYSTEM SYSTEMC TLM TRAP\'\n')
        self.assertEqual(lines[11], ' objects = \'\'\n')
        self.assertEqual(lines[12], ' includes = \'.\'\n')
        self.assertEqual(lines[13], ' target = \'temp\'\n')
        self.assertEqual(lines[15], ' bld.program(source = sources, target = target, use = uselib + \' \' + objects, includes = includes)\n')
        import shutil
        shutil.rmtree('temp', True)
    def testNestedDirs1(self):
        # Parent created before the nested folder.
        folder = writer_code.Folder('temp')
        nestFolder = writer_code.Folder('nested')
        folder.addSubFolder(nestFolder)
        folder.create()
        nestFolder.create()
        self.assert_(os.path.exists('temp/wscript'))
        self.assert_(os.path.exists('temp/nested/wscript'))
        os.remove('temp/wscript')
        os.remove('temp/nested/wscript')
        import shutil
        shutil.rmtree('temp', True)
    def testNestedDirs2(self):
        # Nested folder created before the parent -- must still work.
        folder = writer_code.Folder('temp')
        nestFolder = writer_code.Folder('nested')
        folder.addSubFolder(nestFolder)
        nestFolder.create()
        folder.create()
        self.assert_(os.path.exists('temp/wscript'))
        self.assert_(os.path.exists('temp/nested/wscript'))
        os.remove('temp/wscript')
        os.remove('temp/nested/wscript')
        import shutil
        shutil.rmtree('temp', True)
    def testNestedDirsCommonPath(self):
        # Sub-folder path already contains the parent's path component.
        folder = writer_code.Folder('temp')
        nestFolder = writer_code.Folder('temp/nested')
        folder.addSubFolder(nestFolder)
        nestFolder.create()
        folder.create()
        os.remove('temp/wscript')
        os.remove('temp/nested/wscript')
        import shutil
        shutil.rmtree('temp', True)
| gpl-3.0 |
pkappesser/youtube-dl | youtube_dl/extractor/dropbox.py | 236 | 1299 | # coding: utf-8
from __future__ import unicode_literals
import os.path
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import url_basename
class DropboxIE(InfoExtractor):
    """Extractor for publicly shared Dropbox file links."""
    _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P<id>[a-zA-Z0-9]{15})/.*'
    _TESTS = [
        {
            'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
            'info_dict': {
                'id': 'nelirfsxnmcfbfh',
                'ext': 'mp4',
                'title': 'youtube-dl test video \'ä"BaW_jenozKc'
            }
        }, {
            'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        # Title is the URL-decoded basename without its extension.
        filename = compat_urllib_parse_unquote(url_basename(url))
        title = os.path.splitext(filename)[0]
        # Force a direct download by swapping dl=0 for dl=1.
        direct_url = re.sub(r'[?&]dl=0', '', url)
        separator = '&' if '?' in direct_url else '?'
        direct_url += separator + 'dl=1'
        return {
            'id': video_id,
            'title': title,
            'url': direct_url,
        }
| unlicense |
ericholscher/django | tests/known_related_objects/tests.py | 6 | 5774 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Tournament, Organiser, Pool, PoolStyle
class ExistingRelatedInstancesTests(TestCase):
    """Verify that ORM traversal reuses already-fetched related instances.

    Each test asserts both the query count (no extra fetch for the related
    object) and object *identity* (``assertIs``), proving the known related
    object is reused rather than re-queried.
    """
    fixtures = ['tournament.json']

    def test_foreign_key(self):
        with self.assertNumQueries(2):
            tournament = Tournament.objects.get(pk=1)
            pool = tournament.pool_set.all()[0]
            self.assertIs(tournament, pool.tournament)

    def test_foreign_key_prefetch_related(self):
        with self.assertNumQueries(2):
            tournament = (Tournament.objects.prefetch_related('pool_set').get(pk=1))
            pool = tournament.pool_set.all()[0]
            self.assertIs(tournament, pool.tournament)

    def test_foreign_key_multiple_prefetch(self):
        with self.assertNumQueries(2):
            tournaments = list(Tournament.objects.prefetch_related('pool_set').order_by('pk'))
            pool1 = tournaments[0].pool_set.all()[0]
            self.assertIs(tournaments[0], pool1.tournament)
            pool2 = tournaments[1].pool_set.all()[0]
            self.assertIs(tournaments[1], pool2.tournament)

    def test_queryset_or(self):
        tournament_1 = Tournament.objects.get(pk=1)
        tournament_2 = Tournament.objects.get(pk=2)
        with self.assertNumQueries(1):
            pools = tournament_1.pool_set.all() | tournament_2.pool_set.all()
            related_objects = set(pool.tournament for pool in pools)
            self.assertEqual(related_objects, set((tournament_1, tournament_2)))

    def test_queryset_or_different_cached_items(self):
        tournament = Tournament.objects.get(pk=1)
        organiser = Organiser.objects.get(pk=1)
        with self.assertNumQueries(1):
            pools = tournament.pool_set.all() | organiser.pool_set.all()
            first = pools.filter(pk=1)[0]
            self.assertIs(first.tournament, tournament)
            self.assertIs(first.organiser, organiser)

    def test_queryset_or_only_one_with_precache(self):
        tournament_1 = Tournament.objects.get(pk=1)
        tournament_2 = Tournament.objects.get(pk=2)
        # 2 queries here as pool id 3 has tournament 2, which is not cached
        with self.assertNumQueries(2):
            pools = tournament_1.pool_set.all() | Pool.objects.filter(pk=3)
            related_objects = set(pool.tournament for pool in pools)
            self.assertEqual(related_objects, set((tournament_1, tournament_2)))
        # and the other direction
        with self.assertNumQueries(2):
            pools = Pool.objects.filter(pk=3) | tournament_1.pool_set.all()
            related_objects = set(pool.tournament for pool in pools)
            self.assertEqual(related_objects, set((tournament_1, tournament_2)))

    def test_queryset_and(self):
        tournament = Tournament.objects.get(pk=1)
        organiser = Organiser.objects.get(pk=1)
        with self.assertNumQueries(1):
            pools = tournament.pool_set.all() & organiser.pool_set.all()
            first = pools.filter(pk=1)[0]
            self.assertIs(first.tournament, tournament)
            self.assertIs(first.organiser, organiser)

    def test_one_to_one(self):
        with self.assertNumQueries(2):
            style = PoolStyle.objects.get(pk=1)
            pool = style.pool
            self.assertIs(style, pool.poolstyle)

    def test_one_to_one_select_related(self):
        with self.assertNumQueries(1):
            style = PoolStyle.objects.select_related('pool').get(pk=1)
            pool = style.pool
            self.assertIs(style, pool.poolstyle)

    def test_one_to_one_multi_select_related(self):
        with self.assertNumQueries(1):
            poolstyles = list(PoolStyle.objects.select_related('pool').order_by('pk'))
            self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle)
            self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle)

    def test_one_to_one_prefetch_related(self):
        with self.assertNumQueries(2):
            style = PoolStyle.objects.prefetch_related('pool').get(pk=1)
            pool = style.pool
            self.assertIs(style, pool.poolstyle)

    def test_one_to_one_multi_prefetch_related(self):
        with self.assertNumQueries(2):
            poolstyles = list(PoolStyle.objects.prefetch_related('pool').order_by('pk'))
            self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle)
            self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle)

    def test_reverse_one_to_one(self):
        with self.assertNumQueries(2):
            pool = Pool.objects.get(pk=2)
            style = pool.poolstyle
            self.assertIs(pool, style.pool)

    def test_reverse_one_to_one_select_related(self):
        with self.assertNumQueries(1):
            pool = Pool.objects.select_related('poolstyle').get(pk=2)
            style = pool.poolstyle
            self.assertIs(pool, style.pool)

    def test_reverse_one_to_one_prefetch_related(self):
        with self.assertNumQueries(2):
            pool = Pool.objects.prefetch_related('poolstyle').get(pk=2)
            style = pool.poolstyle
            self.assertIs(pool, style.pool)

    def test_reverse_one_to_one_multi_select_related(self):
        with self.assertNumQueries(1):
            pools = list(Pool.objects.select_related('poolstyle').order_by('pk'))
            self.assertIs(pools[1], pools[1].poolstyle.pool)
            self.assertIs(pools[2], pools[2].poolstyle.pool)

    def test_reverse_one_to_one_multi_prefetch_related(self):
        with self.assertNumQueries(2):
            pools = list(Pool.objects.prefetch_related('poolstyle').order_by('pk'))
            self.assertIs(pools[1], pools[1].poolstyle.pool)
            self.assertIs(pools[2], pools[2].poolstyle.pool)
| bsd-3-clause |
adrianN/line_intersection | line_intersection.py | 1 | 2228 | from bisect import bisect_left, insort
class search:
def __init__(self):
self.values = []
def insert(self, value):
assert not value in self.values
print "insert", value
insort(self.values, value)
def delete(self, value):
assert value in self.values
print "delete", value
assert(self.values.pop(self.position(value)) == value)
def find_neighbors(self, value):
p = self.position(value)
l = None
r = None
if p>0: l = self.values[p-1]
if p<len(self.values)-1: r = self.values[p+1]
return (l,r)
def position(self, value):
i = bisect_left(self.values, value)
if i != len(self.values) and self.values[i] == value:
return i
raise ValueError
def cross_product((x1,y1), (x2,y2)):
return x1*y2 - y1*x2
def intersect((o1,p1), (o2,p2)):
d1 = (p1[0]-o1[0], p1[1]-o1[1])
d2 = (p2[0]-o2[0], p2[1]-o2[1])
cross = cross_product(d1,d2)
x = (o2[0]-o1[0], o2[1]-o1[1])
if abs(cross) == 0:
#in the collinear case t or u are also zero, I think
return False
t = cross_product(x,d2)
u = cross_product(x,d1)
t = float(t)/cross
u = float(u)/cross
if 0<t<1 and 0<u<1:
# print "intersection", (o1,p1), (o2,p2)
inter1 = (o1[0]+p1[0]*u,o1[1]+p1[1]*u)
inter2 = (o2[0]+p2[0]*t, o2[1]+p2[1]*t)
# print t,u, inter1, inter2
return True
return False
def base_direction_transform(line_segments):
for (x1,y1), (x2,y2) in line_segments:
yield (x1,x2), (x2-x1, y2-y1)
def line_intersections(line_segments):
""" A line is defined by a start point and an end point """
end_points = []
for i, ((x,_), (x2,_)) in enumerate(line_segments):
end_points.append((x,i,x>=x2))
end_points.append((x2,i,x<x2))
end_points = sorted(end_points)
search_thingy = search()
for _, label, is_right in end_points:
segment = line_segments[label]
if not is_right:
search_thingy.insert(label)
for n in search_thingy.find_neighbors(label):
if n is not None and intersect(segment, line_segments[n]):
yield segment, line_segments[n]
else:
p,s = search_thingy.find_neighbors(label)
if p is not None and s is not None:
pred = line_segments[p]
succ = line_segments[s]
if intersect(pred,succ):
yield pred,succ
search_thingy.delete(label)
| mit |
RyanBeatty/Steer-Clear-Backend | tests/api_tests/views_tests.py | 3 | 26836 | from steerclear import app, db
from steerclear.models import Ride
from steerclear.api.views import query_distance_matrix_api
from tests.base import base
from testfixtures import replace, test_datetime
from flask import url_for
from datetime import datetime, timedelta
import vcr
# vcr object used to record api request responses or return already recorded responses
myvcr = vcr.VCR(cassette_library_dir='tests/fixtures/vcr_cassettes/api_tests/')
"""
RideListAPITestCase
-------------------
Test cases for the RideListAPI class that deals with managing and
interacting with the list of ride requests
"""
class RideListAPITestCase(base.SteerClearBaseTestCase):
"""
setUp
-----
Overrides base test case setUp(). makes sure
The user is logged in before each test is run
"""
def setUp(self):
super(RideListAPITestCase, self).setUp()
"""
test_get_ride_list_requires_login
---------------------------------
Tests that getting the list of ride requests
requires that the user be logged in
"""
def test_get_ride_list_requires_login(self):
response = self.client.get(url_for('api.rides'))
self.assertEquals(response.status_code, 401)
"""
test_get_ride_list_requires_admin_permission
---------------------------------------------------
Tests that trying to access the GET RideList API requires
the User to be a admin
"""
def test_get_ride_list_requires_admin_permission(self):
self._test_url_requires_roles(
self.client.get,
url_for('api.rides'),
[self.admin_role]
)
"""
test_get_ride_list_empty_list
---------------
Tests that listing all of the rides in the queue is correct.
"""
def test_get_ride_list_empty_list(self):
self._login(self.admin_user)
response = self.client.get(url_for('api.rides'))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.json, {"rides": []})
"""
test_get_ride_list_not_empty_list
-------------------------
Tests that api can return the rides list correctly when
the queue is not empty
"""
def test_get_ride_list_not_empty_list(self):
self._login(self.admin_user)
# create ride objects
r1 = self._create_ride(self.admin_user)
r2 = self._create_ride(self.admin_user)
r3 = self._create_ride(self.admin_user)
# store dict versions
r1_dict = r1.as_dict()
r2_dict = r2.as_dict()
r3_dict = r3.as_dict()
# assign correct id and time vals
r1_dict['pickup_time'] = 'Mon, 01 Jan 0001 00:00:00 -0000'
r2_dict['pickup_time'] = 'Mon, 01 Jan 0001 00:00:00 -0000'
r3_dict['pickup_time'] = 'Mon, 01 Jan 0001 00:00:00 -0000'
r1_dict['dropoff_time'] = 'Mon, 01 Jan 0001 00:00:00 -0000'
r2_dict['dropoff_time'] = 'Mon, 01 Jan 0001 00:00:00 -0000'
r3_dict['dropoff_time'] = 'Mon, 01 Jan 0001 00:00:00 -0000'
# test response
response = self.client.get(url_for('api.rides'))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.json, {'rides': [r1_dict, r2_dict, r3_dict]})
"""
test_get_ride_list_filter_by_location
-------------------------------------
Tests that using the filter query string 'location'
correctly filters the ride request queue
"""
def test_get_ride_list_filter_by_location(self):
self._login(self.admin_user)
# create and save some off campus and on campus ride requests
n = 6
on_campus_rides = []
off_campus_rides = []
for _ in xrange(n):
on_campus_rides.append(self._create_ride(self.admin_user, on_campus=True))
off_campus_rides.append(self._create_ride(self.admin_user, on_campus=False))
# create 1 extra on campus ride just to have imbalance
on_campus_rides.append(self._create_ride(self.admin_user, on_campus=True))
# check that filtering by on campus rides only returns
# ride requests that were on campus
response = self.client.get(url_for('api.rides', location='on_campus'))
rides = response.json['rides']
for ride in rides:
# every ride in response should be on campus
self.assertTrue(ride['on_campus'])
# there should be n+1 number of rides on campus
self.assertEqual(len(rides), n+1)
# check that filtering by off campus rides only returns
# ride requests that were off campus
response = self.client.get(url_for('api.rides', location='off_campus'))
rides = response.json['rides']
for ride in rides:
# every ride in response should be off campus
self.assertFalse(ride['on_campus'])
# there should be n number of rides off campus
self.assertEquals(len(rides), n)
# check that putting a random value for 'location' returns
# all current ride requests
response = self.client.get(url_for('api.rides', location='foobar'))
rides = response.json['rides']
on_campus_count = 0
off_campus_count = 0
for ride in rides:
# count the number of on campus and off campus rides in response
if ride['on_campus']:
on_campus_count += 1
else:
off_campus_count += 1
# there should be n+n+1 number of rides,
# n+1 number of on campus rides,
# and n number of off campus rides
self.assertEquals(len(rides), n+n+1)
self.assertEquals(on_campus_count, n+1)
self.assertEquals(off_campus_count, n)
# check that omitting 'location' returns all rides
response = self.client.get(url_for('api.rides'))
rides = response.json['rides']
on_campus_count = 0
off_campus_count = 0
for ride in rides:
# count the number of on campus and off campus rides in response
if ride['on_campus']:
on_campus_count += 1
else:
off_campus_count += 1
# there should be n+n+1 number of rides,
# n+1 number of on campus rides,
# and n number of off campus rides
self.assertEquals(len(rides), n+n+1)
self.assertEquals(on_campus_count, n+1)
self.assertEquals(off_campus_count, n)
"""
test_post_ride_list_requires_login
----------------------------------
Tests that the user must be logged in in order to
create a new ride request
"""
def test_post_ride_list_requires_login(self):
response = self.client.post(url_for('api.rides'), data={})
self.assertEquals(response.status_code, 401)
"""
test_post_ride_list
-------------
Tests that adding a new ride request works. Sends POST ride
request data to '/rides/' and checks if the response json object
is a valid ride request
"""
@myvcr.use_cassette()
@replace('steerclear.api.views.datetime', test_datetime(2015,6,13,1,2,3))
def test_post_ride_list(self):
self._login(self.student_user)
travel_time = 239
expected_pickup_time = datetime(2015,6,13,1,2,3) + timedelta(0, 10 * 60)
expected_dropoff_time = expected_pickup_time + timedelta(0, travel_time)
expected_pickup_string = expected_pickup_time.strftime('%a, %d %b %Y %H:%M:%S -0000')
expected_dropoff_string = expected_dropoff_time.strftime('%a, %d %b %Y %H:%M:%S -0000')
payload = {
u"num_passengers": 3,
u"start_latitude": 37.2735,
u"start_longitude": -76.7196,
u"end_latitude": 37.2809,
u"end_longitude": -76.7197,
u"pickup_time": expected_pickup_string,
u"travel_time": travel_time,
u"dropoff_time": expected_dropoff_string,
u'pickup_address': u'2006 Brooks Street, Williamsburg, VA 23185, USA',
u'dropoff_address': u'1234 Richmond Road, Williamsburg, VA 23185, USA',
u'on_campus': True
}
response = self.client.post(url_for('api.rides'), data=payload)
payload[u'id'] = 1
self.assertEquals(response.status_code, 201)
self.assertEquals(response.json, {u"ride": payload})
self.assertEquals(Ride.query.get(1).user, self.student_user)
"""
test_post_ride_list_pickup_loc_outside_radius
---------------------------------------------
Tests that requesting a bad pickup location returns a 400
"""
def test_post_ride_list_pickup_loc_outside_radius(self):
self._login(self.student_user)
payload = {
u"num_passengers": 3,
u"start_latitude": 37.269850,
u"start_longitude": -76.758869,
u"end_latitude": 37.2809,
u"end_longitude": -76.7197
}
response = self.client.post(url_for('api.rides'), data=payload)
self.assertEquals(response.status_code, 400)
"""
test_post_ride_list_dropoff_loc_outside_radius
---------------------------------------------
Tests that requesting a bad dropoff location returns a 400
"""
def test_post_ride_list_dropoff_loc_outside_radius(self):
self._login(self.student_user)
payload = {
u"num_passengers": 3,
u"start_latitude": 37.2735,
u"start_longitude": -76.7196,
u"end_latitude": 37.269850,
u"end_longitude": -76.758869,
}
response = self.client.post(url_for('api.rides'), data=payload)
self.assertEquals(response.status_code, 400)
"""
test_post_ride_list_bad_form_data
---------------------------------
Tests that trying to create a new ride fails if
required fields are not in form
"""
def test_post_ride_list_bad_form_data(self):
self._login(self.student_user)
payload = {
u"num_passengers": 3,
u"start_latitude": 37.273485,
u"start_longitude": -76.719628,
u"end_latitude": 37.280893,
u"end_longitude": -76.719691,
}
bad_payload = payload.copy()
bad_payload.pop('num_passengers', None)
bad_payload.pop('id', None)
r = self.client.post(url_for('api.rides'), data=bad_payload)
self.assertEquals(r.status_code, 400)
bad_payload = payload.copy()
bad_payload.pop('start_latitude', None)
bad_payload.pop('id', None)
r = self.client.post(url_for('api.rides'), data=bad_payload)
self.assertEquals(r.status_code, 400)
bad_payload = payload.copy()
bad_payload.pop('start_longitude', None)
bad_payload.pop('id', None)
r = self.client.post(url_for('api.rides'), data=bad_payload)
self.assertEquals(r.status_code, 400)
bad_payload = payload.copy()
bad_payload.pop('end_latitude', None)
bad_payload.pop('id', None)
r = self.client.post(url_for('api.rides'), data=bad_payload)
self.assertEquals(r.status_code, 400)
bad_payload = payload.copy()
bad_payload.pop('end_longitude', None)
bad_payload.pop('id', None)
r = self.client.post(url_for('api.rides'), data=bad_payload)
self.assertEquals(r.status_code, 400)
"""
RideAPITestCase
---------------
Test Cases for the RideAPI class that deals with
managing and interacting with individual Ride objects
"""
class RideAPITestCase(base.SteerClearBaseTestCase):
"""
setUp
-----
Overrides super class setUp(). Makes sure the user
is logged in before each test is run
"""
def setUp(self):
super(RideAPITestCase, self).setUp()
"""
test_get_ride_requires_login
----------------------------
Tests that user must be logged in to access a ride request
"""
def test_get_ride_requires_login(self):
response = self.client.get(url_for('api.ride', ride_id=0))
self.assertEquals(response.status_code, 401)
"""
test_get_ride_requires_student_or_admin_permission
---------------------------------------------------
Tests that trying to access the get Ride API requires
the User to be a student or an admin
"""
def test_get_ride_requires_student_or_admin_permission(self):
# Create ride so that we try to get an existing ride
self._create_ride(self.student_user)
self._test_url_requires_roles(
self.client.get,
url_for('api.ride', ride_id=1),
[self.student_role, self.admin_role]
)
"""
test_get_ride_bad_ride_id
--------------------------
Tests that trying to get a specific ride with
a bad ride id returns 404
"""
def test_get_ride_bad_ride_id(self):
self._login(self.student_user)
# check that bad ride_id get request returns 404
response = self.client.get(url_for('api.ride', ride_id=1))
self.assertEquals(response.status_code, 403)
ride = self._create_ride(self.student_user)
# check that bad ride_id with not empty database returns 404
response = self.client.get(url_for('api.ride', ride_id=2))
self.assertEquals(response.status_code, 403)
# finally, check that good ride_id, doesn't return 403/404
response = self.client.get(url_for('api.ride', ride_id=1))
self.assertEquals(response.status_code, 200)
"""
test_get_ride_success
----------------------
Tests that api successfully gets a specified
ride object given its ride_id
"""
def test_get_ride_success(self):
self._login(self.student_user)
# create ride objects to db
r1 = self._create_ride(self.student_user)
r2 = self._create_ride(self.student_user)
r3 = self._create_ride(self.student_user)
# store dict versions
r1_dict = r1.as_dict()
r2_dict = r2.as_dict()
r3_dict = r3.as_dict()
# assign correct id vals
r1_dict[u'pickup_time'] = u'Mon, 01 Jan 0001 00:00:00 -0000'
r2_dict[u'pickup_time'] = u'Mon, 01 Jan 0001 00:00:00 -0000'
r3_dict[u'pickup_time'] = u'Mon, 01 Jan 0001 00:00:00 -0000'
r1_dict[u'dropoff_time'] = u'Mon, 01 Jan 0001 00:00:00 -0000'
r2_dict[u'dropoff_time'] = u'Mon, 01 Jan 0001 00:00:00 -0000'
r3_dict[u'dropoff_time'] = u'Mon, 01 Jan 0001 00:00:00 -0000'
response = self.client.get(url_for('api.ride', ride_id=1))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.json, {'ride': r1_dict})
response = self.client.get(url_for('api.ride', ride_id=2))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.json, {'ride': r2_dict})
response = self.client.get(url_for('api.ride', ride_id=3))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.json, {'ride': r3_dict})
"""
test_get_ride_can_only_get_accessible_ride
------------------------------------------
Tests that Users can only access the GET RideAPI for
Ride requests they have made
"""
def test_get_ride_can_only_get_accessible_ride(self):
# create 2 Ride objects by 2 different students
self._create_ride(self.student_user)
self._create_ride(self.student_user2)
# login first student
self._login(self.student_user)
# check that first student can access the ride they placed
response = self.client.get(url_for('api.ride', ride_id=1))
self.assertEquals(response.status_code, 200)
# check that first student cannot access any rides they didn't place
response = self.client.get(url_for('api.ride', ride_id=2))
self.assertEquals(response.status_code, 403)
# login second student
self._login(self.student_user2)
# check that the second student cannot access any rides they didn't place
response = self.client.get(url_for('api.ride', ride_id=1))
self.assertEquals(response.status_code, 403)
# check that the second student can access the ride they placed
response = self.client.get(url_for('api.ride', ride_id=2))
self.assertEquals(response.status_code, 200)
"""
test_delete_ride_requires_login
-------------------------------
Tests that a user must be logged in to delete a ride request
"""
def test_delete_ride_requires_login(self):
response = self.client.delete(url_for('api.ride', ride_id=0))
self.assertEquals(response.status_code, 401)
"""
test_delete_ride_requires_student_or_admin_permission
---------------------------------------------------
Tests that trying to access the delete Ride API requires
the User to be a student or an admin
"""
def test_delete_ride_requires_student_or_admin_permission(self):
# Create ride so that we try to get an existing ride
self._create_ride(self.student_user)
self._test_url_requires_roles(
self.client.delete,
url_for('api.ride', ride_id=1),
[self.student_role, self.admin_role]
)
"""
test_delete_ride_bad_ride_id
-----------------------------
Test that api returns 404 to a ride id that doesn't exist
"""
def test_delete_ride_bad_ride_id(self):
self._login(self.student_user)
# check that bad ride_id delete request returns 404
response = self.client.delete(url_for('api.ride', ride_id=1))
self.assertEquals(response.status_code, 403)
ride = self._create_ride(self.student_user)
# check that bad ride_id with not empty database returns 404
response = self.client.delete(url_for('api.ride', ride_id=2))
self.assertEquals(response.status_code, 403)
# finally check to see that accessing the ride object doesn't fail
response = self.client.delete(url_for('api.ride', ride_id=1))
self.assertEquals(response.status_code, 204)
"""
test_delete_ride_success
-------------------------
Tests that deleting a ride works
"""
def test_delete_ride_success(self):
self._login(self.student_user)
# create Ride objects
r1 = self._create_ride(self.student_user)
r2 = self._create_ride(self.student_user)
r3 = self._create_ride(self.student_user)
# store dict versions
r2_dict = r2.as_dict()
r3_dict = r3.as_dict()
# test can delete a ride
response = self.client.delete(url_for('api.ride', ride_id=1))
self.assertEquals(response.status_code, 204)
self.assertEquals(response.data, '')
self.assertEquals(len(Ride.query.all()), 2)
self.assertEquals(Ride.query.get(1), None)
self.assertEquals(Ride.query.get(2).as_dict(), r2_dict)
self.assertEquals(Ride.query.get(3).as_dict(), r3_dict)
# test can delete a ride out of order
response = self.client.delete(url_for('api.ride', ride_id=3))
self.assertEquals(response.status_code, 204)
self.assertEquals(response.data, '')
self.assertEquals(len(Ride.query.all()), 1)
self.assertEquals(Ride.query.get(1), None)
self.assertEquals(Ride.query.get(2).as_dict(), r2_dict)
self.assertEquals(Ride.query.get(3), None)
# test can delete final ride
response = self.client.delete(url_for('api.ride', ride_id=2))
self.assertEquals(response.status_code, 204)
self.assertEquals(response.data, '')
self.assertEquals(Ride.query.all(), [])
self.assertEquals(Ride.query.get(1), None)
self.assertEquals(Ride.query.get(2), None)
self.assertEquals(Ride.query.get(3), None)
"""
test_delete_ride_can_only_delete_accessible_ride
------------------------------------------
Tests that Users can only access the DELETE RideAPI for
Ride requests they have made
"""
def test_delete_ride_can_only_delete_accessible_ride(self):
# create 2 Ride objects by 2 different students
self._create_ride(self.student_user)
self._create_ride(self.student_user2)
# login first student
self._login(self.student_user)
# check that first student can delete the ride they placed
response = self.client.delete(url_for('api.ride', ride_id=1))
self.assertEquals(response.status_code, 204)
# check that first student cannot delete any rides they didn't place
response = self.client.delete(url_for('api.ride', ride_id=2))
self.assertEquals(response.status_code, 403)
# create another ride by the first student
self._create_ride(self.student_user)
# login second student
self._login(self.student_user2)
# check that the second student cannot delete any rides they didn't place
response = self.client.delete(url_for('api.ride', ride_id=3))
self.assertEquals(response.status_code, 403)
# check that the second student can delete the ride they placed
response = self.client.delete(url_for('api.ride', ride_id=2))
self.assertEquals(response.status_code, 204)
"""
NotificationAPITestCase
-----------------------
Test case for testing the notifications api
"""
class NotificationAPITestCase(base.SteerClearBaseTestCase):
"""
setUp
-----
Overrides super class setUp(). Makes sure the user
is logged in before each test is run
"""
def setUp(self):
super(NotificationAPITestCase, self).setUp()
"""
test_post_notifications_requires_login
--------------------------------------
Tests that the notifications route requires the user to be logged in
"""
def test_post_notifications_requires_login(self):
response = self.client.post(url_for('api.notifications'), data={})
self.assertEquals(response.status_code, 401)
"""
test_post_notifications_requires_admin_permission
---------------------------------------------------
Tests that trying to access the notifications API requires
the User to be a admin
"""
def test_post_notifications_requires_admin_permission(self):
self._test_url_requires_roles(
self.client.post,
url_for('api.notifications'),
[self.admin_role]
)
"""
test_post_notifications_bad_ride_id
-----------------------------------
Tests that the notifications route fails if the
request ride_id does not exist
"""
def test_post_notifications_bad_ride_id(self):
self._login(self.admin_user)
response = self.client.post(url_for('api.notifications'), data={'ride_id': 1})
self.assertEquals(response.status_code, 400)
ride = self._create_ride(self.student_user)
response = self.client.post(url_for('api.notifications'), data={'ride_id': 2})
self.assertEquals(response.status_code, 400)
"""
ETAAPITestCase
------------------
TestCase for testing API interface with eta calculation module
"""
class ETAAPITestCase(base.SteerClearBaseTestCase):
@myvcr.use_cassette()
@replace('steerclear.api.views.datetime', test_datetime(2015,6,13,1,2,3))
def test_query_distance_matrix_api_no_rides(self):
pickup_loc = (37.273485, -76.719628)
dropoff_loc = (37.280893, -76.719691)
expected_pickup_time = datetime(2015,6,13,1,2,3) + timedelta(0, 10 * 60)
expected_dropoff_time = expected_pickup_time + timedelta(0, 239)
result = query_distance_matrix_api(pickup_loc, dropoff_loc)
(pickup_time, travel_time, dropoff_time) = result[0]
self.assertEquals(pickup_time, expected_pickup_time)
self.assertEquals(travel_time, 239)
self.assertEquals(dropoff_time, expected_dropoff_time)
pickup_address, dropoff_address = result[1]
self.assertEquals(pickup_address, u'2006 Brooks Street, Williamsburg, VA 23185, USA')
self.assertEquals(dropoff_address, u'1234 Richmond Road, Williamsburg, VA 23185, USA')
@myvcr.use_cassette()
def test_query_distance_matrix_api_no_rides_bad_pickup_loc(self):
pickup_loc = (0.0, 0.0)
dropoff_loc = (37.280893, -76.719691)
result = query_distance_matrix_api(pickup_loc, dropoff_loc)
self.assertEquals(result, None)
@myvcr.use_cassette()
def test_query_distance_matrix_api_no_rides_bad_dest_loc(self):
pickup_loc = (37.280893, -76.719691)
dropoff_loc = (0.0, 0.0)
result = query_distance_matrix_api(pickup_loc, dropoff_loc)
self.assertEquals(result, None)
@myvcr.use_cassette()
def test_query_distance_matrix_api_with_last_ride(self):
user = self._create_user()
ride = self._create_ride(user, 1, 0.0, 0.0, 37.272042, -76.714027, dropoff_time=datetime(2015,6,13,1,2,3))
pickup_loc = (37.273485, -76.719628)
dropoff_loc = (37.280893, -76.719691)
expected_pickup_time = datetime(2015,6,13,1,2,3) + timedelta(0, 373)
expected_travel_time = 239
expected_dropoff_time = expected_pickup_time + timedelta(0, expected_travel_time)
result = query_distance_matrix_api(pickup_loc, dropoff_loc)
(pickup_time, travel_time, dropoff_time) = result[0]
self.assertEquals(pickup_time, expected_pickup_time)
self.assertEquals(travel_time, expected_travel_time)
self.assertEquals(dropoff_time, expected_dropoff_time)
pickup_address, dropoff_address = result[1]
self.assertEquals(pickup_address, u'2006 Brooks Street, Williamsburg, VA 23185, USA')
self.assertEquals(dropoff_address, u'1234 Richmond Road, Williamsburg, VA 23185, USA')
@myvcr.use_cassette()
def test_query_distance_matrix_api_with_last_ride_bad_pickup_loc(self):
user = self._create_user()
self._create_ride(user, 1, 0.0, 0.0, 37.272042, -76.714027, dropoff_time=datetime(2015,6,13,1,2,3))
pickup_loc = (0.0, 0.0)
dropoff_loc = (37.280893, -76.719691)
result = query_distance_matrix_api(pickup_loc, dropoff_loc)
self.assertEquals(result, None)
@myvcr.use_cassette()
def test_query_distance_matrix_api_with_last_ride_bad_dropoff_loc(self):
user = self._create_user()
self._create_ride(user, 1, 0.0, 0.0, 37.272042, -76.714027, dropoff_time=datetime(2015,6,13,1,2,3))
pickup_loc = (37.280893, -76.719691)
dropoff_loc = (0.0, 0.0)
result = query_distance_matrix_api(pickup_loc, dropoff_loc)
self.assertEquals(result, None)
| mit |
goofwear/raspberry_pwn | src/pentest/voiper/sulley/impacket/uuid.py | 8 | 2352 | # Copyright (c) 2003-2006 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: uuid.py,v 1.4 2006/05/23 21:19:26 gera Exp $
#
# Description:
# Generate UUID compliant with http://www.webdav.org/specs/draft-leach-uuids-guids-01.txt.
# A different, much simpler (not necessarily better) algorithm is used.
#
# Author:
# Javier Kohen (jkohen)
#
import re
from random import randrange
from struct import pack, unpack
def generate():
# UHm... crappy Python has an maximum integer of 2**31-1.
top = (1L<<31)-1
return pack("IIII", randrange(top), randrange(top), randrange(top), randrange(top))
def bin_to_string(uuid):
uuid1, uuid2, uuid3 = unpack('<LHH', uuid[:8])
uuid4, uuid5, uuid6 = unpack('>HHL', uuid[8:16])
return '%08X-%04X-%04X-%04X-%04X%08X' % (uuid1, uuid2, uuid3, uuid4, uuid5, uuid6)
def string_to_bin(uuid):
matches = re.match('([\dA-Fa-f]{8})-([\dA-Fa-f]{4})-([\dA-Fa-f]{4})-([\dA-Fa-f]{4})-([\dA-Fa-f]{4})([\dA-Fa-f]{8})', uuid)
(uuid1, uuid2, uuid3, uuid4, uuid5, uuid6) = map(lambda x: long(x, 16), matches.groups())
uuid = pack('<LHH', uuid1, uuid2, uuid3)
uuid += pack('>HHL', uuid4, uuid5, uuid6)
return uuid
def stringver_to_bin(s):
(maj,min) = s.split('.')
return pack('<H',int(maj)) + pack('<H',int(min))
def uuidtup_to_bin(tup):
if len(tup) != 2: return
return string_to_bin(tup[0]) + stringver_to_bin(tup[1])
#input: string
#output: tuple (uuid,version)
#if version is not found in the input string "1.0" is returned
#example:
# "00000000-0000-0000-0000-000000000000 3.0" returns ('00000000-0000-0000-0000-000000000000','3.0')
# "10000000-2000-3000-4000-500000000000 version 3.0" returns ('00000000-0000-0000-0000-000000000000','3.0')
# "10000000-2000-3000-4000-500000000000 v 3.0" returns ('00000000-0000-0000-0000-000000000000','3.0')
# "10000000-2000-3000-4000-500000000000" returns ('00000000-0000-0000-0000-000000000000','1.0')
def string_to_uuidtup(s):
g = re.search("([A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}).*?([0-9]{1,5}\.[0-9]{1,5})",s+" 1.0")
if g:
(u,v) = g.groups()
return (u,v)
return
| gpl-3.0 |
aaronfang/personal_scripts | external/curvyEdges.py | 2 | 5421 | # import curvyEdges;curvyEdges.UI()
import maya.cmds as cmds
import pymel.core as pm
class UI(object):
def __init__(self):
title = 'curvyEdges'
version = '1.01'
self.ceObj = spline(self)
if pm.window('curvyEdgesWin', exists=True):
pm.deleteUI('curvyEdgesWin')
with pm.window('curvyEdgesWin', title='{0} | {1}'.format(title, version),
mnb=False, mxb=False, sizeable=False) as window:
with pm.columnLayout():
# curve Frame
with pm.frameLayout(l='Curve Settings', cll=True, cl=False, bs='out'):
with pm.columnLayout():
self.curveType = pm.radioButtonGrp(l='Curve Type:', sl=0, nrb=2, cw3=[96, 96, 128],
labelArray2=['BezierCurve', 'NurbsCurve'])
self.spans = pm.intSliderGrp(field=True, l='Curve Spans:', minValue=2, maxValue=24,
fieldMinValue=2, fieldMaxValue=128, value=2, cw3=[96, 64, 128])
with pm.rowColumnLayout(nc=2, cw=[1, 96], co=[1, 'right', 1]):
self.selOnly = pm.checkBox(v=False, l='Selection Only')
pm.button(l='Create Curve', c=self._create, width=201)
# Deformer Frame
with pm.frameLayout(l='Deformer Settings', bs='out', cl=False, cll=True):
with pm.columnLayout():
self.currentCrv = pm.textFieldGrp(editable=False, l='Current Curve:', cw2=[96, 195])
self.deformers = [attrSlider(1, 0, 1, 'envelope', self.ceObj),
attrSlider(1, -10, 10, 'tension', self.ceObj),
attrSlider(0, 0, 256, 'dropoffDistance[0]', self.ceObj),
attrSlider(1, 0, 2, 'scale[0]', self.ceObj),
attrSlider(1, 0, 1, 'rotation', self.ceObj)]
window.show()
pm.scriptJob(event=['SelectionChanged', self.select], protected=True, p=window)
self.select()
def _create(self, *args):
try:
self.ceObj.create(self.curveType.getSelect(), self.spans.getValue(), self.selOnly.getValue())
for i in self.deformers:
i.setEnable(True)
i.get()
except:
pass
def select(self, *args):
try:
self.ceObj.select()
for i in self.deformers:
i.setEnable(True)
i.get()
except:
self.setCurrentCurve('Select a curvyEdges curve!')
for i in self.deformers:
i.setEnable(False)
def setCurrentCurve(self, curve):
self.currentCrv.setText(curve)
class spline(object):
def __init__(self, uiObj):
self.uiObj = uiObj
def create(self, curveType, spans, selOnly):
sel = pm.selected()
cmds.CreateCurveFromPoly()
curve = pm.selected()
pm.rebuildCurve(curve, spans=spans)
# set UI curve
self.uiObj.setCurrentCurve(curve[0].shortName())
if curveType == 1:
pm.nurbsCurveToBezier()
pm.delete(curve, ch=True)
# Deform
if selOnly:
sel = pm.polyListComponentConversion(sel, fe=True, tv=True)
self.wire = pm.wire(sel, w=curve)
else:
#Object
self.wire = pm.wire(sel[0].node(), w=curve)
def select(self):
sel = pm.selected()
if isinstance(sel[0], pm.nt.Transform):
if not isinstance(sel[0].getShape(), pm.nt.NurbsCurve):
raise Exception('Invalid Selection Type')
elif isinstance(sel[0], pm.NurbsCurveCV):
sel = [i.node().getParent() for i in sel]
else:
raise Exception('Invalid Selection Type')
self.wire = pm.listConnections(sel[0].getShape())
self.uiObj.setCurrentCurve(sel[0].shortName())
class attrSlider(object):
    """One float slider in the tool window, bound to an attribute of the
    active wire deformer (``ceObj.wire[0]``).

    Dragging the slider writes the attribute inside a single undo chunk;
    a protected 'Undo' scriptJob re-reads the attribute so the slider
    follows undo/redo.
    """
    def __init__(self, value, min, max, name, ceObj):
        # NOTE: 'min'/'max' shadow builtins but are kept for call compatibility.
        self.name = name
        self.ceObj = ceObj
        self.undoState = False  # True while an undo chunk is open during a drag
        self.attr = pm.floatSliderGrp(field=True, l=self.name, value=value, pre=3, enable=False,
                                      minValue=min, maxValue=max, dc=lambda *args: self.set(cc=False),
                                      cc=lambda *args: self.set(cc=True), cw3=[96, 64, 128])
        # Keep the slider in sync when the user undoes/redoes.
        pm.scriptJob(event=['Undo', self.get], protected=True, p=self.attr)
    def get(self, *args):
        """Pull the deformer attribute's current value into the slider."""
        try:
            value = getattr(self.ceObj.wire[0], self.name).get(self.attr.getValue())
            self.attr.setValue(value)
        except Exception:
            # Bug fix: the original built AttributeError('... does not exist')
            # here without raising it — a dead statement. The intent is to
            # ignore a deleted/missing wire node, so do that explicitly.
            pass
    def set(self, cc=False):
        """Push the slider value onto the deformer attribute.

        cc=False: drag-command ticks (opens one undo chunk on the first tick).
        cc=True:  change-command on release (closes the chunk).
        """
        if not cc and not self.undoState:
            self.undoState = True
            pm.undoInfo(openChunk=True)
        try:
            getattr(self.ceObj.wire[0], self.name).set(self.attr.getValue())
        except Exception:
            # Same fix as get(): the wire node no longer exists — ignore so
            # the UI keeps working (original constructed, but never raised,
            # an AttributeError here).
            pass
        if cc and self.undoState:
            pm.undoInfo(closeChunk=True)
            self.undoState = False
    def setEnable(self, val):
        """Enable/disable the slider widget."""
        self.attr.setEnable(val)
| mit |
mozilla/fjord | vendor/packages/pytz-2015.7/pytz/tests/test_lazy.py | 3 | 9772 | from operator import *
import os.path
import sys
import unittest
import warnings
if __name__ == '__main__':
# Only munge path if invoked as a script. Testrunners should have setup
# the paths already
sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, os.pardir)))
from pytz.lazy import LazyList, LazySet
class LazyListTestCase(unittest.TestCase):
    """Verify that LazyList behaves exactly like a plain list.

    Each test mirrors an operation on ``self.lazy`` (a LazyList built from an
    iterator) and ``self.base`` (the equivalent real list) and asserts the
    results agree. try/except NameError probes keep the suite runnable on
    both Python 2 and Python 3.
    """
    # NOTE(review): appears unused — setUp builds its own data; verify before removing.
    initial_data = [3,2,1]
    def setUp(self):
        self.base = [3, 2, 1]
        self.lesser = [2, 1, 0]    # compares less than base
        self.greater = [4, 3, 2]   # compares greater than base
        # Feed an iterator (not a list) so laziness is actually exercised.
        self.lazy = LazyList(iter(list(self.base)))
    def test_unary_ops(self):
        unary_ops = [str, repr, len, bool, not_]
        try:
            unary_ops.append(unicode)
        except NameError:
            pass # unicode no longer exists in Python 3.
        for op in unary_ops:
            self.assertEqual(
                op(self.lazy),
                op(self.base), str(op))
    def test_binary_ops(self):
        binary_ops = [eq, ge, gt, le, lt, ne, add, concat]
        try:
            binary_ops.append(cmp)
        except NameError:
            pass # cmp no longer exists in Python 3.
        for op in binary_ops:
            self.assertEqual(
                op(self.lazy, self.lazy),
                op(self.base, self.base), str(op))
            # Compare against equal, lesser and greater operands, both ways round.
            for other in [self.base, self.lesser, self.greater]:
                self.assertEqual(
                    op(self.lazy, other),
                    op(self.base, other), '%s %s' % (op, other))
                self.assertEqual(
                    op(other, self.lazy),
                    op(other, self.base), '%s %s' % (op, other))
        # Multiplication
        self.assertEqual(self.lazy * 3, self.base * 3)
        self.assertEqual(3 * self.lazy, 3 * self.base)
        # Contains
        self.assertTrue(2 in self.lazy)
        self.assertFalse(42 in self.lazy)
    def test_iadd(self):
        self.lazy += [1]
        self.base += [1]
        self.assertEqual(self.lazy, self.base)
    def test_bool(self):
        self.assertTrue(bool(self.lazy))
        self.assertFalse(bool(LazyList()))
        self.assertFalse(bool(LazyList(iter([]))))
    def test_hash(self):
        # Lists are unhashable; a LazyList must be too.
        self.assertRaises(TypeError, hash, self.lazy)
    def test_isinstance(self):
        self.assertTrue(isinstance(self.lazy, list))
        self.assertFalse(isinstance(self.lazy, tuple))
    def test_callable(self):
        try:
            callable
        except NameError:
            return # No longer exists with Python 3.
        self.assertFalse(callable(self.lazy))
    def test_append(self):
        self.base.append('extra')
        self.lazy.append('extra')
        self.assertEqual(self.lazy, self.base)
    def test_count(self):
        self.assertEqual(self.lazy.count(2), 1)
    def test_index(self):
        self.assertEqual(self.lazy.index(2), 1)
    def test_extend(self):
        self.base.extend([6, 7])
        self.lazy.extend([6, 7])
        self.assertEqual(self.lazy, self.base)
    def test_insert(self):
        self.base.insert(0, 'ping')
        self.lazy.insert(0, 'ping')
        self.assertEqual(self.lazy, self.base)
    def test_pop(self):
        self.assertEqual(self.lazy.pop(), self.base.pop())
        self.assertEqual(self.lazy, self.base)
    def test_remove(self):
        self.base.remove(2)
        self.lazy.remove(2)
        self.assertEqual(self.lazy, self.base)
    def test_reverse(self):
        self.base.reverse()
        self.lazy.reverse()
        self.assertEqual(self.lazy, self.base)
    def test_reversed(self):
        self.assertEqual(list(reversed(self.lazy)), list(reversed(self.base)))
    def test_sort(self):
        self.base.sort()
        self.assertNotEqual(self.lazy, self.base, 'Test data already sorted')
        self.lazy.sort()
        self.assertEqual(self.lazy, self.base)
    def test_sorted(self):
        self.assertEqual(sorted(self.lazy), sorted(self.base))
    def test_getitem(self):
        # Exercise every valid index, negative and positive.
        for idx in range(-len(self.base), len(self.base)):
            self.assertEqual(self.lazy[idx], self.base[idx])
    def test_setitem(self):
        for idx in range(-len(self.base), len(self.base)):
            self.base[idx] = idx + 1000
            self.assertNotEqual(self.lazy, self.base)
            self.lazy[idx] = idx + 1000
            self.assertEqual(self.lazy, self.base)
    def test_delitem(self):
        del self.base[0]
        self.assertNotEqual(self.lazy, self.base)
        del self.lazy[0]
        self.assertEqual(self.lazy, self.base)
        del self.base[-2]
        self.assertNotEqual(self.lazy, self.base)
        del self.lazy[-2]
        self.assertEqual(self.lazy, self.base)
    def test_iter(self):
        self.assertEqual(list(iter(self.lazy)), list(iter(self.base)))
    def test_getslice(self):
        # All (start, stop, step) combinations over the valid index range.
        for i in range(-len(self.base), len(self.base)):
            for j in range(-len(self.base), len(self.base)):
                for step in [-1, 1]:
                    self.assertEqual(self.lazy[i:j:step], self.base[i:j:step])
    def test_setslice(self):
        for i in range(-len(self.base), len(self.base)):
            for j in range(-len(self.base), len(self.base)):
                for step in [-1, 1]:
                    # Extended-slice assignment requires matching lengths.
                    replacement = range(0, len(self.base[i:j:step]))
                    self.base[i:j:step] = replacement
                    self.lazy[i:j:step] = replacement
                    self.assertEqual(self.lazy, self.base)
    def test_delslice(self):
        del self.base[0:1]
        del self.lazy[0:1]
        self.assertEqual(self.lazy, self.base)
        del self.base[-1:1:-1]
        del self.lazy[-1:1:-1]
        self.assertEqual(self.lazy, self.base)
class LazySetTestCase(unittest.TestCase):
    """Verify that LazySet behaves exactly like a plain set.

    Mirrors every operation on ``self.lazy`` (a LazySet built from an
    iterator) and ``self.base`` (an equivalent real set) and asserts the
    results agree. try/except NameError probes keep the suite runnable on
    both Python 2 and Python 3.
    """
    # NOTE(review): appears unused — setUp builds its own data; verify before removing.
    initial_data = set([3,2,1])
    def setUp(self):
        self.base = set([3, 2, 1])
        # Feed an iterator (not a set) so laziness is actually exercised.
        self.lazy = LazySet(iter(set(self.base)))
    def test_unary_ops(self):
        # These ops just need to work.
        unary_ops = [str, repr]
        try:
            unary_ops.append(unicode)
        except NameError:
            pass # unicode no longer exists in Python 3.
        for op in unary_ops:
            op(self.lazy) # These ops just need to work.
        # These ops should return identical values as a real set.
        unary_ops = [len, bool, not_]
        for op in unary_ops:
            self.assertEqual(
                op(self.lazy),
                op(self.base), '%s(lazy) == %r' % (op, op(self.lazy)))
    def test_binary_ops(self):
        binary_ops = [eq, ge, gt, le, lt, ne, sub, and_, or_, xor]
        try:
            binary_ops.append(cmp)
        except NameError:
            pass # cmp no longer exists in Python 3.
        for op in binary_ops:
            self.assertEqual(
                op(self.lazy, self.lazy),
                op(self.base, self.base), str(op))
            self.assertEqual(
                op(self.lazy, self.base),
                op(self.base, self.base), str(op))
            self.assertEqual(
                op(self.base, self.lazy),
                op(self.base, self.base), str(op))
        # Contains
        self.assertTrue(2 in self.lazy)
        self.assertFalse(42 in self.lazy)
    def test_iops(self):
        try:
            iops = [isub, iand, ior, ixor]
        except NameError:
            return # Don't exist in older Python versions.
        for op in iops:
            # Mutating operators, so make fresh copies.
            lazy = LazySet(self.base)
            base = self.base.copy()
            op(lazy, set([1]))
            op(base, set([1]))
            self.assertEqual(lazy, base, str(op))
    def test_bool(self):
        self.assertTrue(bool(self.lazy))
        self.assertFalse(bool(LazySet()))
        self.assertFalse(bool(LazySet(iter([]))))
    def test_hash(self):
        # Sets are unhashable; a LazySet must be too.
        self.assertRaises(TypeError, hash, self.lazy)
    def test_isinstance(self):
        self.assertTrue(isinstance(self.lazy, set))
    def test_callable(self):
        try:
            callable
        except NameError:
            return # No longer exists with Python 3.
        self.assertFalse(callable(self.lazy))
    def test_add(self):
        self.base.add('extra')
        self.lazy.add('extra')
        self.assertEqual(self.lazy, self.base)
    def test_copy(self):
        self.assertEqual(self.lazy.copy(), self.base)
    def test_method_ops(self):
        ops = [
            'difference', 'intersection', 'isdisjoint',
            'issubset', 'issuperset', 'symmetric_difference', 'union',
            'difference_update', 'intersection_update',
            'symmetric_difference_update', 'update']
        for op in ops:
            if not hasattr(set, op):
                continue # Not in this version of Python.
            # Make a copy, as some of the ops are mutating.
            lazy = LazySet(set(self.base))
            base = set(self.base)
            # Bug fix: the copies were created but the original code then
            # operated on self.lazy/self.base anyway, mutating the shared
            # fixtures across loop iterations and leaving the copies dead.
            # Operate on the per-op copies, as test_iops already does.
            self.assertEqual(
                getattr(lazy, op)(set([1])),
                getattr(base, op)(set([1])), op)
            self.assertEqual(lazy, base, op)
    def test_discard(self):
        self.base.discard(1)
        self.assertNotEqual(self.lazy, self.base)
        self.lazy.discard(1)
        self.assertEqual(self.lazy, self.base)
    def test_pop(self):
        self.assertEqual(self.lazy.pop(), self.base.pop())
        self.assertEqual(self.lazy, self.base)
    def test_remove(self):
        self.base.remove(2)
        self.lazy.remove(2)
        self.assertEqual(self.lazy, self.base)
    def test_clear(self):
        self.lazy.clear()
        self.assertEqual(self.lazy, set())
# Script entry point: run the suite directly with warnings escalated.
if __name__ == '__main__':
    warnings.simplefilter('error') # Warnings should be fatal in tests.
    unittest.main()
| bsd-3-clause |
xzturn/tensorflow | tensorflow/python/__init__.py | 2 | 8777 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import core names of TensorFlow.
Programs that want to build TensorFlow Ops and Graphs without having to import
the constructors and utilities individually can import this file:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
"""
import ctypes
import importlib
import sys
import traceback
# TODO(drpng): write up instructions for editing this file in a doc and point to
# the doc instead.
# If you want to edit this file to expose modules in public tensorflow API, you
# need to follow these steps:
# 1. Consult with tensorflow team and get approval for adding a new API to the
# public interface.
# 2. Document the module in the gen_docs_combined.py.
# 3. Import the module in the main tensorflow namespace by adding an import
# statement in this file.
# 4. Sanitize the entry point by making sure that your module does not expose
# transitively imported modules used for implementation, such as os, sys.
# go/tf-wildcard-import
# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top
import numpy as np
from tensorflow.python import pywrap_tensorflow
# Protocol buffers
from tensorflow.core.framework.graph_pb2 import *
from tensorflow.core.framework.node_def_pb2 import *
from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.core.protobuf.config_pb2 import *
from tensorflow.core.protobuf.tensorflow_server_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Framework
from tensorflow.python.framework.framework_lib import * # pylint: disable=redefined-builtin
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import config
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
# Session
from tensorflow.python.client.client_lib import *
# Ops
from tensorflow.python.ops.standard_ops import *
# Namespaces
from tensorflow.python.ops import initializers_ns as initializers
# pylint: enable=wildcard-import
# Bring in subpackages.
from tensorflow.python import data
from tensorflow.python import distribute
from tensorflow.python import keras
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.layers import layers
from tensorflow.python.module import module
from tensorflow.python.ops import bitwise_ops as bitwise
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import image_ops as image
from tensorflow.python.ops import manip_ops as manip
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import ragged
from tensorflow.python.ops import sets
from tensorflow.python.ops import stateful_random_ops
from tensorflow.python.ops.distributions import distributions
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.linalg.sparse import sparse
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.ragged import ragged_ops as _ragged_ops
from tensorflow.python.ops.signal import signal
from tensorflow.python.profiler import profiler
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2
from tensorflow.python.saved_model import saved_model
from tensorflow.python.summary import summary
from tensorflow.python.tpu import api
from tensorflow.python.user_ops import user_ops
from tensorflow.python.util import compat
# Update the RaggedTensor package docs w/ a list of ops that support dispatch.
# Done at import time so the generated API docs stay in sync with the op set.
ragged.__doc__ += _ragged_ops.ragged_dispatch.ragged_op_list()
# Import to make sure the ops are registered.
from tensorflow.python.ops import gen_audio_ops
from tensorflow.python.ops import gen_boosted_trees_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import gen_rnn_ops
from tensorflow.python.ops import gen_sendrecv_ops
# Import the names from python/training.py as train.Name.
from tensorflow.python.training import training as train
from tensorflow.python.training import quantize_training as _quantize_training
# Sub-package for performing i/o directly instead of via ops in a graph.
from tensorflow.python.lib.io import python_io
# Make some application and test modules available.
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import sysconfig
from tensorflow.python.platform import test
from tensorflow.python.compat import v2_compat
from tensorflow.python.util.all_util import make_all
from tensorflow.python.util.tf_export import tf_export
# Eager execution
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.eager.remote import connect_to_remote_host
from tensorflow.python.eager.def_function import function
from tensorflow.python.framework.ops import enable_eager_execution
# Check whether TF2_BEHAVIOR is turned on.
from tensorflow.python.eager import monitoring as _monitoring
from tensorflow.python import tf2 as _tf2
# Gauge metric recording whether TF2 behavior is enabled (as reported by the
# tf2 module, which reads the TF2_BEHAVIOR environment variable).
# Fix: the description string contained a stray unbalanced quote ('is set".').
_tf2_gauge = _monitoring.BoolGauge('/tensorflow/api/tf2_enable',
                                   'Environment variable TF2_BEHAVIOR is set.')
_tf2_gauge.get_cell().set(_tf2.enabled())
# Necessary for the symbols in this module to be taken into account by
# the namespace management system (API decorators).
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
# TensorFlow Debugger (tfdbg).
from tensorflow.python.debug.lib import check_numerics_callback
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.ops import gen_debug_ops
# DLPack
from tensorflow.python.dlpack.dlpack import from_dlpack
from tensorflow.python.dlpack.dlpack import to_dlpack
# XLA JIT compiler APIs.
from tensorflow.python.compiler.xla import jit
from tensorflow.python.compiler.xla import xla
# MLIR APIs.
from tensorflow.python.compiler.mlir import mlir
# Required due to `rnn` and `rnn_cell` not being imported in `nn` directly
# (due to a circular dependency issue: rnn depends on layers).
# These aliases re-expose the RNN entry points under the `nn` namespace.
nn.dynamic_rnn = rnn.dynamic_rnn
nn.static_rnn = rnn.static_rnn
nn.raw_rnn = rnn.raw_rnn
nn.bidirectional_dynamic_rnn = rnn.bidirectional_dynamic_rnn
nn.static_state_saving_rnn = rnn.static_state_saving_rnn
nn.rnn_cell = rnn_cell
# Export protos
# The proto classes below come from the wildcard proto imports at the top of
# this file, hence the undefined-variable suppression for static analysis.
# pylint: disable=undefined-variable
tf_export(v1=['AttrValue'])(AttrValue)
tf_export(v1=['ConfigProto'])(ConfigProto)
tf_export(v1=['Event', 'summary.Event'])(Event)
tf_export(v1=['GPUOptions'])(GPUOptions)
tf_export(v1=['GraphDef'])(GraphDef)
tf_export(v1=['GraphOptions'])(GraphOptions)
tf_export(v1=['HistogramProto'])(HistogramProto)
tf_export(v1=['LogMessage'])(LogMessage)
tf_export(v1=['MetaGraphDef'])(MetaGraphDef)
tf_export(v1=['NameAttrList'])(NameAttrList)
tf_export(v1=['NodeDef'])(NodeDef)
tf_export(v1=['OptimizerOptions'])(OptimizerOptions)
tf_export(v1=['RunMetadata'])(RunMetadata)
tf_export(v1=['RunOptions'])(RunOptions)
tf_export(v1=['SessionLog', 'summary.SessionLog'])(SessionLog)
tf_export(v1=['Summary', 'summary.Summary'])(Summary)
tf_export(v1=['summary.SummaryDescription'])(SummaryDescription)
tf_export(v1=['SummaryMetadata'])(SummaryMetadata)
tf_export(v1=['summary.TaggedRunMetadata'])(TaggedRunMetadata)
tf_export(v1=['TensorInfo'])(TensorInfo)
# pylint: enable=undefined-variable
# Special dunders that we choose to export:
_exported_dunders = set([
    '__version__',
    '__git_version__',
    '__compiler_version__',
    '__cxx11_abi_flag__',
    '__monolithic_build__',
])
# Expose symbols minus dunders, unless they are whitelisted above.
# This is necessary to export our dunders.
__all__ = [s for s in dir() if s in _exported_dunders or not s.startswith('_')]
| apache-2.0 |
alikins/ansible | lib/ansible/modules/storage/netapp/sf_snapshot_schedule_manager.py | 43 | 13003 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_snapshot_schedule_manager
short_description: Manage SolidFire snapshot schedules
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)
description:
- Create, destroy, or update accounts on SolidFire
options:
state:
description:
- Whether the specified schedule should exist or not.
required: true
choices: ['present', 'absent']
paused:
description:
- Pause / Resume a schedule.
required: false
recurring:
description:
- Should the schedule recur?
required: false
time_interval_days:
description: Time interval in days.
required: false
default: 1
time_interval_hours:
description: Time interval in hours.
required: false
default: 0
time_interval_minutes:
description: Time interval in minutes.
required: false
default: 0
name:
description:
- Name for the snapshot schedule.
required: true
snapshot_name:
description:
- Name for the created snapshots.
required: false
volumes:
description:
- Volume IDs that you want to set the snapshot schedule for.
- At least 1 volume ID is required for creating a new schedule.
- required when C(state=present)
required: false
retention:
description:
- Retention period for the snapshot.
- Format is 'HH:mm:ss'.
required: false
schedule_id:
description:
- The schedule ID for the schedule that you want to update or delete.
required: false
starting_date:
description:
- Starting date for the schedule.
- Required when C(state=present).
- Please use two '-' in the above format, or you may see an error- TypeError, is not JSON serializable description.
- "Format: C(2016--12--01T00:00:00Z)"
required: false
'''
EXAMPLES = """
- name: Create Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: Schedule_A
time_interval_days: 1
starting_date: 2016--12--01T00:00:00Z
volumes: 7
- name: Update Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
schedule_id: 6
recurring: True
snapshot_name: AnsibleSnapshots
- name: Delete Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
schedule_id: 6
"""
RETURN = """
schedule_id:
description: Schedule ID of the newly created schedule
returned: success
type: string
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireSnapShotSchedule(object):
    """Ansible module implementation: manage SolidFire snapshot schedules.

    Parses module parameters, connects to the SolidFire cluster via the
    SolidFire SDK, and creates, updates, or deletes a snapshot schedule
    so that it matches the requested ``state``.
    """
    def __init__(self):
        # Build the argument spec on top of the shared NetApp/SolidFire options
        # (hostname, username, password, ...).
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            time_interval_days=dict(required=False, type='int', default=1),
            time_interval_hours=dict(required=False, type='int', default=0),
            time_interval_minutes=dict(required=False, type='int', default=0),
            paused=dict(required=False, type='bool'),
            recurring=dict(required=False, type='bool'),
            starting_date=dict(type='str'),
            snapshot_name=dict(required=False, type='str'),
            volumes=dict(required=False, type='list'),
            retention=dict(required=False, type='str'),
            schedule_id=dict(type='int'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['starting_date', 'volumes'])
            ],
            supports_check_mode=True
        )
        p = self.module.params
        # set up state variables
        self.state = p['state']
        self.name = p['name']
        # self.interval = p['interval']
        self.time_interval_days = p['time_interval_days']
        self.time_interval_hours = p['time_interval_hours']
        self.time_interval_minutes = p['time_interval_minutes']
        self.paused = p['paused']
        self.recurring = p['recurring']
        self.starting_date = p['starting_date']
        if self.starting_date is not None:
            # Per the module docs the date is supplied with doubled dashes
            # (2016--12--01T00:00:00Z) to survive serialization; normalize it
            # back to ISO 8601 before sending to the SDK.
            self.starting_date = self.starting_date.replace("--", "-")
        self.snapshot_name = p['snapshot_name']
        self.volumes = p['volumes']
        self.retention = p['retention']
        self.schedule_id = p['schedule_id']
        # Populated by create_schedule(); used by apply() to report the new ID.
        self.create_schedule_result = None
        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)
    def get_schedule(self):
        """Return the existing schedule matching self.name (and self.schedule_id
        when supplied), or None.

        Side effect: fills in self.schedule_id from the first name match when
        the caller did not supply one.
        """
        schedule_list = self.sfe.list_schedules()
        for schedule in schedule_list.schedules:
            if schedule.name == self.name:
                # Update self.schedule_id:
                if self.schedule_id is not None:
                    # Name matched but ID didn't: keep scanning for the exact ID.
                    if schedule.schedule_id == self.schedule_id:
                        return schedule
                else:
                    self.schedule_id = schedule.schedule_id
                    return schedule
        return None
    def create_schedule(self):
        """Create a new snapshot schedule from the module parameters."""
        try:
            sched = netapp_utils.Schedule()
            # if self.interval == 'time_interval':
            sched.frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
                                                                 hours=self.time_interval_hours,
                                                                 minutes=self.time_interval_minutes)
            # Create schedule
            sched.name = self.name
            sched.schedule_info = netapp_utils.ScheduleInfo(
                volume_ids=self.volumes,
                snapshot_name=self.snapshot_name,
                retention=self.retention
            )
            sched.paused = self.paused
            sched.recurring = self.recurring
            sched.starting_date = self.starting_date
            self.create_schedule_result = self.sfe.create_schedule(schedule=sched)
        except Exception as e:
            self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)),
                                  exception=traceback.format_exc())
    def delete_schedule(self):
        """Delete the schedule identified by self.schedule_id.

        SolidFire deletes schedules by flagging them, not via a delete call.
        """
        try:
            get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
            sched = get_schedule_result.schedule
            sched.to_be_deleted = True
            self.sfe.modify_schedule(schedule=sched)
        except Exception as e:
            self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)),
                                  exception=traceback.format_exc())
    def update_schedule(self):
        """Push all requested (non-None) properties onto the existing schedule."""
        try:
            get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
            sched = get_schedule_result.schedule
            # Update schedule properties
            # if self.interval == 'time_interval':
            temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
                                                                hours=self.time_interval_hours,
                                                                minutes=self.time_interval_minutes)
            if sched.frequency.days != temp_frequency.days or \
                    sched.frequency.hours != temp_frequency.hours \
                    or sched.frequency.minutes != temp_frequency.minutes:
                sched.frequency = temp_frequency
            sched.name = self.name
            if self.volumes is not None:
                sched.schedule_info.volume_ids = self.volumes
            if self.retention is not None:
                sched.schedule_info.retention = self.retention
            if self.snapshot_name is not None:
                sched.schedule_info.snapshot_name = self.snapshot_name
            if self.paused is not None:
                sched.paused = self.paused
            if self.recurring is not None:
                sched.recurring = self.recurring
            if self.starting_date is not None:
                sched.starting_date = self.starting_date
            # Make API call
            self.sfe.modify_schedule(schedule=sched)
        except Exception as e:
            self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)),
                                  exception=traceback.format_exc())
    def apply(self):
        """Reconcile the cluster with the requested state and exit the module.

        Computes `changed`/`update_schedule` by comparing the existing schedule
        against the parameters; the elif chain stops at the first detected
        difference, which is sufficient because update_schedule() re-sends
        every requested property.
        """
        changed = False
        schedule_exists = False
        update_schedule = False
        schedule_detail = self.get_schedule()
        if schedule_detail:
            schedule_exists = True
            if self.state == 'absent':
                changed = True
            elif self.state == 'present':
                # Check if we need to update the account
                if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
                    update_schedule = True
                    changed = True
                elif schedule_detail.name != self.name:
                    update_schedule = True
                    changed = True
                elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
                    update_schedule = True
                    changed = True
                elif self.volumes is not None and schedule_detail.schedule_info.volume_ids != self.volumes:
                    update_schedule = True
                    changed = True
                elif self.paused is not None and schedule_detail.paused != self.paused:
                    update_schedule = True
                    changed = True
                elif self.recurring is not None and schedule_detail.recurring != self.recurring:
                    update_schedule = True
                    changed = True
                elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
                    update_schedule = True
                    changed = True
                elif self.time_interval_minutes is not None or self.time_interval_hours is not None \
                        or self.time_interval_days is not None:
                    temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
                                                                        hours=self.time_interval_hours,
                                                                        minutes=self.time_interval_minutes)
                    if schedule_detail.frequency.days != temp_frequency.days or \
                            schedule_detail.frequency.hours != temp_frequency.hours \
                            or schedule_detail.frequency.minutes != temp_frequency.minutes:
                        update_schedule = True
                        changed = True
        else:
            if self.state == 'present':
                changed = True
        if changed:
            if self.module.check_mode:
                # Skip changes
                pass
            else:
                if self.state == 'present':
                    if not schedule_exists:
                        self.create_schedule()
                    elif update_schedule:
                        self.update_schedule()
                elif self.state == 'absent':
                    self.delete_schedule()
        if self.create_schedule_result is not None:
            self.module.exit_json(changed=changed, schedule_id=self.create_schedule_result.schedule_id)
        else:
            self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the schedule manager and apply the state."""
    manager = SolidFireSnapShotSchedule()
    manager.apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
livid/v2ex | html5lib/tests/test_formfiller.py | 73 | 16437 | import sys
import unittest
from html5lib.filters.formfiller import SimpleFilter
class FieldStorage(dict):
    """Minimal stand-in for cgi.FieldStorage: a dict whose getlist() always
    hands back a list of values for a field name."""

    def getlist(self, name):
        """Return the value(s) stored under ``name`` as a list.

        Lists come back untouched; tuples and other iterables (objects with
        an ``__iter__`` attribute) are converted; anything else is wrapped
        in a one-element list.
        """
        value = self[name]
        if isinstance(value, list):
            return value
        if isinstance(value, tuple) or hasattr(value, '__iter__'):
            return list(value)
        return [value]
class TestCase(unittest.TestCase):
    def runTest(self, input, formdata, expected):
        # Shared driver: run SimpleFilter over the token stream `input` with
        # `formdata`, and compare the resulting stream to `expected`.
        # NotImplementedError is forgiven (reported to stderr, not failed).
        # NOTE: Python 2 only — uses `except E, name` and `print >>` syntax.
        try:
            output = list(SimpleFilter(input, formdata))
        except NotImplementedError, nie:
            # Amnesty for those that confess...
            print >>sys.stderr, "Not implemented:", str(nie)
        else:
            errorMsg = "\n".join(["\n\nInput:", str(input),
                                  "\nForm data:", str(formdata),
                                  "\nExpected:", str(expected),
                                  "\nReceived:", str(output)])
            self.assertEquals(output, expected, errorMsg)
    def testSingleTextInputWithValue(self):
        # A text input's existing value attribute is replaced by the submitted value.
        self.runTest(
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"text"), (u"name", u"foo"), (u"value", u"quux")]}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"text"), (u"name", u"foo"), (u"value", u"bar")]}])
    def testSingleTextInputWithoutValue(self):
        # A text input with no value attribute gains one from the form data.
        self.runTest(
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"text"), (u"name", u"foo")]}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"text"), (u"name", u"foo"), (u"value", u"bar")]}])
    def testSingleCheckbox(self):
        # A checkbox whose value matches the submitted value gains checked="".
        self.runTest(
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar")]}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar"), (u"checked", u"")]}])
    def testSingleCheckboxShouldBeUnchecked(self):
        # A checkbox whose value does not match the submitted value stays unchecked.
        self.runTest(
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux")]}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux")]}])
    def testSingleCheckboxCheckedByDefault(self):
        # A pre-checked checkbox whose value matches stays checked.
        self.runTest(
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar"), (u"checked", u"")]}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar"), (u"checked", u"")]}])
    def testSingleCheckboxCheckedByDefaultShouldBeUnchecked(self):
        # A pre-checked checkbox whose value does not match loses its checked flag.
        self.runTest(
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux"), (u"checked", u"")]}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"EmptyTag", "name": u"input",
              "data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux")]}])
    def testSingleTextareaWithValue(self):
        # A textarea's existing character content is replaced by the submitted value.
        self.runTest(
            [{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"textarea", "data": []}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"textarea", "data": []}])
    def testSingleTextareaWithoutValue(self):
        # An empty textarea gains a Characters token with the submitted value.
        self.runTest(
            [{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
             {"type": u"EndTag", "name": u"textarea", "data": []}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"textarea", "data": []}])
    def testSingleSelectWithValue(self):
        # An option whose value attribute matches the submitted value gains selected="".
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
    def testSingleSelectWithValueShouldBeUnselected(self):
        # The submitted value matches the option's text, not its value attribute,
        # so the option must stay unselected (value attribute wins).
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": "quux"}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
    def testSingleSelectWithoutValue(self):
        # With no value attribute, the option's text content is matched instead.
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"selected", u"")]},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
    def testSingleSelectWithoutValueShouldBeUnselected(self):
        """A valueless <option> whose text differs from the form data stays unselected."""
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": "quux"}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
    def testSingleSelectTwoOptionsWithValue(self):
        """With two options, only the one whose value matches ("bar") is selected;
        the other option is left untouched even though its value equals the text."""
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
    def testSingleSelectTwoOptionsWithValueShouldBeUnselected(self):
        """Neither option's value ("bar", "baz") matches the form data ("quux"),
        so neither option is selected."""
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"baz")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": "quux"}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"baz")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
    def testSingleSelectTwoOptionsWithoutValue(self):
        """Among two valueless options, the one whose text matches ("bar") is selected."""
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": "bar"}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"selected", u"")]},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
    def testSingleSelectTwoOptionsWithoutValueShouldBeUnselected(self):
        """Two valueless options whose texts ("bar", "baz") don't match the form
        data ("quux"): neither is selected and the stream passes through unchanged."""
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"baz"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": "quux"}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"bar"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": []},
             {"type": u"Characters", "data": u"baz"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
    def testSingleSelectMultiple(self):
        """A <select multiple> with a list of submitted values selects every
        option whose value appears in the list (here both "bar" and "quux")."""
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo"), (u"multiple", u"")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": ["bar", "quux"]}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo"), (u"multiple", u"")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux"), (u"selected", u"")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
    def testTwoSelect(self):
        """Two single-value selects sharing the same name consume the submitted
        list in document order: per the expected stream, the first select marks
        "bar" selected and the second marks "quux" selected."""
        self.runTest(
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []},
             {"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}],
            FieldStorage({"foo": ["bar", "quux"]}),
            [{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []},
             {"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux"), (u"selected", u"")]},
             {"type": u"Characters", "data": u"quux"},
             {"type": u"EndTag", "name": u"option", "data": []},
             {"type": u"EndTag", "name": u"select", "data": []}])
def buildTestSuite():
    """Collect every test case defined in this module into a TestSuite."""
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
def main():
    """Run the module's test suite via unittest's CLI entry point."""
    # The suite is built but its return value is discarded; unittest.main()
    # does its own discovery on this module and exits the process itself.
    buildTestSuite()
    unittest.main()
# Script entry point: run the tests when executed directly.
if __name__ == "__main__":
    main()
| bsd-3-clause |
danielhers/ucca | scripts/standard_to_pickle.py | 1 | 1276 | #!/usr/bin/env python3
import sys
import argparse
import os
from tqdm import tqdm
from ucca.ioutil import file2passage, passage2file, external_write_mode
desc = """Parses an XML in UCCA standard format, and writes them in binary Pickle format."""
def main(args):
    """Convert UCCA standard-format XML passages to binary Pickle files.

    For each input file the passage is parsed, and a ``<basename>.pickle``
    file is written into ``args.outdir`` (created if missing).

    :param args: parsed argparse namespace with ``filenames`` (list of XML
        paths), ``outdir`` (output directory) and ``verbose`` (bool).
    """
    os.makedirs(args.outdir, exist_ok=True)
    for filename in tqdm(args.filenames, desc="Converting", unit=" passages"):
        if args.verbose:
            # external_write_mode keeps the message from mangling the tqdm bar
            with external_write_mode():
                print("Reading passage '%s'..." % filename, file=sys.stderr)
        passage = file2passage(filename)
        basename = os.path.splitext(os.path.basename(filename))[0]
        # os.path.join instead of manual `outdir + os.path.sep + ...`:
        # avoids doubled separators when outdir already ends with one.
        outfile = os.path.join(args.outdir, basename + ".pickle")
        if args.verbose:
            with external_write_mode():
                print("Writing file '%s'..." % outfile, file=sys.stderr)
        passage2file(passage, outfile, binary=True)
# CLI entry point: parse arguments and convert the given XML passages.
if __name__ == '__main__':
    argparser = argparse.ArgumentParser(description=desc)
    argparser.add_argument('filenames', nargs='+', help="XML file names to convert")
    argparser.add_argument('-o', '--outdir', default='.', help="output directory")
    argparser.add_argument('-v', '--verbose', action="store_true", help="verbose output")
    main(argparser.parse_args())
| gpl-3.0 |
anryko/ansible | test/units/modules/cloud/xenserver/conftest.py | 24 | 2539 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import importlib
import pytest
from .FakeAnsibleModule import FakeAnsibleModule
@pytest.fixture
def fake_ansible_module(request):
    """Return a FakeAnsibleModule, parametrized via request.param when given."""
    # Guard clause: indirect parametrization supplies request.param; without
    # it, fall back to a default set of connection parameters.
    if not hasattr(request, 'param'):
        default_params = {
            "hostname": "somehost",
            "username": "someuser",
            "password": "somepwd",
            "validate_certs": True,
        }
        return FakeAnsibleModule(default_params)
    return FakeAnsibleModule(request.param)
@pytest.fixture(autouse=True)
def XenAPI():
    """Imports and returns fake XenAPI module."""
    # Import of fake XenAPI module is wrapped by fixture so that it does not
    # affect other unit tests which could potentially also use XenAPI module.
    # First we use importlib.import_module() to import the module and assign
    # it to a local symbol.
    fake_xenapi = importlib.import_module('units.modules.cloud.xenserver.FakeXenAPI')
    # Now we populate Python module cache with imported fake module using the
    # original module name (XenAPI). That way, any 'import XenAPI' statement
    # will just load already imported fake module from the cache.
    # NOTE(review): this mutates global interpreter state (sys.modules) and is
    # never undone; autouse=True means every test in this package sees the fake.
    sys.modules['XenAPI'] = fake_xenapi
    return fake_xenapi
@pytest.fixture
def xenserver_guest_info(XenAPI):
    """Import and return the xenserver_guest_info module under test."""
    # The import is deliberately deferred into the fixture body: the module
    # imports XenAPI at load time, so it must run only after the XenAPI
    # fixture (taken as an argument above) has installed the fake module.
    from ansible.modules.cloud.xenserver import xenserver_guest_info as module_under_test
    return module_under_test
@pytest.fixture
def xenserver_guest_powerstate(XenAPI):
    """Import and return the xenserver_guest_powerstate module under test."""
    # The import is deliberately deferred into the fixture body: the module
    # imports XenAPI at load time, so it must run only after the XenAPI
    # fixture (taken as an argument above) has installed the fake module.
    from ansible.modules.cloud.xenserver import xenserver_guest_powerstate as module_under_test
    return module_under_test
| gpl-3.0 |
opendroid-Team/enigma2-4.1 | lib/python/Screens/Scart.py | 126 | 1771 | from Screen import Screen
from MessageBox import MessageBox
from Components.AVSwitch import AVSwitch
from Tools import Notifications
class Scart(Screen):
	"""Screen that switches the AV output to the SCART input and shows a
	"press OK to return" message; closing the message switches back to the
	encoder. NOTE: this file is Python 2 (print statement, bare except)."""
	def __init__(self, session, start_visible=True):
		# msgVisible: None = show on first exec, False = hidden, True = showing
		Screen.__init__(self, session)
		self.msgBox = None
		self.notificationVisible = None
		self.avswitch = AVSwitch()
		if start_visible:
			self.onExecBegin.append(self.showMessageBox)
			self.msgVisible = None
		else:
			self.msgVisible = False
	def showMessageBox(self):
		"""Switch input to SCART and display the return-prompt message box."""
		# First invocation via onExecBegin: unhook ourselves so the box is
		# only auto-shown once per screen instance.
		if self.msgVisible is None:
			self.onExecBegin.remove(self.showMessageBox)
			self.msgVisible = False
		if not self.msgVisible:
			self.msgVisible = True
			self.avswitch.setInput("SCART")
			# Outside an exec cycle we cannot open a dialog directly, so fall
			# back to the notification mechanism (tagged for later lookup).
			if not self.session.in_exec:
				self.notificationVisible = True
				Notifications.AddNotificationWithCallback(self.MsgBoxClosed, MessageBox, _("If you see this, something is wrong with\nyour scart connection. Press OK to return."), MessageBox.TYPE_ERROR, msgBoxID = "scart_msgbox")
			else:
				self.msgBox = self.session.openWithCallback(self.MsgBoxClosed, MessageBox, _("If you see this, something is wrong with\nyour scart connection. Press OK to return."), MessageBox.TYPE_ERROR)
	def MsgBoxClosed(self, *val):
		"""Callback for the message box: drop the reference and switch back."""
		self.msgBox = None
		self.switchToTV()
	def switchToTV(self, *val):
		"""Switch the AV input back to the encoder and tear down any open
		message box or pending scart notification."""
		if self.msgVisible:
			if self.msgBox:
				self.msgBox.close() # ... MsgBoxClosed -> switchToTV again..
				return
			self.avswitch.setInput("ENCODER")
			self.msgVisible = False
		if self.notificationVisible:
			self.avswitch.setInput("ENCODER")
			self.notificationVisible = False
			# Close our tagged notification; entries without msgBoxID raise
			# AttributeError, which the bare except below skips over.
			for notification in Notifications.current_notifications:
				try:
					if notification[1].msgBoxID == "scart_msgbox":
						notification[1].close()
				except:
					print "other notification is open. try another one."
| gpl-2.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/tests/parser/common.py | 1 | 41697 | # -*- coding: utf-8 -*-
import csv
import os
import platform
import re
import sys
from datetime import datetime
import nose
import numpy as np
from numpy.testing.decorators import slow
from pandas.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import(StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.io.common import DtypeWarning, EmptyDataError, URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    def test_empty_decimal_marker(self):
        """An empty string for the 'decimal' argument must raise ValueError."""
        data = """A|B|C
1|2,334|5
10|13|10.
"""
        # C parser: supports only length-1 decimals
        # Python parser: 'decimal' not supported yet
        self.assertRaises(ValueError, self.read_csv,
                          StringIO(data), decimal='')
    def test_read_csv(self):
        """Smoke test: read_csv accepts a file:// URL path on Python 2.

        Intentionally a no-op on Python 3 (only the PY2 branch does work).
        """
        if not compat.PY3:
            if compat.is_platform_windows():
                prefix = u("file:///")
            else:
                prefix = u("file://")
            fname = prefix + compat.text_type(self.csv1)
            self.read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
    def test_squeeze(self):
        """squeeze=True on single-column data returns a Series, not a DataFrame."""
        data = """\
a,1
b,2
c,3
"""
        idx = Index(['a', 'b', 'c'], name=0)
        expected = Series([1, 2, 3], name=1, index=idx)
        result = self.read_table(StringIO(data), sep=',', index_col=0,
                                 header=None, squeeze=True)
        tm.assertIsInstance(result, Series)
        tm.assert_series_equal(result, expected)
    def test_squeeze_no_view(self):
        """Squeezed result must own its data rather than view the parsed block."""
        # see gh-8217
        # Series should not be a view
        data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
        result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
        self.assertFalse(result._is_view)
    def test_multiple_skts_example(self):
        """Placeholder, not yet implemented (body is a no-op).

        Presumably meant to cover data with padded/multi-space fields
        — TODO confirm intent and complete or remove.
        """
        # TODO: Complete this
        data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11." # noqa
        pass
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skip_footer is not supported with the C parser yet
if self.engine == 'python':
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skip_footer=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_duplicate_columns(self):
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
for method in ('read_csv', 'read_table'):
# check default behavior
df = getattr(self, method)(StringIO(data), sep=',')
self.assertEqual(list(df.columns),
['A', 'A.1', 'B', 'B.1', 'B.2'])
df = getattr(self, method)(StringIO(data), sep=',',
mangle_dupe_cols=False)
self.assertEqual(list(df.columns),
['A', 'A', 'B', 'B', 'B'])
df = getattr(self, method)(StringIO(data), sep=',',
mangle_dupe_cols=True)
self.assertEqual(list(df.columns),
['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
# TODO: complete this
df = self.read_csv(StringIO(data)) # noqa
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[
:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
    def test_get_chunk_passed_chunksize(self):
        """get_chunk() with no size argument yields the reader's chunksize rows."""
        data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
        result = self.read_csv(StringIO(data), chunksize=2)
        piece = result.get_chunk()
        self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
# skip_footer is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/parser/data/salary.table.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
    def test_nonexistent_path(self):
        """A missing file must raise IOError, never crash the interpreter (gh-2428)."""
        # don't segfault pls #2428
        path = '%s.csv' % tm.rands(10)
        self.assertRaises(IOError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
    def test_int64_min_issues(self):
        """A trailing empty field parses as NaN in an otherwise-int column (gh-2599)."""
        # #2599
        data = 'A,B\n0,0\n0,'
        result = self.read_csv(StringIO(data))
        expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
        tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
    """Integers larger than float64 can represent exactly must still
    parse exactly (no silent rounding through float)."""
    values = ([17007000002000191] * 4 +
              [17007000002000192] * 5 +
              [17007000002000194])
    data = "Numbers\n" + "\n".join(str(v) for v in values)

    result = self.read_csv(StringIO(data))
    expected = DataFrame({'Numbers': values})

    self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_chunks_have_consistent_numerical_type(self):
    # Mixed int/float chunks must be coerced to a single float dtype
    # without emitting a DtypeWarning.
    integers = [str(i) for i in range(499999)]
    data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)

    with tm.assert_produces_warning(False):
        df = self.read_csv(StringIO(data))

    # Assert that types were coerced.
    self.assertTrue(type(df.a[0]) is np.float64)
    # np.float was a deprecated alias of the builtin float (removed in
    # NumPy 1.24); the dtype comparison is identical with np.float64.
    self.assertEqual(df.a.dtype, np.float64)
def test_warn_if_chunks_have_mismatched_type(self):
    warning_type = False
    integers = [str(i) for i in range(499999)]
    data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)

    # see gh-3866: if chunks are different types and can't
    # be coerced using numerical types, then issue warning.
    if self.engine == 'c' and self.low_memory:
        warning_type = DtypeWarning

    with tm.assert_produces_warning(warning_type):
        df = self.read_csv(StringIO(data))
    # np.object was a deprecated alias of the builtin object (removed
    # in NumPy 1.24); the dtype comparison is identical with object.
    self.assertEqual(df.a.dtype, object)
def test_integer_overflow_bug(self):
    # see gh-2601: values overflowing int64 fall back to float64
    data = "65248E10 11\n55555E55 22\n"

    result = self.read_csv(StringIO(data), header=None, sep=' ')
    self.assertTrue(result[0].dtype == np.float64)

    # raw string avoids the invalid escape-sequence warning for '\s'
    result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
    self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
    # see gh-5156: passing more names than there are columns must
    # raise ValueError rather than silently padding.
    data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
    tm.assertRaises(ValueError, self.read_csv, StringIO(data),
                    header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
    # see gh-3374, gh-6607: leading whitespace is ignored with a
    # regex separator.
    data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
    # raw string avoids the invalid escape-sequence warning for '\s'
    result = self.read_table(StringIO(data), sep=r'\s+')

    expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
    tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
    # nrows and chunksize are mutually exclusive options
    data = 'a b c'
    self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
                      nrows=10, chunksize=5)
def test_chunk_begins_with_newline_whitespace(self):
    # see gh-10022: data beginning with a blank line, then a row with
    # leading whitespace, must still parse as two rows.
    data = '\n hello\nworld\n'
    result = self.read_csv(StringIO(data), header=None)
    self.assertEqual(len(result), 2)

    # see gh-9735: this issue is C parser-specific (bug when
    # parsing whitespace and characters at chunk boundary)
    if self.engine == 'c':
        # first value is sized to end exactly at the 256 KiB chunk edge
        chunk1 = 'a' * (1024 * 256 - 2) + '\na'
        chunk2 = '\n a'
        result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
        expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
        tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
    # see gh-10184: a header-only CSV with index_col yields an empty
    # frame whose (empty) index keeps the column's name.
    data = 'x,y'
    result = self.read_csv(StringIO(data), index_col=0)
    expected = DataFrame([], columns=['y'], index=Index([], name='x'))
    tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
    # see gh-10467: header-only CSV with a two-level index_col
    data = 'x,y,z'
    result = self.read_csv(StringIO(data), index_col=['x', 'y'])
    expected = DataFrame([], columns=['z'],
                         index=MultiIndex.from_arrays(
                             [[]] * 2, names=['x', 'y']))
    tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
    # Same as test_empty_with_multiindex but with the index columns
    # given positionally in reversed order.
    data = 'x,y,z'
    result = self.read_csv(StringIO(data), index_col=[1, 0])
    expected = DataFrame([], columns=['z'],
                         index=MultiIndex.from_arrays(
                             [[]] * 2, names=['y', 'x']))
    tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
    # see gh-9565: exponent, trailing-dot and inf spellings must all
    # parse the same way Python's float() does.
    data = '45e-1,4.5,45.,inf,-inf'
    result = self.read_csv(StringIO(data), header=None)
    expected = DataFrame([[float(s) for s in data.split(',')]])
    tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
    # see gh-12215: values like '2e' (exponent marker with no digits)
    # must round-trip as strings, for every float_precision engine.
    # NOTE(review): DataFrame.from_items is deprecated in later pandas
    # (DataFrame.from_dict with an OrderedDict) -- kept as-is here.
    df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
                               ('y', ['42e']), ('z', ['632E'])])
    data = df.to_csv(index=False)
    for prec in self.float_precision_choices:
        df_roundtrip = self.read_csv(
            StringIO(data), float_precision=prec)
        tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
    # Values wider than int64 stay as strings by default; forcing an
    # int64 converter on them must raise OverflowError.
    data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""

    result = self.read_csv(StringIO(data))
    self.assertTrue(result['ID'].dtype == object)

    self.assertRaises(OverflowError, self.read_csv,
                      StringIO(data), converters={'ID': np.int64})

    # Just inside int64 range: parse as integer
    i_max = np.iinfo(np.int64).max
    i_min = np.iinfo(np.int64).min
    for x in [i_max, i_min]:
        result = self.read_csv(StringIO(str(x)), header=None)
        expected = DataFrame([x])
        tm.assert_frame_equal(result, expected)

    # Just outside int64 range: parse as string
    too_big = i_max + 1
    too_small = i_min - 1
    for x in [too_big, too_small]:
        result = self.read_csv(StringIO(str(x)), header=None)
        expected = DataFrame([str(x)])
        tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
    # see gh-9535: header-only input combined with nrows/chunksize
    # still produces an empty frame with the right columns.
    expected = DataFrame([], columns=['foo', 'bar'])

    result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
    tm.assert_frame_equal(result, expected)

    result = next(iter(self.read_csv(
        StringIO('foo,bar\n'), chunksize=10)))
    tm.assert_frame_equal(result, expected)

    # 'as_recarray' is not supported yet for the Python parser
    if self.engine == 'c':
        # as_recarray returns an (index, columns, values) triple
        result = self.read_csv(StringIO('foo,bar\n'),
                               nrows=10, as_recarray=True)
        result = DataFrame(result[2], columns=result[1],
                           index=result[0])
        tm.assert_frame_equal(DataFrame.from_records(
            result), expected, check_index_type=False)

        result = next(iter(self.read_csv(
            StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
        result = DataFrame(result[2], columns=result[1], index=result[0])
        tm.assert_frame_equal(DataFrame.from_records(
            result), expected, check_index_type=False)
def test_eof_states(self):
    # see gh-10728, gh-10548
    # Exercises each tokenizer end-of-file state, first with
    # skip_blank_lines=True, then =False, then the states that raise.

    # With skip_blank_lines = True
    expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])

    # gh-10728: WHITESPACE_LINE
    data = 'a,b,c\n4,5,6\n '
    result = self.read_csv(StringIO(data))
    tm.assert_frame_equal(result, expected)

    # gh-10548: EAT_LINE_COMMENT
    data = 'a,b,c\n4,5,6\n#comment'
    result = self.read_csv(StringIO(data), comment='#')
    tm.assert_frame_equal(result, expected)

    # EAT_CRNL_NOP
    data = 'a,b,c\n4,5,6\n\r'
    result = self.read_csv(StringIO(data))
    tm.assert_frame_equal(result, expected)

    # EAT_COMMENT
    data = 'a,b,c\n4,5,6#comment'
    result = self.read_csv(StringIO(data), comment='#')
    tm.assert_frame_equal(result, expected)

    # SKIP_LINE
    data = 'a,b,c\n4,5,6\nskipme'
    result = self.read_csv(StringIO(data), skiprows=[2])
    tm.assert_frame_equal(result, expected)

    # With skip_blank_lines = False

    # EAT_LINE_COMMENT
    data = 'a,b,c\n4,5,6\n#comment'
    result = self.read_csv(
        StringIO(data), comment='#', skip_blank_lines=False)
    expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
    tm.assert_frame_equal(result, expected)

    # IN_FIELD
    data = 'a,b,c\n4,5,6\n '
    result = self.read_csv(StringIO(data), skip_blank_lines=False)
    expected = DataFrame(
        [['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
    tm.assert_frame_equal(result, expected)

    # EAT_CRNL
    data = 'a,b,c\n4,5,6\n\r'
    result = self.read_csv(StringIO(data), skip_blank_lines=False)
    expected = DataFrame(
        [[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
    tm.assert_frame_equal(result, expected)

    # Should produce exceptions

    # ESCAPED_CHAR
    data = "a,b,c\n4,5,6\n\\"
    self.assertRaises(Exception, self.read_csv,
                      StringIO(data), escapechar='\\')

    # ESCAPE_IN_QUOTED_FIELD
    data = 'a,b,c\n4,5,6\n"\\'
    self.assertRaises(Exception, self.read_csv,
                      StringIO(data), escapechar='\\')

    # IN_QUOTED_FIELD
    data = 'a,b,c\n4,5,6\n"'
    self.assertRaises(Exception, self.read_csv,
                      StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
    # See gh-12203: a too-long row errors without usecols, but is
    # tolerated when usecols restricts parsing to the shared columns.
    csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""

    # make sure that an error is still thrown
    # when the 'usecols' parameter is not provided
    # (raw string: the pattern contains regex escapes like \d)
    msg = r"Expected \d+ fields in line \d+, saw \d+"
    with tm.assertRaisesRegexp(ValueError, msg):
        df = self.read_csv(StringIO(csv))

    expected = DataFrame({
        'a': [0, 3, 8],
        'b': [1, 4, 9]
    })

    usecols = [0, 1]
    df = self.read_csv(StringIO(csv), usecols=usecols)
    tm.assert_frame_equal(df, expected)

    usecols = ['a', 'b']
    df = self.read_csv(StringIO(csv), usecols=usecols)
    tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
    # See gh-12493
    names = ['Dummy', 'X', 'Dummy_2']
    usecols = names[1:2]  # ['X']

    # first, check to see that the response of
    # parser when faced with no provided columns
    # throws the correct error, with or without usecols
    errmsg = "No columns to parse from file"

    with tm.assertRaisesRegexp(EmptyDataError, errmsg):
        self.read_csv(StringIO(''))

    with tm.assertRaisesRegexp(EmptyDataError, errmsg):
        self.read_csv(StringIO(''), usecols=usecols)

    # a single all-empty row keeps the selected column as one NaN row
    expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
    df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
    tm.assert_frame_equal(df, expected)

    # no data at all (but names given): empty frame, right columns
    expected = DataFrame(columns=usecols)
    df = self.read_csv(StringIO(''), names=names, usecols=usecols)
    tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
    # Junk rows, blank lines and trailing whitespace interleaved with
    # real data; only the numeric rows should survive the skiprows.
    data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"  # noqa

    expected = DataFrame([[1., 2., 4.],
                          [5.1, np.nan, 10.]])

    # gh-8661, gh-8679: this should ignore six lines including
    # lines with trailing whitespace and blank lines
    df = self.read_csv(StringIO(data.replace(',', ' ')),
                       header=None, delim_whitespace=True,
                       skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
    tm.assert_frame_equal(df, expected)

    df = self.read_table(StringIO(data.replace(',', ' ')),
                         header=None, delim_whitespace=True,
                         skiprows=[0, 1, 2, 3, 5, 6],
                         skip_blank_lines=True)
    tm.assert_frame_equal(df, expected)

    # gh-8983: test skipping set of rows after a row with trailing spaces
    expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
                          "C": [4., 10]})
    df = self.read_table(StringIO(data.replace(',', ' ')),
                         delim_whitespace=True,
                         skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
    tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
    # see gh-6607: sep and delim_whitespace are mutually exclusive
    data = 'a b c\n1 2 3'
    with tm.assertRaisesRegexp(ValueError, 'you can only specify one'):
        # raw string avoids the invalid escape-sequence warning
        self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
    # see gh-9710: leading whitespace before single-character fields
    # must be stripped with delim_whitespace or skipinitialspace.
    # NOTE(review): the fixture lines originally carry a leading
    # space -- verify against the upstream pandas test file.
    data = """\
MyColumn
a
b
a
b\n"""

    expected = DataFrame({'MyColumn': list('abab')})

    result = self.read_csv(StringIO(data), delim_whitespace=True,
                           skipinitialspace=True)
    tm.assert_frame_equal(result, expected)

    result = self.read_csv(StringIO(data), skipinitialspace=True)
    tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
    # Blank lines are skipped by default; with skip_blank_lines=False
    # they become all-NaN rows (see the second expected list, which
    # implies two blank lines after the first data row and one after
    # the second).
    data = """\
A,B,C
1,2.,4.


5.,NaN,10.0

-70,.4,1
"""
    expected = [[1., 2., 4.],
                [5., np.nan, 10.],
                [-70., .4, 1.]]
    df = self.read_csv(StringIO(data))
    tm.assert_almost_equal(df.values, expected)
    # NOTE(review): '\s+' should be a raw string (r'\s+') on newer
    # Pythons to avoid the invalid-escape DeprecationWarning.
    df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
    tm.assert_almost_equal(df.values, expected)
    expected = [[1., 2., 4.],
                [np.nan, np.nan, np.nan],
                [np.nan, np.nan, np.nan],
                [5., np.nan, 10.],
                [np.nan, np.nan, np.nan],
                [-70., .4, 1.]]
    df = self.read_csv(StringIO(data), skip_blank_lines=False)
    tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
    # Lines containing only whitespace (spaces/tabs) before the header
    # and leading whitespace on a data line must be ignored.
    data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
    expected = [[1, 2., 4.],
                [5., np.nan, 10.]]
    df = self.read_csv(StringIO(data))
    tm.assert_almost_equal(df.values, expected)
def test_regex_separator(self):
    # see gh-6607: a regex separator must parse the same frame that a
    # comma-rewritten equivalent does, and gives an unnamed index.
    # NOTE(review): '\s+' should be a raw string (r'\s+') on newer
    # Pythons; column spacing inside the fixture may have been
    # collapsed by extraction -- verify against upstream.
    data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
    df = self.read_table(StringIO(data), sep='\s+')
    expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
                             index_col=0)
    self.assertIsNone(expected.index.name)
    tm.assert_frame_equal(df, expected)

    data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
    result = self.read_table(StringIO(data), sep='\s+')
    expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                         columns=['a', 'b', 'c'])
    tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
    # Capture stdout to check each engine's verbose output: the C
    # engine prints timing lines, the Python engine reports NA fills.
    text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""

    buf = StringIO()
    sys.stdout = buf

    try:  # engines are verbose in different ways
        self.read_csv(StringIO(text), verbose=True)
        if self.engine == 'c':
            self.assertIn('Tokenization took:', buf.getvalue())
            self.assertIn('Parser memory cleanup took:', buf.getvalue())
        else:  # Python engine
            self.assertEqual(buf.getvalue(),
                             'Filled 3 NA values in column a\n')
    finally:
        # always restore stdout, even if an assertion fails
        sys.stdout = sys.__stdout__

    buf = StringIO()
    sys.stdout = buf

    text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""

    try:  # engines are verbose in different ways
        self.read_csv(StringIO(text), verbose=True, index_col=0)
        if self.engine == 'c':
            self.assertIn('Tokenization took:', buf.getvalue())
            self.assertIn('Parser memory cleanup took:', buf.getvalue())
        else:  # Python engine
            self.assertEqual(buf.getvalue(),
                             'Filled 1 NA values in column a\n')
    finally:
        sys.stdout = sys.__stdout__
def test_iteration_open_handle(self):
    # Reading from a partially consumed open file handle: the Python
    # engine continues from the current position, the C engine raises.
    # Python 2 only (mixes str iteration with binary handles).
    if PY3:
        raise nose.SkipTest(
            "won't work in Python 3 {0}".format(sys.version_info))

    with tm.ensure_clean() as path:
        with open(path, 'wb') as f:
            f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')

        with open(path, 'rb') as f:
            # consume the file up to and including the 'CCC' line
            for line in f:
                if 'CCC' in line:
                    break

            if self.engine == 'c':
                tm.assertRaises(Exception, self.read_table,
                                f, squeeze=True, header=None)
            else:
                result = self.read_table(f, squeeze=True, header=None)
                expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
                tm.assert_series_equal(result, expected)
| mit |
chrishas35/django-travis-ci | django/contrib/localflavor/py/py_department.py | 90 | 1129 | # -*- coding: utf-8 -*-
# http://www.statoids.com/upy.html
# Two-letter codes for the departments of Paraguay plus the capital
# district (Asunción), as (code, localized name) choices ordered
# alphabetically by Spanish name.
DEPARTMENT_CHOICES = (
    ('AG', u'Alto Paraguay'),
    ('AA', u'Alto Paraná'),
    ('AM', u'Amambay'),
    ('AS', u'Asunción'),
    ('BQ', u'Boquerón'),
    ('CG', u'Caaguazú'),
    ('CZ', u'Caazapá'),
    ('CY', u'Canindeyú'),
    ('CE', u'Central'),
    ('CN', u'Concepción'),
    ('CR', u'Cordillera'),
    ('GU', u'Guairá'),
    ('IT', u'Itapúa'),
    ('MI', u'Misiones'),
    ('NE', u'Ñeembucú'),
    ('PG', u'Paraguarí'),
    ('PH', u'Pdte. Hayes'),
    ('SP', u'San Pedro'),
)
# Same departments keyed by the same codes, but labelled with their
# official roman numerals and ordered by numeral.
DEPARTMENT_ROMAN_CHOICES = (
    ('CN', u'I Concepción'),
    ('SP', u'II San Pedro'),
    ('CR', u'III Cordillera'),
    ('GU', u'IV Guairá'),
    ('CG', u'V Caaguazú'),
    ('CZ', u'VI Caazapá'),
    ('IT', u'VII Itapúa'),
    ('MI', u'VIII Misiones'),
    ('PG', u'IX Paraguarí'),
    ('AA', u'X Alto Paraná'),
    ('CE', u'XI Central'),
    ('NE', u'XII Ñeembucú'),
    ('AM', u'XIII Amambay'),
    ('CY', u'XIV Canindeyú'),
    ('PH', u'XV Pdte. Hayes'),
    ('AG', u'XVI Alto Paraguay'),
    ('BQ', u'XVII Boquerón'),
    ('AS', u'XVIII Asunción'),
)
| bsd-3-clause |
jalonsob/Informes | grimoirelib_alch/query/scr.py | 4 | 6699 | #! /usr/bin/python
# -*- coding: utf-8 -*-
## Copyright (C) 2014 Bitergia
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##
## Package to deal with queries for SCR (source code review) data
## from *Grimoire (Bicho databases with Gerrit backend)
##
## Authors:
## Jesus M. Gonzalez-Barahona <jgb@bitergia.com>
##
from grimoirelib_alch.query.its import (
DB as ITSDB, Query as ITSQuery
)
from grimoirelib_alch.query.common import GrimoireDatabase
from sqlalchemy import func, Column, Integer, ForeignKey, or_
from sqlalchemy.sql import label
class DB (ITSDB):
    """Class for dealing with SCR (Bicho-Gerrit) databases.

    Inherits from the class to deal with ITS databases, because it
    is basically the same (factoring out tables produced by the
    Gerrit backend).

    """

    def _query_cls(self):
        """Return the default Query class for this database.

        Returns
        -------

        GrimoireQuery: default Query class.

        """

        return Query

    def _create_tables(self, tables = None, tables_id = None):
        """Create all SQLAlchemy tables.

        Builds a SQLAlchemy class per SQL table, by using _table().
        It assumes self.Base, self.schema and self.schema_id are already
        set (see super.__init__() code).

        Uses super._create_tables() to create ITS basic tables, which
        are shared by SCR databases.

        """

        ITSDB._create_tables(self, tables, tables_id)
        # Gerrit-specific extension table (extra per-review data)
        DB.IssuesExtGerrit = GrimoireDatabase._table (
            bases = (self.Base,), name = 'IssuesExtGerrit',
            tablename = 'issues_ext_gerrit',
            schemaname = self.schema
            )
class Query (ITSQuery):
    """Class for dealing with SCR queries.

    Inherits all methods for dealing with ITS queries.
    """

    def select_changes (self, count = False, distinct = False,
                        changers = False):
        """Select fields from changes.

        Include fields in Changes table as columns in SELECT. If changers
        is True, include who did the change.
        If distinct is True, select distinct change ids.
        If count is True, select the number of change ids.

        Parameters
        ----------

        count: bool
           Produce count of changes instead of list
        distinct: bool
           Select distinct change ids.
        changers: bool
           Include id of changer as a column (or not).

        Returns
        -------

        Query
            Including new columns in SELECT

        """

        id_field = DB.Changes.id
        if distinct:
            id_field = func.distinct(id_field)
        if count:
            id_field = func.count(id_field)
        query = self.add_columns (
            label("id", id_field),
            label("issue_id", DB.Changes.issue_id),
            label("field", DB.Changes.field),
            label("patchset", DB.Changes.old_value),
            label("value", DB.Changes.new_value),
            label("date", DB.Changes.changed_on)
            )
        if changers:
            query = query.add_columns (
                label("changed_by", DB.Changes.changed_by))
        return query

    def select_issues (self, count = False, distinct = False,
                       summary = False, submitter = False):
        """Select fields from issues.

        Include fields in Issues table as columns in SELECT. If summary
        and/or submitter is True, include the summary and/or the
        submitter id.
        If distinct is True, select distinct issue ids.
        If count is True, select the number of issue ids.

        Parameters
        ----------

        count: bool
           Produce count of issues instead of list
        distinct: bool
           Select distinct issue ids.
        summary: bool
           Include summary as a column (or not).
        submitter: bool
           Include submitter id as a column (or not).

        Returns
        -------

        Query
            Including new columns in SELECT

        """

        id_field = DB.Issues.id
        if distinct:
            id_field = func.distinct(id_field)
        if count:
            id_field = func.count(id_field)
        query = self.add_columns (
            label("id", id_field),
            label("issue", DB.Issues.issue),
            label("status", DB.Issues.status),
            label("date", DB.Issues.submitted_on)
            )
        if summary:
            # Fixed: 'summary' is a column of the Issues table, not
            # Changes (the old DB.Changes.summary reference pointed at
            # the wrong table).
            query = query.add_columns (
                label("summary", DB.Issues.summary))
        if submitter:
            # Fixed: 'submitted_by' is likewise an Issues column.
            query = query.add_columns (
                label("submitter", DB.Issues.submitted_by))
        return query
if __name__ == "__main__":
    # Smoke-test driver: run some sample queries against a local review
    # database and print the results.  Python 2 only (print statements).
    from grimoirelib_alch.aux.standalone import stdout_utf8, print_banner

    stdout_utf8()

    # NOTE(review): placeholder credentials ('jgb:XXX') -- replace with
    # real ones before running.
    database = DB (url = 'mysql://jgb:XXX@localhost/',
                   schema = 'reviews_wikimedia_2014_09_11',
                   schema_id = 'vizgrimoire_cvsanaly')
    session = database.build_session(Query, echo = False)

    #---------------------------------
    print_banner ("List of openers")
    res = session.query() \
        .select_personsdata("openers") \
        .group_by_person()
    print res
    for row in res.limit(10).all():
        print row.person_id, row.name, row.email

    # raw list of change records
    res = session.query() \
        .select_changes()
    print res
    for row in res.limit(10).all():
        print row.id, row.issue_id, row.patchset, row.field, row.value, row.date

    # raw list of review issues
    res = session.query() \
        .select_issues()
    print res
    for row in res.limit(10).all():
        print row.id, row.issue, row.status, row.date

    # status transitions into a final (MERGED/ABANDONED) state
    res = session.query() \
        .select_changes() \
        .filter (DB.Changes.field == "Status") \
        .filter (or_ (DB.Changes.new_value == "MERGED",
                      DB.Changes.new_value == "ABANDONED"))
    print res
    for row in res.limit(10).all():
        print row.id, row.issue_id, row.patchset, row.field, row.value, row.date
pleaseproject/python-for-android | python-modules/twisted/twisted/application/internet.py | 49 | 11934 | # -*- test-case-name: twisted.application.test.test_internet,twisted.test.test_application,twisted.test.test_cooperator -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Reactor-based Services
Here are services to run clients, servers and periodic services using
the reactor.
If you want to run a server service, L{StreamServerEndpointService} defines a
service that can wrap an arbitrary L{IStreamServerEndpoint} as an L{IService}.
See also L{twisted.internet.strports.service} for constructing one of these
directly from a descriptive string.
Additionally, this module (dynamically) defines various Service subclasses that
let you represent clients and servers in a Service hierarchy. Endpoints APIs
should be preferred for stream server services, but since those APIs do not yet
exist for clients or datagram services, many of these are still useful.
They are as follows::
TCPServer, TCPClient,
UNIXServer, UNIXClient,
SSLServer, SSLClient,
UDPServer, UDPClient,
UNIXDatagramServer, UNIXDatagramClient,
MulticastServer
These classes take arbitrary arguments in their constructors and pass
them straight on to their respective reactor.listenXXX or
reactor.connectXXX calls.
For example, the following service starts a web server on port 8080:
C{TCPServer(8080, server.Site(r))}. See the documentation for the
reactor.listen/connect* methods for more information.
"""
import warnings
from twisted.python import log
from twisted.application import service
from twisted.internet import task
from twisted.internet.defer import CancelledError
def _maybeGlobalReactor(maybeReactor):
"""
@return: the argument, or the global reactor if the argument is C{None}.
"""
if maybeReactor is None:
from twisted.internet import reactor
return reactor
else:
return maybeReactor
class _VolatileDataService(service.Service):
    """
    Service mixin that strips transient attributes from pickled state.
    """

    # Names of attributes to drop when serializing; subclasses extend.
    volatile = []

    def __getstate__(self):
        state = service.Service.__getstate__(self)
        for name in self.volatile:
            # silently skip attributes that were never set
            state.pop(name, None)
        return state
class _AbstractServer(_VolatileDataService):
    """
    @cvar volatile: list of attribute to remove from pickling.
    @type volatile: C{list}

    @ivar method: the type of method to call on the reactor, one of B{TCP},
        B{UDP}, B{SSL} or B{UNIX}.
    @type method: C{str}

    @ivar reactor: the current running reactor.
    @type reactor: a provider of C{IReactorTCP}, C{IReactorUDP},
        C{IReactorSSL} or C{IReactorUnix}.

    @ivar _port: instance of port set when the service is started.
    @type _port: a provider of C{IListeningPort}.
    """

    volatile = ['_port']
    method = None
    reactor = None

    _port = None

    def __init__(self, *args, **kwargs):
        self.args = args
        if 'reactor' in kwargs:
            # pop so the remaining kwargs pass straight through to the
            # reactor's listen method
            self.reactor = kwargs.pop("reactor")
        self.kwargs = kwargs


    def privilegedStartService(self):
        # Start listening while still privileged (e.g. ports < 1024).
        service.Service.privilegedStartService(self)
        self._port = self._getPort()


    def startService(self):
        service.Service.startService(self)
        if self._port is None:
            self._port = self._getPort()


    def stopService(self):
        service.Service.stopService(self)
        # TODO: if startup failed, should shutdown skip stopListening?
        # _port won't exist
        if self._port is not None:
            d = self._port.stopListening()
            del self._port
            return d


    def _getPort(self):
        """
        Wrapper around the appropriate listen method of the reactor.

        @return: the port object returned by the listen method.
        @rtype: an object providing L{IListeningPort}.
        """
        return getattr(_maybeGlobalReactor(self.reactor),
                       'listen%s' % (self.method,))(*self.args, **self.kwargs)
class _AbstractClient(_VolatileDataService):
    """
    @cvar volatile: list of attribute to remove from pickling.
    @type volatile: C{list}

    @ivar method: the type of method to call on the reactor, one of B{TCP},
        B{UDP}, B{SSL} or B{UNIX}.
    @type method: C{str}

    @ivar reactor: the current running reactor.
    @type reactor: a provider of C{IReactorTCP}, C{IReactorUDP},
        C{IReactorSSL} or C{IReactorUnix}.

    @ivar _connection: instance of connection set when the service is started.
    @type _connection: a provider of C{IConnector}.
    """

    volatile = ['_connection']
    method = None
    reactor = None

    _connection = None

    def __init__(self, *args, **kwargs):
        self.args = args
        if 'reactor' in kwargs:
            # pop so the remaining kwargs pass straight through to the
            # reactor's connect method
            self.reactor = kwargs.pop("reactor")
        self.kwargs = kwargs


    def startService(self):
        service.Service.startService(self)
        self._connection = self._getConnection()


    def stopService(self):
        service.Service.stopService(self)
        if self._connection is not None:
            self._connection.disconnect()
            del self._connection


    def _getConnection(self):
        """
        Wrapper around the appropriate connect method of the reactor.

        @return: the port object returned by the connect method.
        @rtype: an object providing L{IConnector}.
        """
        return getattr(_maybeGlobalReactor(self.reactor),
                       'connect%s' % (self.method,))(*self.args, **self.kwargs)
_doc={
'Client':
"""Connect to %(tran)s
Call reactor.connect%(method)s when the service starts, with the
arguments given to the constructor.
""",
'Server':
"""Serve %(tran)s clients
Call reactor.listen%(method)s when the service starts, with the
arguments given to the constructor. When the service stops,
stop listening. See twisted.internet.interfaces for documentation
on arguments to the reactor method.
""",
}
# Dynamically define TCPServer, TCPClient, UNIXServer, ... -- one class
# per (transport, side) pair.  Uses the Python 2-only 'new' module; the
# Python 3 equivalent of new.classobj(...) is type(name, bases, dict).
import new
for tran in 'TCP UNIX SSL UDP UNIXDatagram Multicast'.split():
    for side in 'Server Client'.split():
        if tran == "Multicast" and side == "Client":
            # there is no multicast client service
            continue
        base = globals()['_Abstract'+side]
        # 'Generic' maps to the reactor's listenWith/connectWith methods
        method = {'Generic': 'With'}.get(tran, tran)
        doc = _doc[side]%vars()
        klass = new.classobj(tran+side, (base,),
                             {'method': method, '__doc__': doc})
        globals()[tran+side] = klass
class GenericServer(_AbstractServer):
    """
    Serve Generic clients

    Call reactor.listenWith when the service starts, with the arguments given to
    the constructor. When the service stops, stop listening. See
    twisted.internet.interfaces for documentation on arguments to the reactor
    method.

    This service is deprecated (because reactor.listenWith is deprecated).
    """
    method = 'With'

    def __init__(self, *args, **kwargs):
        # stacklevel=2 attributes the warning to the caller's frame
        warnings.warn(
            'GenericServer was deprecated in Twisted 10.1.',
            category=DeprecationWarning,
            stacklevel=2)
        _AbstractServer.__init__(self, *args, **kwargs)
class GenericClient(_AbstractClient):
    """
    Connect to Generic.

    Call reactor.connectWith when the service starts, with the arguments given
    to the constructor.

    This service is deprecated (because reactor.connectWith is deprecated).
    """
    method = 'With'

    def __init__(self, *args, **kwargs):
        # stacklevel=2 attributes the warning to the caller's frame
        warnings.warn(
            'GenericClient was deprecated in Twisted 10.1.',
            category=DeprecationWarning,
            stacklevel=2)
        _AbstractClient.__init__(self, *args, **kwargs)
class TimerService(_VolatileDataService):

    """Service to periodically call a function

    Every C{step} seconds call the given function with the given arguments.
    The service starts the calls when it starts, and cancels them
    when it stops.
    """

    # the active LoopingCall must not be pickled with the service
    volatile = ['_loop']

    def __init__(self, step, callable, *args, **kwargs):
        # @param step: interval, in seconds, between calls
        # @param callable: the function to call; *args/**kwargs are
        #     passed through on every invocation
        self.step = step
        self.call = (callable, args, kwargs)

    def startService(self):
        service.Service.startService(self)
        callable, args, kwargs = self.call
        # we have to make a new LoopingCall each time we're started, because
        # an active LoopingCall remains active when serialized. If
        # LoopingCall were a _VolatileDataService, we wouldn't need to do
        # this.
        self._loop = task.LoopingCall(callable, *args, **kwargs)
        # now=True: fire the first call immediately on start
        self._loop.start(self.step, now=True).addErrback(self._failed)

    def _failed(self, why):
        # make a note that the LoopingCall is no longer looping, so we don't
        # try to shut it down a second time in stopService. I think this
        # should be in LoopingCall. -warner
        self._loop.running = False
        log.err(why)

    def stopService(self):
        if self._loop.running:
            self._loop.stop()
        return service.Service.stopService(self)
class CooperatorService(service.Service):
    """
    Simple L{service.IService} which starts and stops a L{twisted.internet.task.Cooperator}.
    """

    def __init__(self):
        # created stopped; started/stopped in lockstep with the service
        self.coop = task.Cooperator(started=False)


    def coiterate(self, iterator):
        # delegate scheduling of the iterator to the wrapped cooperator
        return self.coop.coiterate(iterator)


    def startService(self):
        self.coop.start()


    def stopService(self):
        self.coop.stop()
class StreamServerEndpointService(service.Service, object):
    """
    A L{StreamServerEndpointService} is an L{IService} which runs a server on a
    listening port described by an L{IStreamServerEndpoint}.

    @ivar factory: A server factory which will be used to listen on the
        endpoint.

    @ivar endpoint: An L{IStreamServerEndpoint} provider which will be used to
        listen when the service starts.

    @ivar _waitingForPort: a Deferred, if C{listen} has yet been invoked on the
        endpoint, otherwise None.

    @ivar _raiseSynchronously: Defines error-handling behavior for the case
        where C{listen(...)} raises an exception before C{startService} or
        C{privilegedStartService} have completed.

    @type _raiseSynchronously: C{bool}

    @since: 10.2
    """

    _raiseSynchronously = None

    def __init__(self, endpoint, factory):
        self.endpoint = endpoint
        self.factory = factory
        self._waitingForPort = None


    def privilegedStartService(self):
        """
        Start listening on the endpoint.
        """
        service.Service.privilegedStartService(self)
        self._waitingForPort = self.endpoint.listen(self.factory)
        raisedNow = []
        def handleIt(err):
            if self._raiseSynchronously:
                # collect the failure so it can be re-raised in the
                # caller's frame, below
                raisedNow.append(err)
            elif not err.check(CancelledError):
                log.err(err)
        self._waitingForPort.addErrback(handleIt)
        if raisedNow:
            raisedNow[0].raiseException()


    def startService(self):
        """
        Start listening on the endpoint, unless L{privilegedStartService} got
        around to it already.
        """
        service.Service.startService(self)
        if self._waitingForPort is None:
            self.privilegedStartService()


    def stopService(self):
        """
        Stop listening on the port if it is already listening, otherwise,
        cancel the attempt to listen.

        @return: a L{Deferred} which fires with C{None} when the port has
            stopped listening.
        """
        self._waitingForPort.cancel()
        def stopIt(port):
            # port is None if listen() never completed
            if port is not None:
                return port.stopListening()
        d = self._waitingForPort.addCallback(stopIt)
        def stop(passthrough):
            self.running = False
            return passthrough
        d.addBoth(stop)
        return d
# Public API: the explicitly defined services plus every dynamically
# generated '<transport><Side>' class from the loop above (the Generic
# pair is deprecated but still exported).
__all__ = (['TimerService', 'CooperatorService', 'MulticastServer',
            'StreamServerEndpointService'] +
           [tran+side
            for tran in 'Generic TCP UNIX SSL UDP UNIXDatagram'.split()
            for side in 'Server Client'.split()])
emilroz/openmicroscopy | components/tools/OmeroPy/src/runTables.py | 5 | 1235 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# OMERO Tables Runner
# Copyright 2009 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
if __name__ == "__main__":
    # Entry point for the OMERO Tables server adapter.
    import sys

    import Ice
    import omero
    import omero.clients
    import omero.tables

    from omero.util import Dependency

    # Logging hack
    omero.tables.TablesI.__module__ = "omero.tables"
    omero.tables.TableI.__module__ = "omero.tables"

    class TablesDependency(Dependency):
        """Dependency wrapper reporting the PyTables and HDF5 versions."""

        def __init__(self):
            Dependency.__init__(self, "tables")

        def get_version(self, target):
            self.target = target
            return "%s, hdf=%s" % (target.__version__, self.optional("hdf5", 1))

        def optional(self, key, idx):
            # Best-effort version probe: return a placeholder rather
            # than failing server startup over a reporting detail.
            try:
                x = self.target.whichLibVersion(key)
                if x is not None:
                    return x[idx]
                else:
                    return "unknown"
            except Exception:
                # was a bare 'except:' -- Exception keeps the
                # best-effort behaviour without also swallowing
                # SystemExit/KeyboardInterrupt
                return "error"

    app = omero.util.Server(omero.tables.TablesI, "TablesAdapter", Ice.Identity("Tables", ""),
                            dependencies=(Dependency("numpy"), TablesDependency()))
    sys.exit(app.main(sys.argv))
florian-dacosta/OpenUpgrade | addons/base_import/tests/test_cases.py | 101 | 13059 | # -*- encoding: utf-8 -*-
import unittest2
from openerp.tests.common import TransactionCase
from .. import models
# Description of the always-present external-id pseudo-field.
ID_FIELD = {'id': 'id', 'name': 'id', 'string': "External ID", 'required': False, 'fields': []}

def make_field(name='value', string='unknown', required=False, fields=None):
    """Build the field list get_fields() is expected to return: the
    external-id pseudo-field followed by one field described by the
    arguments.

    :param fields: sub-field descriptions (e.g. for m2o fields).  When
        omitted, a fresh empty list is used per call, so a caller that
        mutates a returned description can no longer corrupt later
        calls (the previous ``fields=[]`` default was a shared mutable).
    """
    return [
        ID_FIELD,
        {'id': name, 'name': name, 'string': string, 'required': required,
         'fields': [] if fields is None else fields},
    ]
class test_basic_fields(TransactionCase):
    """Field introspection (``get_fields``) for simple, non-relational
    fields of the ``base_import.tests.models.*`` test models: how the
    required/readonly flags and m2o sub-fields are reflected."""

    def get_fields(self, field):
        # Shortcut: importable-field descriptors of the test model named
        # 'base_import.tests.models.<field>'.
        return self.registry('base_import.import')\
            .get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)

    def test_base(self):
        """ A basic field is not required """
        self.assertEqual(self.get_fields('char'), make_field())

    def test_required(self):
        """ Required fields should be flagged (so they can be fill-required) """
        self.assertEqual(self.get_fields('char.required'), make_field(required=True))

    def test_readonly(self):
        """ Readonly fields should be filtered out"""
        self.assertEqual(self.get_fields('char.readonly'), [ID_FIELD])

    def test_readonly_states(self):
        """ Readonly fields with states should not be filtered out"""
        self.assertEqual(self.get_fields('char.states'), make_field())

    def test_readonly_states_noreadonly(self):
        """ Readonly fields with states having nothing to do with
        readonly should still be filtered out"""
        self.assertEqual(self.get_fields('char.noreadonly'), [ID_FIELD])

    def test_readonly_states_stillreadonly(self):
        """ Readonly fields with readonly states leaving them readonly
        always... filtered out"""
        self.assertEqual(self.get_fields('char.stillreadonly'), [ID_FIELD])

    def test_m2o(self):
        """ M2O fields should allow import of themselves (name_get),
        their id and their xid"""
        self.assertEqual(self.get_fields('m2o'), make_field(fields=[
            {'id': 'value', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
            {'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
        ]))

    def test_m2o_required(self):
        """ If an m2o field is required, its three sub-fields are
        required as well (the client has to handle that: requiredness
        is id-based)
        """
        self.assertEqual(self.get_fields('m2o.required'), make_field(required=True, fields=[
            {'id': 'value', 'name': 'id', 'string': 'External ID', 'required': True, 'fields': []},
            {'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': True, 'fields': []},
        ]))
class test_o2m(TransactionCase):
    """Field introspection for one2many fields: the related model's own
    importable fields are exposed as sub-fields so its rows can be
    imported inline."""

    def get_fields(self, field):
        # Same helper as in test_basic_fields: importable fields of the
        # named test model.
        return self.registry('base_import.import')\
            .get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)

    def test_shallow(self):
        self.assertEqual(self.get_fields('o2m'), make_field(fields=[
            {'id': 'id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
            # FIXME: should reverse field be ignored?
            {'id': 'parent_id', 'name': 'parent_id', 'string': 'unknown', 'required': False, 'fields': [
                {'id': 'parent_id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
                {'id': 'parent_id', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
            ]},
            {'id': 'value', 'name': 'value', 'string': 'unknown', 'required': False, 'fields': []},
        ]))
class test_match_headers_single(TransactionCase):
    """Behaviour of ``_match_header`` when matching a single CSV header
    cell against candidate field descriptors, by name, by label and by
    slash-separated sub-field path."""

    def _match(self, header, fields):
        # Delegate to the wizard's header-matching helper with no options.
        return self.registry('base_import.import')._match_header(
            header, fields, {})

    def test_match_by_name(self):
        self.assertEqual(
            self._match('f0', [{'name': 'f0'}]),
            [{'name': 'f0'}])

    def test_match_by_string(self):
        self.assertEqual(
            self._match('some field', [{'name': 'bob', 'string': "Some Field"}]),
            [{'name': 'bob', 'string': "Some Field"}])

    def test_nomatch(self):
        self.assertEqual(
            self._match('should not be', [{'name': 'bob', 'string': "wheee"}]),
            [])

    def test_recursive_match(self):
        field_def = {
            'name': 'f0',
            'string': "My Field",
            'fields': [
                {'name': 'f0', 'string': "Sub field 0", 'fields': []},
                {'name': 'f1', 'string': "Sub field 2", 'fields': []},
            ]
        }
        self.assertEqual(
            self._match('f0/f1', [field_def]),
            [field_def, field_def['fields'][1]])

    def test_recursive_nomatch(self):
        """ Match first level, fail to match second level """
        field_def = {
            'name': 'f0',
            'string': "My Field",
            'fields': [
                {'name': 'f0', 'string': "Sub field 0", 'fields': []},
                {'name': 'f1', 'string': "Sub field 2", 'fields': []},
            ]
        }
        self.assertEqual(
            self._match('f0/f2', [field_def]),
            [])
class test_match_headers_multiple(TransactionCase):
    """Behaviour of ``_match_headers`` mapping a whole header row to
    field paths: returns the extracted header row and a dict of
    column index -> matched field chain (or None)."""

    def test_noheaders(self):
        # No rows at all: nothing to extract, nothing to match.
        self.assertEqual(
            self.registry('base_import.import')._match_headers(
                [], [], {}),
            (None, None)
        )

    def test_nomatch(self):
        # The header row is consumed even when no field matches: every
        # column maps to None.
        self.assertEqual(
            self.registry('base_import.import')._match_headers(
                iter([
                    ['foo', 'bar', 'baz', 'qux'],
                    ['v1', 'v2', 'v3', 'v4'],
                ]),
                [],
                {'headers': True}),
            (
                ['foo', 'bar', 'baz', 'qux'],
                dict.fromkeys(range(4))
            )
        )

    def test_mixed(self):
        # Columns may match by field name ('bar'), by label ('baz' ->
        # field 'bob') or by slash-separated sub-field path ('qux/corge');
        # unmatched columns ('foo') map to None.
        self.assertEqual(
            self.registry('base_import.import')._match_headers(
                iter(['foo bar baz qux/corge'.split()]),
                [
                    {'name': 'bar', 'string': 'Bar'},
                    {'name': 'bob', 'string': 'Baz'},
                    {'name': 'qux', 'string': 'Qux', 'fields': [
                        {'name': 'corge', 'fields': []},
                    ]}
                ],
                {'headers': True}),
            (['foo', 'bar', 'baz', 'qux/corge'], {
                0: None,
                1: ['bar'],
                2: ['bob'],
                3: ['qux', 'corge'],
            })
        )
class test_preview(TransactionCase):
    """``parse_preview`` behaviour: error reporting for bad
    encoding/quoting/separator options, and the shape of a successful
    preview result."""

    def make_import(self):
        """Create an import record whose file is euc_kr-encoded CSV
        (the headers are the Korean words for "login" and "language")."""
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'res.users',
            'file': u"로그인,언어\nbob,1\n".encode('euc_kr'),
        })
        return Import, id

    def test_encoding(self):
        """ Omitting the encoding for a non-default-encoded file yields
        an error result. """
        Import, id = self.make_import()
        result = Import.parse_preview(self.cr, self.uid, id, {
            'quoting': '"',
            'separator': ',',
        })
        self.assertTrue('error' in result)

    def test_csv_errors_quoting(self):
        """ An invalid quoting character yields an error result.

        Fix: this method and the next were both named ``test_csv_errors``,
        so this one was shadowed by the later definition and never ran;
        they now have distinct names.
        """
        Import, id = self.make_import()
        result = Import.parse_preview(self.cr, self.uid, id, {
            'quoting': 'foo',
            'separator': ',',
            'encoding': 'euc_kr',
        })
        self.assertTrue('error' in result)

    def test_csv_errors_separator(self):
        """ An invalid separator yields an error result. """
        Import, id = self.make_import()
        result = Import.parse_preview(self.cr, self.uid, id, {
            'quoting': '"',
            'separator': 'bob',
            'encoding': 'euc_kr',
        })
        self.assertTrue('error' in result)

    def test_success(self):
        """ With correct options the preview contains the header row, the
        column->field matches, the model's importable fields and the first
        data rows. """
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
                    'bar,3,4\n'
                    'qux,5,6\n'
        })
        result = Import.parse_preview(self.cr, self.uid, id, {
            'quoting': '"',
            'separator': ',',
            'headers': True,
        })
        self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue'], 2: None})
        self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
        # Order depends on iteration order of fields_get
        self.assertItemsEqual(result['fields'], [
            {'id': 'id', 'name': 'id', 'string': 'External ID', 'required':False, 'fields': []},
            {'id': 'name', 'name': 'name', 'string': 'Name', 'required':False, 'fields': []},
            {'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required':True, 'fields': []},
            {'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required':False, 'fields': []},
        ])
        self.assertEqual(result['preview'], [
            ['foo', '1', '2'],
            ['bar', '3', '4'],
            ['qux', '5', '6'],
        ])
        # Ensure we only have the response fields we expect
        self.assertItemsEqual(result.keys(), ['matches', 'headers', 'fields', 'preview'])
class test_convert_import_data(TransactionCase):
    """ Tests conversion of base_import.import input into data which
    can be fed to Model.import_data
    """

    def test_all(self):
        # Nominal case: all three columns mapped, every cell kept.
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
                    'bar,3,4\n'
                    'qux,5,6\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        data, fields = Import._convert_import_data(
            record, ['name', 'somevalue', 'othervalue'],
            {'quoting': '"', 'separator': ',', 'headers': True,})
        self.assertItemsEqual(fields, ['name', 'somevalue', 'othervalue'])
        self.assertItemsEqual(data, [
            ('foo', '1', '2'),
            ('bar', '3', '4'),
            ('qux', '5', '6'),
        ])

    def test_filtered(self):
        """ If ``False`` is provided as field mapping for a column,
        that column should be removed from importable data
        """
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
                    'bar,3,4\n'
                    'qux,5,6\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        data, fields = Import._convert_import_data(
            record, ['name', False, 'othervalue'],
            {'quoting': '"', 'separator': ',', 'headers': True,})
        self.assertItemsEqual(fields, ['name', 'othervalue'])
        self.assertItemsEqual(data, [
            ('foo', '2'),
            ('bar', '4'),
            ('qux', '6'),
        ])

    def test_norow(self):
        """ If a row is composed only of empty values (due to having
        filtered out non-empty values from it), it should be removed
        """
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
                    ',3,\n'
                    ',5,6\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        data, fields = Import._convert_import_data(
            record, ['name', False, 'othervalue'],
            {'quoting': '"', 'separator': ',', 'headers': True,})
        self.assertItemsEqual(fields, ['name', 'othervalue'])
        # The ',3,' row becomes entirely empty once column 2 is dropped,
        # so it disappears from the result.
        self.assertItemsEqual(data, [
            ('foo', '2'),
            ('', '6'),
        ])

    def test_nofield(self):
        # Mapping no field at all is an error, not an empty import.
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        self.assertRaises(
            ValueError,
            Import._convert_import_data,
            record, [],
            {'quoting': '"', 'separator': ',', 'headers': True,})

    def test_falsefields(self):
        # All-False mappings are equivalent to mapping no field at all.
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        self.assertRaises(
            ValueError,
            Import._convert_import_data,
            record, [False, False, False],
            {'quoting': '"', 'separator': ',', 'headers': True,})
| agpl-3.0 |
tgsd96/gargnotes | venv/lib/python2.7/site-packages/django/apps/config.py | 75 | 8224 | from importlib import import_module
import os
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
    """
    Class representing a Django application and its configuration.
    """

    def __init__(self, app_name, app_module):
        """Initialize from the dotted path *app_name* and the already
        imported *app_module*."""
        # Full Python path to the application eg. 'django.contrib.admin'.
        self.name = app_name

        # Root module for the application eg. <module 'django.contrib.admin'
        # from 'django/contrib/admin/__init__.pyc'>.
        self.module = app_module

        # The following attributes could be defined at the class level in a
        # subclass, hence the test-and-set pattern.

        # Last component of the Python path to the application eg. 'admin'.
        # This value must be unique across a Django project.
        if not hasattr(self, 'label'):
            self.label = app_name.rpartition(".")[2]

        # Human-readable name for the application eg. "Admin".
        if not hasattr(self, 'verbose_name'):
            self.verbose_name = self.label.title()

        # Filesystem path to the application directory eg.
        # u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
        # Python 2 and a str on Python 3.
        if not hasattr(self, 'path'):
            self.path = self._path_from_module(app_module)

        # Module containing models eg. <module 'django.contrib.admin.models'
        # from 'django/contrib/admin/models.pyc'>. Set by import_models().
        # None if the application doesn't have a models module.
        self.models_module = None

        # Mapping of lower case model names to model classes. Initially set to
        # None to prevent accidental access before import_models() runs.
        self.models = None

    def __repr__(self):
        # eg. <AppConfig: admin>
        return '<%s: %s>' % (self.__class__.__name__, self.label)

    def _path_from_module(self, module):
        """Attempt to determine app's filesystem path from its module."""
        # See #21874 for extended discussion of the behavior of this method in
        # various cases.
        # Convert paths to list because Python 3.3 _NamespacePath does not
        # support indexing.
        paths = list(getattr(module, '__path__', []))
        if len(paths) != 1:
            # Ambiguous or missing __path__: fall back to the directory of
            # the module's __file__ when there is one.
            filename = getattr(module, '__file__', None)
            if filename is not None:
                paths = [os.path.dirname(filename)]
        if len(paths) > 1:
            raise ImproperlyConfigured(
                "The app module %r has multiple filesystem locations (%r); "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module, paths))
        elif not paths:
            raise ImproperlyConfigured(
                "The app module %r has no filesystem location, "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module,))
        return upath(paths[0])

    @classmethod
    def create(cls, entry):
        """
        Factory that creates an app config from an entry in INSTALLED_APPS.

        *entry* may be either the dotted path of an app module (possibly
        naming a config class via its ``default_app_config`` attribute) or
        the dotted path of an AppConfig subclass.
        """
        try:
            # If import_module succeeds, entry is a path to an app module,
            # which may specify an app config class with default_app_config.
            # Otherwise, entry is a path to an app config class or an error.
            module = import_module(entry)

        except ImportError:
            # Track that importing as an app module failed. If importing as an
            # app config class fails too, we'll trigger the ImportError again.
            module = None

            mod_path, _, cls_name = entry.rpartition('.')

            # Raise the original exception when entry cannot be a path to an
            # app config class.
            if not mod_path:
                raise

        else:
            try:
                # If this works, the app module specifies an app config class.
                entry = module.default_app_config
            except AttributeError:
                # Otherwise, it simply uses the default app config class.
                return cls(entry, module)
            else:
                mod_path, _, cls_name = entry.rpartition('.')

        # If we're reaching this point, we must attempt to load the app config
        # class located at <mod_path>.<cls_name>

        # Avoid django.utils.module_loading.import_by_path because it
        # masks errors -- it reraises ImportError as ImproperlyConfigured.
        mod = import_module(mod_path)
        try:
            cls = getattr(mod, cls_name)
        except AttributeError:
            if module is None:
                # If importing as an app module failed, that error probably
                # contains the most informative traceback. Trigger it again.
                import_module(entry)
            else:
                raise

        # Check for obvious errors. (This check prevents duck typing, but
        # it could be removed if it became a problem in practice.)
        if not issubclass(cls, AppConfig):
            raise ImproperlyConfigured(
                "'%s' isn't a subclass of AppConfig." % entry)

        # Obtain app name here rather than in AppClass.__init__ to keep
        # all error checking for entries in INSTALLED_APPS in one place.
        try:
            app_name = cls.name
        except AttributeError:
            raise ImproperlyConfigured(
                "'%s' must supply a name attribute." % entry)

        # Ensure app_name points to a valid module.
        app_module = import_module(app_name)

        # Entry is a path to an app config class.
        return cls(app_name, app_module)

    def check_models_ready(self):
        """
        Raises an exception if models haven't been imported yet.
        """
        if self.models is None:
            raise AppRegistryNotReady(
                "Models for app '%s' haven't been imported yet." % self.label)

    def get_model(self, model_name):
        """
        Returns the model with the given case-insensitive model_name.

        Raises LookupError if no model exists with this name.
        """
        self.check_models_ready()
        try:
            return self.models[model_name.lower()]
        except KeyError:
            raise LookupError(
                "App '%s' doesn't have a '%s' model." % (self.label, model_name))

    def get_models(self, include_auto_created=False,
                   include_deferred=False, include_swapped=False):
        """
        Returns an iterable of models.

        By default, the following models aren't included:

        - auto-created models for many-to-many relations without
          an explicit intermediate table,
        - models created to satisfy deferred attribute queries,
        - models that have been swapped out.

        Set the corresponding keyword argument to True to include such models.
        Keyword arguments aren't documented; they're a private API.
        """
        self.check_models_ready()
        for model in self.models.values():
            if model._deferred and not include_deferred:
                continue
            if model._meta.auto_created and not include_auto_created:
                continue
            if model._meta.swapped and not include_swapped:
                continue
            yield model

    def import_models(self, all_models):
        # Dictionary of models for this app, primarily maintained in the
        # 'all_models' attribute of the Apps this AppConfig is attached to.
        # Injected as a parameter because it gets populated when models are
        # imported, which might happen before populate() imports models.
        self.models = all_models

        if module_has_submodule(self.module, MODELS_MODULE_NAME):
            models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
            self.models_module = import_module(models_module_name)

    def ready(self):
        """
        Override this method in subclasses to run code when Django starts.
        """
| mit |
mhvk/numpy | numpy/typing/_callable.py | 3 | 12310 | """
A module with various ``typing.Protocol`` subclasses that implement
the ``__call__`` magic method.
See the `Mypy documentation`_ on protocols for more details.
.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
"""
from __future__ import annotations
import sys
from typing import (
Union,
TypeVar,
overload,
Any,
Tuple,
NoReturn,
TYPE_CHECKING,
)
from numpy import (
ndarray,
dtype,
generic,
bool_,
timedelta64,
number,
integer,
unsignedinteger,
signedinteger,
int8,
int_,
floating,
float64,
complexfloating,
complex128,
)
from ._nbit import _NBitInt, _NBitDouble
from ._scalars import (
_BoolLike_co,
_IntLike_co,
_FloatLike_co,
_NumberLike_co,
)
from . import NBitBase, _HAS_TYPING_EXTENSIONS
from ._generic_alias import NDArray
# ``typing.Protocol`` only exists in the stdlib from Python 3.8 onwards;
# fall back to ``typing_extensions`` when it is installed. When neither is
# available the protocols below are replaced by ``Any`` (see the else
# branch of the big conditional that follows).
if sys.version_info >= (3, 8):
    from typing import Protocol
elif _HAS_TYPING_EXTENSIONS:
    from typing_extensions import Protocol
if TYPE_CHECKING or _HAS_TYPING_EXTENSIONS or sys.version_info >= (3, 8):
    # Helper aliases and TypeVars shared by the callback protocols below.
    _T1 = TypeVar("_T1")
    _T2 = TypeVar("_T2")
    _2Tuple = Tuple[_T1, _T1]

    _NBit1 = TypeVar("_NBit1", bound=NBitBase)
    _NBit2 = TypeVar("_NBit2", bound=NBitBase)

    _IntType = TypeVar("_IntType", bound=integer)
    _FloatType = TypeVar("_FloatType", bound=floating)
    _NumberType = TypeVar("_NumberType", bound=number)
    _NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
    _GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
    # --- Callback protocols for arithmetic on ``bool_`` scalars ----------
    class _BoolOp(Protocol[_GenericType_co]):
        @overload
        def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ...
        @overload  # platform dependent
        def __call__(self, __other: int) -> int_: ...
        @overload
        def __call__(self, __other: float) -> float64: ...
        @overload
        def __call__(self, __other: complex) -> complex128: ...
        @overload
        def __call__(self, __other: _NumberType) -> _NumberType: ...

    class _BoolBitOp(Protocol[_GenericType_co]):
        @overload
        def __call__(self, __other: _BoolLike_co) -> _GenericType_co: ...
        @overload  # platform dependent
        def __call__(self, __other: int) -> int_: ...
        @overload
        def __call__(self, __other: _IntType) -> _IntType: ...

    class _BoolSub(Protocol):
        # Note that `__other: bool_` is absent here
        @overload
        def __call__(self, __other: bool) -> NoReturn: ...
        @overload  # platform dependent
        def __call__(self, __other: int) -> int_: ...
        @overload
        def __call__(self, __other: float) -> float64: ...
        @overload
        def __call__(self, __other: complex) -> complex128: ...
        @overload
        def __call__(self, __other: _NumberType) -> _NumberType: ...

    class _BoolTrueDiv(Protocol):
        @overload
        def __call__(self, __other: float | _IntLike_co) -> float64: ...
        @overload
        def __call__(self, __other: complex) -> complex128: ...
        @overload
        def __call__(self, __other: _NumberType) -> _NumberType: ...

    class _BoolMod(Protocol):
        @overload
        def __call__(self, __other: _BoolLike_co) -> int8: ...
        @overload  # platform dependent
        def __call__(self, __other: int) -> int_: ...
        @overload
        def __call__(self, __other: float) -> float64: ...
        @overload
        def __call__(self, __other: _IntType) -> _IntType: ...
        @overload
        def __call__(self, __other: _FloatType) -> _FloatType: ...

    class _BoolDivMod(Protocol):
        @overload
        def __call__(self, __other: _BoolLike_co) -> _2Tuple[int8]: ...
        @overload  # platform dependent
        def __call__(self, __other: int) -> _2Tuple[int_]: ...
        @overload
        # NOTE(review): `_NBit1` appears unbound in this non-generic
        # protocol; compare `_BoolTrueDiv`, which returns plain `float64`
        # for a `float` operand -- confirm against upstream.
        def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
        @overload
        def __call__(self, __other: _IntType) -> _2Tuple[_IntType]: ...
        @overload
        def __call__(self, __other: _FloatType) -> _2Tuple[_FloatType]: ...
    # --- ``timedelta64`` division and integer true-division --------------
    class _TD64Div(Protocol[_NumberType_co]):
        @overload
        def __call__(self, __other: timedelta64) -> _NumberType_co: ...
        @overload
        def __call__(self, __other: _BoolLike_co) -> NoReturn: ...
        @overload
        def __call__(self, __other: _FloatLike_co) -> timedelta64: ...

    class _IntTrueDiv(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> floating[_NBit1]: ...
        @overload
        def __call__(self, __other: int) -> floating[_NBit1 | _NBitInt]: ...
        @overload
        def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: complex
        ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
        @overload
        def __call__(self, __other: integer[_NBit2]) -> floating[_NBit1 | _NBit2]: ...
    # --- Unsigned-integer arithmetic protocols ---------------------------
    class _UnsignedIntOp(Protocol[_NBit1]):
        # NOTE: `uint64 + signedinteger -> float64`
        @overload
        def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
        @overload
        def __call__(
            self, __other: int | signedinteger[Any]
        ) -> Any: ...
        @overload
        def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: complex
        ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: unsignedinteger[_NBit2]
        ) -> unsignedinteger[_NBit1 | _NBit2]: ...

    class _UnsignedIntBitOp(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
        @overload
        def __call__(self, __other: int) -> signedinteger[Any]: ...
        @overload
        def __call__(self, __other: signedinteger[Any]) -> signedinteger[Any]: ...
        @overload
        def __call__(
            self, __other: unsignedinteger[_NBit2]
        ) -> unsignedinteger[_NBit1 | _NBit2]: ...

    class _UnsignedIntMod(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> unsignedinteger[_NBit1]: ...
        @overload
        def __call__(
            self, __other: int | signedinteger[Any]
        ) -> Any: ...
        @overload
        def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: unsignedinteger[_NBit2]
        ) -> unsignedinteger[_NBit1 | _NBit2]: ...

    class _UnsignedIntDivMod(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ...
        @overload
        def __call__(
            self, __other: int | signedinteger[Any]
        ) -> _2Tuple[Any]: ...
        @overload
        def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
        @overload
        def __call__(
            self, __other: unsignedinteger[_NBit2]
        ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ...
    # --- Signed-integer arithmetic protocols -----------------------------
    class _SignedIntOp(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
        @overload
        def __call__(self, __other: int) -> signedinteger[_NBit1 | _NBitInt]: ...
        @overload
        def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: complex
        ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: signedinteger[_NBit2]
        ) -> signedinteger[_NBit1 | _NBit2]: ...

    class _SignedIntBitOp(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
        @overload
        def __call__(self, __other: int) -> signedinteger[_NBit1 | _NBitInt]: ...
        @overload
        def __call__(
            self, __other: signedinteger[_NBit2]
        ) -> signedinteger[_NBit1 | _NBit2]: ...

    class _SignedIntMod(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> signedinteger[_NBit1]: ...
        @overload
        def __call__(self, __other: int) -> signedinteger[_NBit1 | _NBitInt]: ...
        @overload
        def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: signedinteger[_NBit2]
        ) -> signedinteger[_NBit1 | _NBit2]: ...

    class _SignedIntDivMod(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> _2Tuple[signedinteger[_NBit1]]: ...
        @overload
        def __call__(self, __other: int) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ...
        @overload
        def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
        @overload
        def __call__(
            self, __other: signedinteger[_NBit2]
        ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ...
    # --- Floating-point arithmetic protocols -----------------------------
    class _FloatOp(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> floating[_NBit1]: ...
        @overload
        def __call__(self, __other: int) -> floating[_NBit1 | _NBitInt]: ...
        @overload
        def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: complex
        ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: integer[_NBit2] | floating[_NBit2]
        ) -> floating[_NBit1 | _NBit2]: ...

    class _FloatMod(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> floating[_NBit1]: ...
        @overload
        def __call__(self, __other: int) -> floating[_NBit1 | _NBitInt]: ...
        @overload
        def __call__(self, __other: float) -> floating[_NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self, __other: integer[_NBit2] | floating[_NBit2]
        ) -> floating[_NBit1 | _NBit2]: ...

    class _FloatDivMod(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> _2Tuple[floating[_NBit1]]: ...
        @overload
        def __call__(self, __other: int) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ...
        @overload
        def __call__(self, __other: float) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
        @overload
        def __call__(
            self, __other: integer[_NBit2] | floating[_NBit2]
        ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ...
    # --- Complex, generic-number and comparison protocols ----------------
    class _ComplexOp(Protocol[_NBit1]):
        @overload
        def __call__(self, __other: bool) -> complexfloating[_NBit1, _NBit1]: ...
        @overload
        def __call__(self, __other: int) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ...
        @overload
        def __call__(
            self, __other: complex
        ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
        @overload
        def __call__(
            self,
            __other: Union[
                integer[_NBit2],
                floating[_NBit2],
                complexfloating[_NBit2, _NBit2],
            ]
        ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ...

    class _NumberOp(Protocol):
        def __call__(self, __other: _NumberLike_co) -> Any: ...

    class _ComparisonOp(Protocol[_T1, _T2]):
        @overload
        def __call__(self, __other: _T1) -> bool_: ...
        @overload
        def __call__(self, __other: _T2) -> NDArray[bool_]: ...
else:
    # Runtime fallback for interpreters without ``typing.Protocol`` (and
    # without typing_extensions): the protocols are only needed for static
    # analysis, so plain ``Any`` placeholders keep the module importable.
    _BoolOp = Any
    _BoolBitOp = Any
    _BoolSub = Any
    _BoolTrueDiv = Any
    _BoolMod = Any
    _BoolDivMod = Any
    _TD64Div = Any
    _IntTrueDiv = Any
    _UnsignedIntOp = Any
    _UnsignedIntBitOp = Any
    _UnsignedIntMod = Any
    _UnsignedIntDivMod = Any
    _SignedIntOp = Any
    _SignedIntBitOp = Any
    _SignedIntMod = Any
    _SignedIntDivMod = Any
    _FloatOp = Any
    _FloatMod = Any
    _FloatDivMod = Any
    _ComplexOp = Any
    _NumberOp = Any
    _ComparisonOp = Any
| bsd-3-clause |
Bjay1435/capstone | rootfs/usr/lib/python3.4/xml/etree/ElementTree.py | 9 | 57364 | """Lightweight XML support for Python.
XML is an inherently hierarchical data format, and the most natural way to
represent it is with a tree. This module has two classes for this purpose:
1. ElementTree represents the whole XML document as a tree and
2. Element represents a single node in this tree.
Interactions with the whole document (reading and writing to/from files) are
usually done on the ElementTree level. Interactions with a single XML element
and its sub-elements are done on the Element level.
Element is a flexible container object designed to store hierarchical data
structures in memory. It can be described as a cross between a list and a
dictionary. Each Element has a number of properties associated with it:
'tag' - a string containing the element's name.
'attributes' - a Python dictionary storing the element's attributes.
'text' - a string containing the element's text content.
'tail' - an optional string containing text after the element's end tag.
And a number of child elements stored in a Python sequence.
To create an element instance, use the Element constructor,
or the SubElement factory function.
You can also use the ElementTree class to wrap an element structure
and convert it to and from XML.
"""
#---------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
#
# ElementTree
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Names exported by ``from xml.etree.ElementTree import *``.
__all__ = [
    # public symbols
    "Comment",
    "dump",
    "Element", "ElementTree",
    "fromstring", "fromstringlist",
    "iselement", "iterparse",
    "parse", "ParseError",
    "PI", "ProcessingInstruction",
    "QName",
    "SubElement",
    "tostring", "tostringlist",
    "TreeBuilder",
    "VERSION",
    "XML", "XMLID",
    "XMLParser",
    "register_namespace",
    ]

# Version of the bundled ElementTree toolkit.
VERSION = "1.3.0"
import sys
import re
import warnings
import io
import contextlib
from . import ElementPath
class ParseError(SyntaxError):
    """An error when parsing an XML document.

    In addition to its exception value, a ParseError contains
    two extra attributes:
        'code'     - the specific exception code
        'position' - the line and column of the error

    """
    # Body intentionally empty: the extra attributes are expected to be
    # attached by whatever raises the error (not visible in this block).
    pass
# --------------------------------------------------------------------
def iselement(element):
    """Return True if *element* appears to be an Element."""
    # Duck-typed check: anything exposing a ``tag`` attribute qualifies.
    try:
        element.tag
    except AttributeError:
        return False
    return True
class Element:
    """An XML element.
    This class is the reference implementation of the Element interface.
    An element's length is its number of subelements. That means if you
    want to check if an element is truly empty, you should check BOTH
    its length AND its text attribute.
    The element tag, attribute names, and attribute values can be either
    bytes or strings.
    *tag* is the element name. *attrib* is an optional dictionary containing
    element attributes. *extra* are additional element attributes given as
    keyword arguments.
    Example form:
    <tag attrib>text<child/>...</tag>tail
    """
    tag = None
    """The element's name."""
    attrib = None
    """Dictionary of the element's attributes."""
    text = None
    """
    Text before first subelement. This is either a string or the value None.
    Note that if there is no text, this attribute may be either
    None or the empty string, depending on the parser.
    """
    tail = None
    """
    Text after this element's end tag, but before the next sibling element's
    start tag. This is either a string or the value None. Note that if there
    was no text, this attribute may be either None or an empty string,
    depending on the parser.
    """
    def __init__(self, tag, attrib={}, **extra):
        if not isinstance(attrib, dict):
            raise TypeError("attrib must be dict, not %s" % (
                attrib.__class__.__name__,))
        # Copy before merging **extra so neither the caller's dict nor the
        # shared {} default is ever mutated.
        attrib = attrib.copy()
        attrib.update(extra)
        self.tag = tag
        self.attrib = attrib
        self._children = []
    def __repr__(self):
        return "<Element %s at 0x%x>" % (repr(self.tag), id(self))
    def makeelement(self, tag, attrib):
        """Create a new element with the same type.
        *tag* is a string containing the element name.
        *attrib* is a dictionary containing the element attributes.
        Do not call this method, use the SubElement factory function instead.
        """
        return self.__class__(tag, attrib)
    def copy(self):
        """Return copy of current element.
        This creates a shallow copy. Subelements will be shared with the
        original tree.
        """
        elem = self.makeelement(self.tag, self.attrib)
        elem.text = self.text
        elem.tail = self.tail
        # Slice-assign so the new element shares (not copies) the children.
        elem[:] = self
        return elem
    def __len__(self):
        return len(self._children)
    def __bool__(self):
        # Historically an element with no children was falsy even when it
        # had text; this is deprecated in favor of explicit tests.
        warnings.warn(
            "The behavior of this method will change in future versions. "
            "Use specific 'len(elem)' or 'elem is not None' test instead.",
            FutureWarning, stacklevel=2
            )
        return len(self._children) != 0  # emulate old behaviour, for now
    def __getitem__(self, index):
        return self._children[index]
    def __setitem__(self, index, element):
        # if isinstance(index, slice):
        #     for elt in element:
        #         assert iselement(elt)
        # else:
        #     assert iselement(element)
        self._children[index] = element
    def __delitem__(self, index):
        del self._children[index]
    def append(self, subelement):
        """Add *subelement* to the end of this element.
        The new element will appear in document order after the last existing
        subelement (or directly after the text, if it's the first subelement),
        but before the end tag for this element.
        """
        self._assert_is_element(subelement)
        self._children.append(subelement)
    def extend(self, elements):
        """Append subelements from a sequence.
        *elements* is a sequence with zero or more elements.
        """
        # Validate the whole sequence first so a bad item cannot leave the
        # child list partially extended.
        for element in elements:
            self._assert_is_element(element)
        self._children.extend(elements)
    def insert(self, index, subelement):
        """Insert *subelement* at position *index*."""
        self._assert_is_element(subelement)
        self._children.insert(index, subelement)
    def _assert_is_element(self, e):
        # Need to refer to the actual Python implementation, not the
        # shadowing C implementation.
        if not isinstance(e, _Element_Py):
            raise TypeError('expected an Element, not %s' % type(e).__name__)
    def remove(self, subelement):
        """Remove matching subelement.
        Unlike the find methods, this method compares elements based on
        identity, NOT ON tag value or contents. To remove subelements by
        other means, the easiest way is to use a list comprehension to
        select what elements to keep, and then use slice assignment to update
        the parent element.
        ValueError is raised if a matching element could not be found.
        """
        # assert iselement(element)
        self._children.remove(subelement)
    def getchildren(self):
        """(Deprecated) Return all subelements.
        Elements are returned in document order.
        """
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'list(elem)' or iteration over elem instead.",
            DeprecationWarning, stacklevel=2
            )
        return self._children
    def find(self, path, namespaces=None):
        """Find first matching element by tag name or path.
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return the first matching element, or None if no element was found.
        """
        return ElementPath.find(self, path, namespaces)
    def findtext(self, path, default=None, namespaces=None):
        """Find text for first matching element by tag name or path.
        *path* is a string having either an element tag or an XPath,
        *default* is the value to return if the element was not found,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return text content of first matching element, or default value if
        none was found. Note that if an element is found having no text
        content, the empty string is returned.
        """
        return ElementPath.findtext(self, path, default, namespaces)
    def findall(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Returns list containing all matching elements in document order.
        """
        return ElementPath.findall(self, path, namespaces)
    def iterfind(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return an iterable yielding all matching elements in document order.
        """
        return ElementPath.iterfind(self, path, namespaces)
    def clear(self):
        """Reset element.
        This function removes all subelements, clears all attributes, and sets
        the text and tail attributes to None.
        """
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None
    def get(self, key, default=None):
        """Get element attribute.
        Equivalent to attrib.get, but some implementations may handle this a
        bit more efficiently. *key* is what attribute to look for, and
        *default* is what to return if the attribute was not found.
        Returns a string containing the attribute value, or the default if
        attribute was not found.
        """
        return self.attrib.get(key, default)
    def set(self, key, value):
        """Set element attribute.
        Equivalent to attrib[key] = value, but some implementations may handle
        this a bit more efficiently. *key* is what attribute to set, and
        *value* is the attribute value to set it to.
        """
        self.attrib[key] = value
    def keys(self):
        """Get list of attribute names.
        Names are returned in an arbitrary order, just like an ordinary
        Python dict. Equivalent to attrib.keys()
        """
        return self.attrib.keys()
    def items(self):
        """Get element attributes as a sequence.
        The attributes are returned in arbitrary order. Equivalent to
        attrib.items().
        Return a list of (name, value) tuples.
        """
        return self.attrib.items()
    def iter(self, tag=None):
        """Create tree iterator.
        The iterator loops over the element and all subelements in document
        order, returning all elements with a matching tag.
        If the tree structure is modified during iteration, new or removed
        elements may or may not be included. To get a stable set, use the
        list() function on the iterator, and loop over the resulting list.
        *tag* is what tags to look for (default is to return all elements)
        Return an iterator containing all the matching elements.
        """
        # "*" is the wildcard and behaves the same as no tag filter.
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            yield self
        for e in self._children:
            yield from e.iter(tag)
    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'elem.iter()' or 'list(elem.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
            )
        return list(self.iter(tag))
    def itertext(self):
        """Create text iterator.
        The iterator loops over the element and all subelements in document
        order, returning all inner text.
        """
        tag = self.tag
        # Non-string tags (e.g. the Comment/PI factory functions used as
        # tags) carry no document text, so skip those subtrees entirely.
        if not isinstance(tag, str) and tag is not None:
            return
        if self.text:
            yield self.text
        for e in self:
            yield from e.itertext()
            if e.tail:
                yield e.tail
def SubElement(parent, tag, attrib={}, **extra):
    """Subelement factory which creates an element instance, and appends it
    to an existing parent.
    The element tag, attribute names, and attribute values can be either
    bytes or Unicode strings.
    *parent* is the parent element, *tag* is the subelements name, *attrib* is
    an optional directory containing element attributes, *extra* are
    additional attributes given as keyword arguments.
    """
    # Merge into a fresh dict so neither the caller's mapping nor the
    # shared default is mutated; keyword arguments win on conflict.
    merged = {**attrib, **extra}
    child = parent.makeelement(tag, merged)
    parent.append(child)
    return child
def Comment(text=None):
    """Comment element factory.
    This function creates a special element which the standard serializer
    serializes as an XML comment.
    *text* is a string containing the comment string.
    """
    # The Comment function itself serves as the sentinel tag the
    # serializer dispatches on.
    comment_elem = Element(Comment)
    comment_elem.text = text
    return comment_elem
def ProcessingInstruction(target, text=None):
    """Processing Instruction element factory.
    This function creates a special element which the standard serializer
    serializes as an XML comment.
    *target* is a string containing the processing instruction, *text* is a
    string containing the processing instruction contents, if any.
    """
    # As with Comment, the factory function doubles as the sentinel tag.
    pi_elem = Element(ProcessingInstruction)
    pi_elem.text = target if not text else target + " " + text
    return pi_elem
PI = ProcessingInstruction
class QName:
    """Qualified name wrapper.
    This class can be used to wrap a QName attribute value in order to get
    proper namespace handing on output.
    *text_or_uri* is a string containing the QName value either in the form
    {uri}local, or if the tag argument is given, the URI part of a QName.
    *tag* is an optional argument which if given, will make the first
    argument (text_or_uri) be interpreted as a URI, and this argument (tag)
    be interpreted as a local name.
    """
    def __init__(self, text_or_uri, tag=None):
        if tag:
            text_or_uri = "{%s}%s" % (text_or_uri, tag)
        self.text = text_or_uri
    def __str__(self):
        return self.text
    def __repr__(self):
        return '<QName %r>' % (self.text,)
    def __hash__(self):
        # Hash like the underlying string so QName("x") and "x" collide
        # in dicts/sets, matching the equality semantics below.
        return hash(self.text)
    @staticmethod
    def _comparable(other):
        # Comparisons accept either another QName or a plain string.
        return other.text if isinstance(other, QName) else other
    def __le__(self, other):
        return self.text <= self._comparable(other)
    def __lt__(self, other):
        return self.text < self._comparable(other)
    def __ge__(self, other):
        return self.text >= self._comparable(other)
    def __gt__(self, other):
        return self.text > self._comparable(other)
    def __eq__(self, other):
        return self.text == self._comparable(other)
    def __ne__(self, other):
        return self.text != self._comparable(other)
# --------------------------------------------------------------------
class ElementTree:
    """An XML element hierarchy.
    This class also provides support for serialization to and from
    standard XML.
    *element* is an optional root element node,
    *file* is an optional file handle or file name of an XML file whose
    contents will be used to initialize the tree with.
    """
    def __init__(self, element=None, file=None):
        # assert element is None or iselement(element)
        self._root = element  # first node
        if file:
            self.parse(file)
    def getroot(self):
        """Return root element of this tree."""
        return self._root
    def _setroot(self, element):
        """Replace root element of this tree.
        This will discard the current contents of the tree and replace it
        with the given element. Use with care!
        """
        # assert iselement(element)
        self._root = element
    def parse(self, source, parser=None):
        """Load external XML document into element tree.
        *source* is a file name or file object, *parser* is an optional parser
        instance that defaults to XMLParser.
        ParseError is raised if the parser fails to parse the document.
        Returns the root element of the given source document.
        """
        close_source = False
        if not hasattr(source, "read"):
            # *source* is a file name; we opened it, so we must close it.
            source = open(source, "rb")
            close_source = True
        try:
            if parser is None:
                # If no parser was specified, create a default XMLParser
                parser = XMLParser()
                if hasattr(parser, '_parse_whole'):
                    # The default XMLParser, when it comes from an accelerator,
                    # can define an internal _parse_whole API for efficiency.
                    # It can be used to parse the whole source without feeding
                    # it with chunks.
                    self._root = parser._parse_whole(source)
                    return self._root
            # Feed the parser in 64 KiB chunks to bound memory usage.
            while True:
                data = source.read(65536)
                if not data:
                    break
                parser.feed(data)
            self._root = parser.close()
            return self._root
        finally:
            if close_source:
                source.close()
    def iter(self, tag=None):
        """Create and return tree iterator for the root element.
        The iterator loops over all elements in this tree, in document order.
        *tag* is a string with the tag name to iterate over
        (default is to return all elements).
        """
        # assert self._root is not None
        return self._root.iter(tag)
    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'tree.iter()' or 'list(tree.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
            )
        return list(self.iter(tag))
    def find(self, path, namespaces=None):
        """Find first matching element by tag name or path.
        Same as getroot().find(path), which is Element.find()
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return the first matching element, or None if no element was found.
        """
        # assert self._root is not None
        if path[:1] == "/":
            # Leading "/" is rewritten relative to the root; the historical
            # absolute-path behaviour was broken, hence the warning.
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.find(path, namespaces)
    def findtext(self, path, default=None, namespaces=None):
        """Find first matching element by tag name or path.
        Same as getroot().findtext(path), which is Element.findtext()
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return the first matching element, or None if no element was found.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findtext(path, default, namespaces)
    def findall(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.
        Same as getroot().findall(path), which is Element.findall().
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return list containing all matching elements in document order.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findall(path, namespaces)
    def iterfind(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.
        Same as getroot().iterfind(path), which is element.iterfind()
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return an iterable yielding all matching elements in document order.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.iterfind(path, namespaces)
    def write(self, file_or_filename,
              encoding=None,
              xml_declaration=None,
              default_namespace=None,
              method=None, *,
              short_empty_elements=True):
        """Write element tree to a file as XML.
        Arguments:
          *file_or_filename* -- file name or a file object opened for writing
          *encoding* -- the output encoding (default: US-ASCII)
          *xml_declaration* -- bool indicating if an XML declaration should be
                               added to the output. If None, an XML declaration
                               is added if encoding IS NOT either of:
                               US-ASCII, UTF-8, or Unicode
          *default_namespace* -- sets the default XML namespace (for "xmlns")
          *method* -- either "xml" (default), "html, "text", or "c14n"
          *short_empty_elements* -- controls the formatting of elements
                                    that contain no content. If True (default)
                                    they are emitted as a single self-closed
                                    tag, otherwise they are emitted as a pair
                                    of start/end tags
        """
        if not method:
            method = "xml"
        elif method not in _serialize:
            raise ValueError("unknown method %r" % method)
        if not encoding:
            if method == "c14n":
                encoding = "utf-8"
            else:
                encoding = "us-ascii"
        else:
            encoding = encoding.lower()
        with _get_writer(file_or_filename, encoding) as write:
            if method == "xml" and (xml_declaration or
                    (xml_declaration is None and
                     encoding not in ("utf-8", "us-ascii", "unicode"))):
                declared_encoding = encoding
                if encoding == "unicode":
                    # Retrieve the default encoding for the xml declaration
                    import locale
                    declared_encoding = locale.getpreferredencoding()
                write("<?xml version='1.0' encoding='%s'?>\n" % (
                    declared_encoding,))
            if method == "text":
                _serialize_text(write, self._root)
            else:
                # Compute the qname/namespace tables once, then hand off to
                # the method-specific serializer from the dispatch table.
                qnames, namespaces = _namespaces(self._root, default_namespace)
                serialize = _serialize[method]
                serialize(write, self._root, qnames, namespaces,
                          short_empty_elements=short_empty_elements)
    def write_c14n(self, file):
        # lxml.etree compatibility. use output method instead
        return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
    # returns text write method and release all resources after using
    try:
        write = file_or_filename.write
    except AttributeError:
        # file_or_filename is a file name
        if encoding == "unicode":
            file = open(file_or_filename, "w")
        else:
            file = open(file_or_filename, "w", encoding=encoding,
                        errors="xmlcharrefreplace")
        with file:
            yield file.write
    else:
        # file_or_filename is a file-like object
        # encoding determines if it is a text or binary writer
        if encoding == "unicode":
            # use a text writer as is
            yield write
        else:
            # wrap a binary writer with TextIOWrapper
            with contextlib.ExitStack() as stack:
                if isinstance(file_or_filename, io.BufferedIOBase):
                    file = file_or_filename
                elif isinstance(file_or_filename, io.RawIOBase):
                    file = io.BufferedWriter(file_or_filename)
                    # Keep the original file open when the BufferedWriter is
                    # destroyed
                    stack.callback(file.detach)
                else:
                    # This is to handle passed objects that aren't in the
                    # IOBase hierarchy, but just have a write method
                    file = io.BufferedIOBase()
                    file.writable = lambda: True
                    file.write = write
                    try:
                        # TextIOWrapper uses this methods to determine
                        # if BOM (for UTF-16, etc) should be added
                        file.seekable = file_or_filename.seekable
                        file.tell = file_or_filename.tell
                    except AttributeError:
                        pass
                # newline="\n" prevents universal-newline translation from
                # altering the serialized output.
                file = io.TextIOWrapper(file,
                                        encoding=encoding,
                                        errors="xmlcharrefreplace",
                                        newline="\n")
                # Keep the original file open when the TextIOWrapper is
                # destroyed
                stack.callback(file.detach)
                yield file.write
def _namespaces(elem, default_namespace=None):
    """Walk the tree under *elem* and build the serialization tables:
    a qname -> "prefix:local" mapping and a uri -> prefix mapping."""
    # identify namespaces used in this tree
    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}
    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""
    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    # Fall back to a well-known prefix, else invent "nsN".
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = "%s:%s" % (prefix, tag)
                else:
                    qnames[qname] = tag  # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = qname
        except TypeError:
            _raise_serialization_error(qname)
    # populate qname and namespaces table
    for elem in elem.iter():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, str):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces,
                   short_empty_elements, **kwargs):
    """Recursively emit *elem* as XML via the *write* callable, using the
    qname/namespace tables computed by _namespaces()."""
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % text)
    elif tag is ProcessingInstruction:
        write("<?%s?>" % text)
    else:
        tag = qnames[tag]
        if tag is None:
            # Tagless wrapper: serialize only its text and children.
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_xml(write, e, qnames, None,
                               short_empty_elements=short_empty_elements)
        else:
            write("<" + tag)
            items = list(elem.items())
            if items or namespaces:
                if namespaces:
                    # namespaces is only passed for the root element, so
                    # xmlns declarations all land on the document root.
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k,
                            _escape_attrib(v)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v)
                    write(" %s=\"%s\"" % (qnames[k], v))
            if text or len(elem) or not short_empty_elements:
                write(">")
                if text:
                    write(_escape_cdata(text))
                for e in elem:
                    _serialize_xml(write, e, qnames, None,
                                   short_empty_elements=short_empty_elements)
                write("</" + tag + ">")
            else:
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces, **kwargs):
    """Recursively emit *elem* using HTML output rules: void elements from
    HTML_EMPTY get no end tag, and script/style text is not escaped."""
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text))
    else:
        tag = qnames[tag]
        if tag is None:
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, qnames, None)
        else:
            write("<" + tag)
            items = list(elem.items())
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k,
                            _escape_attrib(v)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
            write(">")
            ltag = tag.lower()
            if text:
                # script/style contents are emitted verbatim per HTML rules.
                if ltag == "script" or ltag == "style":
                    write(text)
                else:
                    write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, qnames, None)
            if ltag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
# Dispatch table mapping the *method* argument of ElementTree.write()
# (and tostring()/tostringlist()) to the corresponding serializer.
_serialize = {
    "xml": _serialize_xml,
    "html": _serialize_html,
    "text": _serialize_text,
    # this optional method is imported at the end of the module
    # "c14n": _serialize_c14n,
}
def register_namespace(prefix, uri):
    """Register a namespace prefix.
    The registry is global, and any existing mapping for either the
    given prefix or the namespace URI will be removed.
    *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
    attributes in this namespace will be serialized with prefix if possible.
    ValueError is raised if prefix is reserved or is invalid.
    """
    # "ns<number>" prefixes are generated internally by _namespaces() and
    # may not be registered by users.  Use a raw string: the previous
    # non-raw "ns\d+$" contained an invalid escape sequence, which is a
    # DeprecationWarning (and eventually an error) on modern Python.
    if re.match(r"ns\d+$", prefix):
        raise ValueError("Prefix format reserved for internal use")
    # Drop any existing entry that collides on either the URI or the prefix.
    for k, v in list(_namespace_map.items()):
        if k == uri or v == prefix:
            del _namespace_map[k]
    _namespace_map[uri] = prefix
# Well-known namespace URIs mapped to their conventional prefixes; used
# by _namespaces() before inventing a generated "nsN" prefix.
_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
    # xml schema
    "http://www.w3.org/2001/XMLSchema": "xs",
    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
    "http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
def tostring(element, encoding=None, method=None, *,
             short_empty_elements=True):
    """Generate string representation of XML element.
    All subelements are included. If encoding is "unicode", a string
    is returned. Otherwise a bytestring is returned.
    *element* is an Element instance, *encoding* is an optional output
    encoding defaulting to US-ASCII, *method* is an optional output which can
    be one of "xml" (default), "html", "text" or "c14n".
    Returns an (optionally) encoded string containing the XML data.
    """
    # Pick a text or binary in-memory buffer to match the return type.
    if encoding == 'unicode':
        stream = io.StringIO()
    else:
        stream = io.BytesIO()
    ElementTree(element).write(stream, encoding, method=method,
                               short_empty_elements=short_empty_elements)
    return stream.getvalue()
class _ListDataStream(io.BufferedIOBase):
"""An auxiliary stream accumulating into a list reference."""
def __init__(self, lst):
self.lst = lst
def writable(self):
return True
def seekable(self):
return True
def write(self, b):
self.lst.append(b)
def tell(self):
return len(self.lst)
def tostringlist(element, encoding=None, method=None, *,
                 short_empty_elements=True):
    """Serialize *element* like tostring(), but return the raw list of
    chunks emitted by the writer instead of one joined string."""
    chunks = []
    ElementTree(element).write(_ListDataStream(chunks), encoding,
                               method=method,
                               short_empty_elements=short_empty_elements)
    return chunks
def dump(elem):
    """Write element tree or element structure to sys.stdout.
    This function should be used for debugging only.
    *elem* is either an ElementTree, or a single Element. The exact output
    format is implementation dependent. In this version, it's written as an
    ordinary XML file.
    """
    # debugging
    tree = elem if isinstance(elem, ElementTree) else ElementTree(elem)
    tree.write(sys.stdout, encoding="unicode")
    # Ensure the dump ends with a newline even when the root has no tail.
    tail = tree.getroot().tail
    if not tail or not tail.endswith("\n"):
        sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
def parse(source, parser=None):
    """Parse XML document into element tree.
    *source* is a filename or file object containing XML data,
    *parser* is an optional parser instance defaulting to XMLParser.
    Return an ElementTree instance.
    """
    document = ElementTree()
    document.parse(source, parser)
    return document
def iterparse(source, events=None, parser=None):
    """Incrementally parse XML document into ElementTree.
    This class also reports what's going on to the user based on the
    *events* it is initialized with. The supported events are the strings
    "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get
    detailed namespace information). If *events* is omitted, only
    "end" events are reported.
    *source* is a filename or file object containing XML data, *events* is
    a list of events to report back, *parser* is an optional parser instance.
    Returns an iterator providing (event, elem) pairs.
    """
    # If we opened the file ourselves, the iterator must close it when done.
    if hasattr(source, "read"):
        stream, close_stream = source, False
    else:
        stream, close_stream = open(source, "rb"), True
    return _IterParseIterator(stream, events, parser, close_stream)
class XMLPullParser:
    """Non-blocking ("pull") parser: feed() in data as it arrives and
    drain the resulting parse events with read_events()."""
    def __init__(self, events=None, *, _parser=None):
        # The _parser argument is for internal use only and must not be relied
        # upon in user code. It will be removed in a future release.
        # See http://bugs.python.org/issue17741 for more details.
        # _elementtree.c expects a list, not a deque
        self._events_queue = []
        self._index = 0
        self._parser = _parser or XMLParser(target=TreeBuilder())
        # wire up the parser for event reporting
        if events is None:
            events = ("end",)
        self._parser._setevents(self._events_queue, events)
    def feed(self, data):
        """Feed encoded data to parser."""
        if self._parser is None:
            raise ValueError("feed() called after end of stream")
        if data:
            try:
                self._parser.feed(data)
            except SyntaxError as exc:
                # Queue the error so it is raised from read_events(), in
                # order with the events that preceded it.
                self._events_queue.append(exc)
    def _close_and_return_root(self):
        # iterparse needs this to set its root attribute properly :(
        root = self._parser.close()
        self._parser = None
        return root
    def close(self):
        """Finish feeding data to parser.
        Unlike XMLParser, does not return the root element. Use
        read_events() to consume elements from XMLPullParser.
        """
        self._close_and_return_root()
    def read_events(self):
        """Return an iterator over currently available (event, elem) pairs.
        Events are consumed from the internal event queue as they are
        retrieved from the iterator.
        """
        events = self._events_queue
        while True:
            index = self._index
            try:
                event = events[self._index]
                # Avoid retaining references to past events
                events[self._index] = None
            except IndexError:
                break
            index += 1
            # Compact the list in a O(1) amortized fashion
            # As noted above, _elementree.c needs a list, not a deque
            if index * 2 >= len(events):
                events[:index] = []
                self._index = 0
            else:
                self._index = index
            if isinstance(event, Exception):
                raise event
            else:
                yield event
class _IterParseIterator:
    """Iterator returned by iterparse(): pulls chunks from the source file
    through an XMLPullParser and yields its (event, elem) pairs."""
    def __init__(self, source, events, parser, close_source=False):
        # Use the internal, undocumented _parser argument for now; When the
        # parser argument of iterparse is removed, this can be killed.
        self._parser = XMLPullParser(events=events, _parser=parser)
        self._file = source
        self._close_file = close_source
        self.root = self._root = None
    def __next__(self):
        while 1:
            # Drain any already-queued events before reading more input.
            for event in self._parser.read_events():
                return event
            if self._parser._parser is None:
                # Parser closed: publish the root and finish the iteration.
                self.root = self._root
                if self._close_file:
                    self._file.close()
                raise StopIteration
            # load event buffer
            data = self._file.read(16 * 1024)
            if data:
                self._parser.feed(data)
            else:
                self._root = self._parser._close_and_return_root()
    def __iter__(self):
        return self
def XML(text, parser=None):
    """Parse XML document from string constant.
    This function can be used to embed "XML Literals" in Python code.
    *text* is a string containing XML data, *parser* is an
    optional parser instance, defaulting to the standard XMLParser.
    Returns an Element instance.
    """
    xml_parser = parser or XMLParser(target=TreeBuilder())
    xml_parser.feed(text)
    return xml_parser.close()
def XMLID(text, parser=None):
    """Parse XML document from string constant for its IDs.
    *text* is a string containing XML data, *parser* is an
    optional parser instance, defaulting to the standard XMLParser.
    Returns an (Element, dict) tuple, in which the
    dict maps element id:s to elements.
    """
    if not parser:
        parser = XMLParser(target=TreeBuilder())
    parser.feed(text)
    tree = parser.close()
    ids = {}
    for elem in tree.iter():
        # Renamed from "id", which shadowed the builtin id().
        elem_id = elem.get("id")
        if elem_id:
            ids[elem_id] = elem
    return tree, ids
# Parse XML document from string constant. Alias for XML().
fromstring = XML
def fromstringlist(sequence, parser=None):
    """Parse XML document from sequence of string fragments.
    *sequence* is a list of other sequence, *parser* is an optional parser
    instance, defaulting to the standard XMLParser.
    Returns an Element instance.
    """
    xml_parser = parser or XMLParser(target=TreeBuilder())
    for fragment in sequence:
        xml_parser.feed(fragment)
    return xml_parser.close()
# --------------------------------------------------------------------
class TreeBuilder:
    """Generic element structure builder.
    This builder converts a sequence of start, data, and end method
    calls to a well-formed element structure.
    You can use this class to build an element structure using a custom XML
    parser, or a parser for some other XML-like format.
    *element_factory* is an optional element factory which is called
    to create new Element instances, as necessary.
    """
    def __init__(self, element_factory=None):
        self._data = []  # data collector
        self._elem = []  # element stack
        self._last = None  # last element
        self._tail = None  # true if we're after an end tag
        if element_factory is None:
            element_factory = Element
        self._factory = element_factory
    def close(self):
        """Flush builder buffers and return toplevel document Element."""
        assert len(self._elem) == 0, "missing end tags"
        assert self._last is not None, "missing toplevel element"
        return self._last
    def _flush(self):
        # Join buffered character data and attach it to the most recently
        # handled element: as .tail after an end tag, as .text otherwise.
        if self._data:
            if self._last is not None:
                text = "".join(self._data)
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []
    def data(self, data):
        """Add text to current element."""
        self._data.append(data)
    def start(self, tag, attrs):
        """Open new element and return it.
        *tag* is the element name, *attrs* is a dict containing element
        attributes.
        """
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem
    def end(self, tag):
        """Close and return current Element.
        *tag* is the element name.
        """
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
# also see ElementTree and TreeBuilder
class XMLParser:
    """Element structure builder for XML source data based on the expat parser.

    *html* are predefined HTML entities (not supported currently),
    *target* is an optional target object which defaults to an instance of the
    standard TreeBuilder class, *encoding* is an optional encoding string
    which if given, overrides the encoding specified in the XML file:
    http://www.iana.org/assignments/character-sets
    """

    def __init__(self, html=0, target=None, encoding=None):
        try:
            from xml.parsers import expat
        except ImportError:
            try:
                import pyexpat as expat
            except ImportError:
                raise ImportError(
                    "No module named expat; use SimpleXMLTreeBuilder instead"
                    )
        # "}" as the namespace separator makes expat report qualified names
        # as "uri}local"; _fixname() later prepends "{" to build the
        # "{uri}local" form used throughout ElementTree.
        parser = expat.ParserCreate(encoding, "}")
        if target is None:
            target = TreeBuilder()
        # underscored names are provided for compatibility only
        self.parser = self._parser = parser
        self.target = self._target = target
        self._error = expat.error
        self._names = {}  # name memo cache shared by _fixname()
        # main callbacks
        parser.DefaultHandlerExpand = self._default
        # Only install handlers the target actually implements; expat skips
        # events with no handler, which avoids useless Python callbacks.
        if hasattr(target, 'start'):
            parser.StartElementHandler = self._start
        if hasattr(target, 'end'):
            parser.EndElementHandler = self._end
        if hasattr(target, 'data'):
            parser.CharacterDataHandler = target.data
        # miscellaneous callbacks
        if hasattr(target, 'comment'):
            parser.CommentHandler = target.comment
        if hasattr(target, 'pi'):
            parser.ProcessingInstructionHandler = target.pi
        # Configure pyexpat: buffering, new-style attribute handling.
        parser.buffer_text = 1
        parser.ordered_attributes = 1
        parser.specified_attributes = 1
        self._doctype = None
        # Map of entity name -> replacement text used by _default() for
        # entities expat itself does not define.
        self.entity = {}
        try:
            self.version = "Expat %d.%d.%d" % expat.version_info
        except AttributeError:
            pass  # unknown

    def _setevents(self, events_queue, events_to_report):
        # Internal API for XMLPullParser
        # events_to_report: a list of events to report during parsing (same as
        # the *events* of XMLPullParser's constructor.
        # events_queue: a list of actual parsing events that will be populated
        # by the underlying parser.
        #
        parser = self._parser
        append = events_queue.append
        for event_name in events_to_report:
            if event_name == "start":
                parser.ordered_attributes = 1
                parser.specified_attributes = 1
                # Default arguments freeze the current loop values into each
                # handler closure (late-binding closures would otherwise all
                # see the last event_name).
                def handler(tag, attrib_in, event=event_name, append=append,
                            start=self._start):
                    append((event, start(tag, attrib_in)))
                parser.StartElementHandler = handler
            elif event_name == "end":
                def handler(tag, event=event_name, append=append,
                            end=self._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event_name == "start-ns":
                def handler(prefix, uri, event=event_name, append=append):
                    append((event, (prefix or "", uri or "")))
                parser.StartNamespaceDeclHandler = handler
            elif event_name == "end-ns":
                def handler(prefix, event=event_name, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler
            else:
                raise ValueError("unknown event %r" % event_name)

    def _raiseerror(self, value):
        # Wrap an expat error in an ElementTree ParseError, preserving the
        # numeric code and (line, column) position.
        err = ParseError(value)
        err.code = value.code
        err.position = value.lineno, value.offset
        raise err

    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                name = "{" + name
            self._names[key] = name
        return name

    def _start(self, tag, attr_list):
        # Handler for expat's StartElementHandler. Since ordered_attributes
        # is set, the attributes are reported as a list of alternating
        # attribute name,value.
        fixname = self._fixname
        tag = fixname(tag)
        attrib = {}
        if attr_list:
            for i in range(0, len(attr_list), 2):
                attrib[fixname(attr_list[i])] = attr_list[i+1]
        return self.target.start(tag, attrib)

    def _end(self, tag):
        # Handler for expat's EndElementHandler.
        return self.target.end(self._fixname(tag))

    def _default(self, text):
        # Catch-all handler: receives entity references and doctype
        # declaration fragments that expat does not route elsewhere.
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                data_handler = self.target.data
            except AttributeError:
                return
            try:
                data_handler(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                err = expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self.parser.ErrorLineNumber,
                     self.parser.ErrorColumnNumber)
                    )
                err.code = 11  # XML_ERROR_UNDEFINED_ENTITY
                err.lineno = self.parser.ErrorLineNumber
                err.offset = self.parser.ErrorColumnNumber
                raise err
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = []  # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = text.strip()
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                    if pubid:
                        pubid = pubid[1:-1]
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if hasattr(self.target, "doctype"):
                    self.target.doctype(name, pubid, system[1:-1])
                elif self.doctype != self._XMLParser__doctype:
                    # The mangled name is the sentinel stored below; if they
                    # differ, a subclass overrode the deprecated doctype()
                    # hook -- warn about deprecated call
                    self._XMLParser__doctype(name, pubid, system[1:-1])
                    self.doctype(name, pubid, system[1:-1])
                self._doctype = None

    def doctype(self, name, pubid, system):
        """(Deprecated) Handle doctype declaration

        *name* is the Doctype name, *pubid* is the public identifier,
        and *system* is the system identifier.
        """
        warnings.warn(
            "This method of XMLParser is deprecated. Define doctype() "
            "method on the TreeBuilder target.",
            DeprecationWarning,
            )

    # sentinel, if doctype is redefined in a subclass
    __doctype = doctype

    def feed(self, data):
        """Feed encoded data to parser."""
        try:
            self.parser.Parse(data, 0)
        except self._error as v:
            self._raiseerror(v)

    def close(self):
        """Finish feeding data to parser and return element structure."""
        try:
            self.parser.Parse("", 1)  # end of data
        except self._error as v:
            self._raiseerror(v)
        try:
            close_handler = self.target.close
        except AttributeError:
            pass
        else:
            return close_handler()
        finally:
            # get rid of circular references
            del self.parser, self._parser
            del self.target, self._target
# Import the C accelerators
try:
    # Element is going to be shadowed by the C implementation. We need to
    # keep the Python version of it accessible for some "creative" uses by
    # external code (see tests)
    _Element_Py = Element
    # Element, SubElement, ParseError, TreeBuilder, XMLParser
    from _elementtree import *
except ImportError:
    pass
| mit |
rg3915/spark | spark/activities/views.py | 1 | 1357 | from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from spark.activities.models import Notification
from spark.decorators import ajax_required
@login_required
def notifications(request):
    """Render all of the user's notifications, marking unread ones as read.

    Context:
        notifications: every Notification addressed to the current user.
    """
    user = request.user
    notifications = Notification.objects.filter(to_user=user)
    unread = Notification.objects.filter(to_user=user, is_read=False)
    # Single bulk UPDATE instead of one save() per row.  NOTE: update()
    # bypasses Model.save() and save signals, which is fine for a plain
    # flag flip like this.
    unread.update(is_read=True)
    return render(request, 'activities/notifications.html',
                  {'notifications': notifications})
@login_required
@ajax_required
def last_notifications(request):
    """Render up to five unread notifications and mark them as read.

    The per-object save() loop is kept because the queryset is sliced
    (sliced querysets cannot be bulk-updated).
    """
    unread = Notification.objects.filter(to_user=request.user,
                                         is_read=False)[:5]
    for item in unread:
        item.is_read = True
        item.save()
    return render(request,
                  'activities/last_notifications.html',
                  {'notifications': unread})
@login_required
@ajax_required
def check_notifications(request):
    """Return the number of unread notifications (capped at 5) as plain text.

    Uses count() so the database returns only the number instead of
    fetching the rows just to call len() on them.
    """
    unread_count = Notification.objects.filter(to_user=request.user,
                                               is_read=False)[:5].count()
    return HttpResponse(unread_count)
| mit |
JohanWesto/receptive-field-models | rf_models/rf_helper.py | 1 | 17339 | #!/usr/bin/python
"""
" @section DESCRIPTION
" Helper functions for training and evaluating RF models
"""
import os
import numpy as np
import cPickle as pickle
from scipy.io import loadmat
from scipy.linalg import toeplitz
from sklearn.neighbors import kneighbors_graph
from numpy.lib import stride_tricks
from operator import mul
from cython.rf_cython import cross_corr_c, cf_mat_der_c
def drange(start, stop, step):
    """Yield values from *start* up to (but excluding) *stop* in *step*
    increments.  Works with floats, unlike range()."""
    current = start
    while current < stop:
        yield current
        current += step
def add_fake_dimension(org_ndarray, time_win_size):
    """ Rolls a time window over a vector and extract the window content

    Stride_tricks only affect the shape and strides in the array interface.
    The memory footprint is therefore equal for both org_ndarray and
    fake_ndarray (the result is a view, no data is copied).

    Important!!!
    The time dimension in X must be along the first dimension (axis=0)

    Args:
        org_ndarray: array to roll the window over (time along axis 0)
        time_win_size: window size in vector elements (time dimension)

    Returns:
        fake_ndarray: strided view with shape
            (n_windows, time_win_size) + org_ndarray.shape[1:]

    Raises:
    """
    n_element = org_ndarray.size
    element_size = org_ndarray.itemsize
    input_dims = org_ndarray.shape

    # Number of scalar elements per time step (product of trailing dims)
    stride_length = 1
    for dims in input_dims[1:]:
        stride_length *= dims

    org_1darray = org_ndarray.ravel()
    # Floor division keeps the window count an int under Python 3 as well;
    # with true division the shape would be a float and as_strided would
    # reject it.
    shape = (n_element // stride_length - time_win_size + 1,
             time_win_size * stride_length)
    strides = (stride_length * element_size, element_size)
    fake_2darray = stride_tricks.as_strided(org_1darray,
                                            shape=shape,
                                            strides=strides)

    new_shape = [shape[0], time_win_size]
    for dims in input_dims[1:]:
        new_shape.append(dims)
    fake_ndarray = fake_2darray.reshape(new_shape)

    return fake_ndarray
def gaussian_field(shape, origin):
    """ Generates a multi-dimensional Gaussian field

    :param shape: 3-element output shape
    :param origin: 3-element offset of the Gaussian centre
    :return: ndarray of the given shape, peak-normalized to 1
    """
    cov_inv = np.diag(np.ones(3))
    # Coordinate grids centered on the (offset) field middle; note the
    # axis order (1, 0, 2) matches numpy.meshgrid's default 'xy' indexing.
    grid0, grid1, grid2 = np.meshgrid(
        np.arange(shape[1]) - shape[1] / 2 - origin[1],
        np.arange(shape[0]) - shape[0] / 2 - origin[0],
        np.arange(shape[2]) - shape[2] / 2 - origin[2])
    coords = np.vstack([grid0.ravel(), grid1.ravel(), grid2.ravel()])
    # Quadratic form x^T * cov_inv * x, evaluated per grid point
    quad_form = (coords * np.dot(cov_inv, coords)).sum(axis=0)
    field = np.exp(-0.5 * quad_form).reshape(shape)
    return field / field.max()
def smooth_reg_l(shape):
    """ Smooth regularization using a n-D discrete Laplace operator

    :param shape: field shape; singleton dimensions are dropped
    :return reg_l: Laplacian regularization matrix
    """
    shape = [dim for dim in shape if dim > 1]

    if len(shape) == 1:
        # 1-D case: tridiagonal [1, -2, 1] stencil with zeroed border rows
        first_row = np.concatenate([[-2, 1], np.zeros(shape[0] - 2)])
        reg_l = toeplitz(first_row)
        reg_l[0, :] = 0
        reg_l[-1, :] = 0
    else:
        if len(shape) == 2:
            g0, g1 = np.meshgrid(range(shape[1]), range(shape[0]))
            coords = np.vstack([g0.ravel(), g1.ravel()])
        elif len(shape) == 3:
            g0, g1, g2 = np.meshgrid(range(shape[1]),
                                     range(shape[0]),
                                     range(shape[2]))
            coords = np.vstack([g0.ravel(), g1.ravel(), g2.ravel()])

        # 6-neighbour connectivity; anything farther than distance 1 is
        # not a direct grid neighbour and gets dropped.
        con_mat = kneighbors_graph(coords.T, 6, mode='distance').toarray()
        con_mat[con_mat > 1] = 0
        connections_per_node = con_mat.sum(axis=0)
        con_mat[con_mat == 1] = -1
        con_mat[np.diag_indices_from(con_mat)] = connections_per_node
        reg_l = con_mat

    return reg_l
def field_part_der(x_nd, field, part_idx):
    """ Field part derivative in multilinear (separable) models

    Contracts *x_nd* with the outer product of all parts except the one
    with index *part_idx*.

    :param x_nd: input array, samples along axis 0
    :param field: field object with a list attribute ``parts``
    :param part_idx: index of the part to differentiate with respect to
    :return part_der: derivative array
    """
    n_parts = len(field.parts)

    # Create the outer product between non-part_idx parts.
    # list() keeps the concatenation valid on Python 3 as well, where
    # range() returns a lazy object instead of a list.
    cross_idx = list(range(part_idx)) + \
                list(range(part_idx + 1, n_parts))
    part_cross = outer_product(field.parts, cross_idx)

    # Sum up contributions along all x_nd dimensions except the sample
    # axis and the axis belonging to the differentiated part
    x_axes = list(range(1, part_idx + 1)) + \
             list(range(part_idx + 2, 2 + part_cross.ndim))
    field_axes = list(range(len(part_cross.shape)))
    part_der = np.tensordot(x_nd, part_cross, axes=(x_axes, field_axes))

    return part_der
def sta_and_stc(x_2d, y):
    """ Calculate the STA and the STC

    Args:
        x_2d: input array (assumed to have zero mean)
        y: output array (spike counts)

    Returns:
        sta: spike-triggered average
        stc: spike-triggered covariance

    Raise
    """
    # Spike-triggered ensemble (boolean indexing yields a copy, so the
    # in-place centering below does not touch the caller's array)
    ste = x_2d[y.ravel() > 0, :]
    weights = y[y > 0, None]

    # STA: spike-count weighted average of the ensemble
    sta = (ste * weights).sum(axis=0) / y.sum()

    # STC: weighted covariance of the ensemble around the STA
    ste -= sta
    stc = np.dot((ste * weights).T, ste) / (y.sum() - 1)

    return sta, stc
def get_insignificant_basis(x, y, rf_shape):
    """Return five RFs built from mid-spectrum STC eigenvectors.

    Whitens the windowed stimulus, computes the spike-triggered
    covariance, and takes the five eigenvectors around the middle of the
    sorted (non-near-zero) eigenvalue spectrum.

    :param x: stimulus array, time along axis 0
    :param y: spike count array
    :param rf_shape: receptive field shape (time window size first)
    :return: list of five arrays with shape rf_shape
    """
    # Make a 2D matrix
    x_nd = add_fake_dimension(x, rf_shape[0])
    x_nd_full = x_nd.copy()
    n_samples = x_nd_full.shape[0]
    rf_size = reduce(mul, rf_shape)
    x_2d = x_nd_full.reshape(n_samples, rf_size)
    # Mean center and whiten
    x_2d -= x_2d.mean(axis=0)
    x_2d /= x_2d.std(axis=0)
    _, stc = sta_and_stc(x_2d, y)
    eig_val, eig_vec = np.linalg.eig(stc)
    sort_idxs = np.argsort(eig_val)
    # Eigenvalues with magnitude below 1e-10 are treated as numerically zero
    n_zero_val = (np.abs(eig_val) < 1e-10).sum()
    # NOTE(review): integer division under Python 2; under Python 3 this
    # `/` would produce a float index -- confirm intended interpreter.
    middle_idx = (sort_idxs.size - n_zero_val) / 2 + n_zero_val
    # insignificant_basis = np.real(eig_vec[:, sort_idxs[middle_idx]])
    # rf = insignificant_basis.reshape(rf_shape)
    # return rf
    rfs = []
    for i in range(-2, 3, 1):
        insignificant_basis = np.real(eig_vec[:, sort_idxs[middle_idx + i]])
        rfs.append(insignificant_basis.reshape(rf_shape))
    return rfs
def scale_params(params):
    """Rescale each context field so that its bias becomes one.

    For every CF, the entries of the first RF selected by
    ``context_map == cf_id`` are divided by the same factor that the CF
    field and bias are multiplied by.  All scaling happens in place.

    :param params: object with ``rfs``, ``cfs`` and ``context_map``
    :return: the same params object
    """
    for cf_id, cf in enumerate(params.cfs):
        factor = 1 / cf.bias
        params.rfs[0].field[params.context_map == cf_id] /= factor
        cf.field *= factor
        cf.bias *= factor

    return params
def outer_product(parts, cross_idx=None):
    """ Calculates an outer product between 1 to 3 vectors

    Args:
        parts: list with vectors
        cross_idx: indices indicating which vectors to multiply;
            None or an empty list means all of them

    Returns:
        part_cross

    Raise
        Exception if more than three parts
    """
    # None default instead of a mutable default argument; an empty or
    # omitted selection means "use all vectors".  list() keeps the result
    # indexable on Python 3 as well.
    if not cross_idx:
        cross_idx = list(range(len(parts)))

    # Outer product between selected vectors
    if len(cross_idx) == 1:
        part_cross = parts[cross_idx[0]]
    elif len(cross_idx) == 2:
        if parts[cross_idx[0]].ndim == parts[cross_idx[1]].ndim:
            part_cross = np.outer(parts[cross_idx[0]], parts[cross_idx[1]])
        else:
            part_cross = parts[cross_idx[0]][:, np.newaxis, np.newaxis] * \
                         parts[cross_idx[1]]
    elif len(cross_idx) == 3:
        part_cross = parts[cross_idx[0]][:, np.newaxis, np.newaxis] * \
                     np.outer(parts[cross_idx[1]], parts[cross_idx[2]])
    else:
        raise Exception("Can only handle max 3 parts")

    return part_cross
def inner_product(x_nd, rfs):
    """ Calculates the inner product between between multidimensional arrays

    Generalized multidimensional euclidean inner product computed with
    numpy.tensordot (numpy.dot can't handle multidimensional matrices).
    The score for each receptive field is stored in its own column.

    Args:
        x_nd: multidimensional input array, samples along axis 0
        rfs: list with receptive fields (objects with ``field``, ``shape``
            and ``bias``)

    Returns:
        inner_product_nd:

    Raise
    """
    result = np.empty([x_nd.shape[0], len(rfs)])

    for rf_idx, rf in enumerate(rfs):
        # Contract over every non-sample axis of x_nd
        x_axes = list(range(1, len(x_nd.shape)))
        rf_axes = list(range(len(rf.shape)))
        result[:, rf_idx] = np.tensordot(x_nd, rf.field,
                                         axes=(x_axes, rf_axes))

        # Optional quadratic filter: square and scale the projection
        if hasattr(rf, 'qn_square') and rf.qn_square:
            result[:, rf_idx] *= rf.qn_lambda * result[:, rf_idx]

        # Add the filter's bias term
        result[:, rf_idx] += rf.bias

    return result
def cross_corr(x, rf):
    """ Calculates the cross-correlation between x and rf

    Computes the cross-correlation between x and rf without the need to
    create a large input matrix by adding a fake dimension.

    The function is a python wrapper for the cython function:
    cross_corr_c()

    Args:
        x: input array, time along axis 0
        rf: receptive field (object with ``field`` ndarray and ``shape``)

    Returns:
        z: similarity score, one value per valid window position

    Raise
    """
    win_size = rf.field.size
    # Scalar elements per time step of x (product of trailing dimensions)
    stride = reduce(mul, x.shape[1:])
    # One score per window position that fits entirely inside x
    n_vals = x.shape[0] - rf.shape[0] + 1
    z = np.empty(n_vals)
    z[:] = cross_corr_c(x.ravel(), rf.field.ravel(), n_vals, stride, win_size)
    # NOTE(review): bias addition is commented out -- confirm intentional
    # z += rf.bias
    return z
def cf_mat_der(x, e, rf):
    """Mean derivative of the context field, via the cython cf_mat_der_c().

    :param x: input array, time along axis 0
    :param e: error array (presumably the model residual -- confirm
        against callers)
    :param rf: receptive field object with ``field`` and ``shape``
    :return: derivative summed over all valid windows, divided by the
        number of windows (one value per field element)
    """
    win_size = rf.field.size
    # Scalar elements per time step of x
    stride = reduce(mul, x.shape[1:])
    # Number of window positions that fit entirely inside x
    n_vals = x.shape[0] - rf.shape[0] + 1
    cf_der_sum = np.zeros(win_size)
    cf_der_sum[:] = cf_mat_der_c(x.ravel(), e.ravel(), rf.field.ravel(), n_vals, stride, win_size)
    cf_der_sum = cf_der_sum / n_vals
    return cf_der_sum
def z_dist(z, y, n_bins):
    """Approximates the similarity score distributions P(z) and P(z|spike)

    IMPORTANT!
    This function ONLY uses the first two receptive fields in the LN-model

    Args:
        z: similarity score array (columns = receptive fields)
        y: spike count array
        n_bins: number of bins to use when approximating the distribution

    Returns:
        p_z: P(z)
        p_z_spike: P(z|spike)
        z_edges: bin edge values (one array per histogram dimension)

    Raises:
    """
    # The histogram range goes linearly between -n_std to + n_std
    n_std = 3

    # scores resulting in one or more spikes
    spike_in_bin = (y > 0).ravel()  # spike indicator vector
    z_spike = z.compress(spike_in_bin, axis=0)

    # We use weights to account for situations were an input caused more
    # than one spike.
    z_edges = []
    # One receptive field
    if z.shape[1] == 1:
        # Inner edges span +/- n_std std around the mean; outermost bins
        # are open-ended via -inf/+inf.
        edges = np.linspace(z.mean() - n_std * z.std(),
                            z.mean() + n_std * z.std(), n_bins - 1)
        edges = np.insert(edges, 0, -np.inf)
        edges = np.append(edges, np.inf)
        # P(z)
        z_count, edges = np.histogram(z.ravel(), edges)
        # P(z|spike)
        weights = y[y > 0]
        z_count_spike, edges = np.histogram(z_spike.ravel(),
                                            edges,
                                            weights=weights.ravel())
        z_count = z_count[:, None]
        z_count_spike = z_count_spike[:, None]
        z_edges.append(edges)
    # Two receptive fields
    elif z.shape[1] >= 2:
        edges_row = np.linspace(z[:, 0].mean() - n_std * z[:, 0].std(),
                                z[:, 0].mean() + n_std * z[:, 0].std(),
                                n_bins - 1)
        edges_row = np.insert(edges_row, 0, -np.inf)
        edges_row = np.append(edges_row, np.inf)
        edges_col = np.linspace(z[:, 1].mean() - n_std * z[:, 1].std(),
                                z[:, 1].mean() + n_std * z[:, 1].std(),
                                n_bins - 1)
        edges_col = np.insert(edges_col, 0, -np.inf)
        edges_col = np.append(edges_col, np.inf)
        # P(z)
        z_count, edges_row, edges_col = \
            np.histogram2d(z[:, 0].ravel(),
                           z[:, 1].ravel(),
                           [edges_row, edges_col])
        # P(z|spike)
        weights = y[y > 0]
        z_count_spike, edges_row, edges_col = \
            np.histogram2d(z_spike[:, 0].ravel(),
                           z_spike[:, 1].ravel(),
                           [edges_row, edges_col],
                           weights=weights)
        z_edges.append(edges_row)
        z_edges.append(edges_col)

    if z.shape[1] > 2:
        # Parenthesized print() works on both Python 2 and 3; the original
        # Python 2 print statement made this module unimportable under 3.
        print("Warning! Probability distributions are only evaluated using "
              "the first two filters in LN-models with more than two filters.")

    # Normalize counts into probability distributions
    p_z = np.float64(z_count) / np.sum(z_count)
    p_z_spike = np.float64(z_count_spike) / np.sum(z_count_spike)

    # Manipulates the last score bin edge to make sure that also the
    # largest score falls into the last bin
    for dim in range(len(z_edges)):
        z_edges[dim][-1] += 1e-10

    return p_z, p_z_spike, z_edges
def calculate_r(vec_1, vec_2):
    """ Calculates the pearson r correlation coefficient

    Args:
        vec_1: first vector
        vec_2: second vector

    Returns:
        Raises:
    """
    # Flatten both inputs so arbitrary shapes can be compared
    v1 = vec_1.ravel()
    v2 = vec_2.ravel()

    # Covariance of the centered vectors over the product of the standard
    # deviations -- should be equal to scipy.stats.pearsonr
    cov = np.mean((v1 - np.mean(v1)) * (v2 - np.mean(v2)))
    return cov / np.std(v1) / np.std(v2)
def load_mat_dat_file(file_name):
    """ Load simulated or recorded data

    :param file_name: file name including path
    :return data: dict with stimulus, response, and metadata fields
    """
    extension = file_name[-3:]
    # Separate behaviour for pickled Python *.dat files
    if extension == 'dat':
        data = pickle.load(open(file_name, 'rb'))
    # and Matlab *.mat files
    elif extension == 'mat':
        raw = loadmat(file_name)
        data = {'x': np.float64(raw['x']),
                'x_labels': [label[0] for label in raw['x_labels'][0]],
                'x_ticks': [ticks.tolist() for ticks in raw['x_ticks'][0]],
                'y': np.float64(raw['y']),
                'name': raw['name'][0],
                'origin': raw['origin'][0],
                'params': {'dt': raw['dt_ms'][0, 0]}
                }
    else:
        raise Exception("Unknown file format: {}".format(extension))

    return data
def load_saved_models(load_path, tag=None):
    """ Load saved rf models in specified directory

    :param load_path: directory containing pickled model (*.dat) files
    :param tag: optional substring used to filter file names
    :return: list with un-pickled models, sorted by file name
    """
    models = []
    if load_path is not None:
        if os.path.isdir(load_path):
            contents = os.listdir(load_path)
            # Filter by tag
            if tag is not None:
                contents = [s for s in contents if tag in s]
            for file_name in sorted(contents):
                # Assume that all *.dat files are saved models
                if file_name[-3:] == 'dat':
                    model = pickle.load(open(load_path + file_name, 'rb'))
                    models.append(model)
        else:
            # print() call form works on both Python 2 and 3; the original
            # print statements were Python 2 only.
            print("Provided model path does not exist!")
    else:
        print("No model path provided!")

    return models
def load_saved_models_old(results_path, result_files=None):
    """ Read pickled models

    Args:
        results_path: path to results folder (must end with a separator,
            since file names are appended by plain concatenation)
        result_files: stored files to read; defaults to every *.dat file
            in the folder

    Returns:
        all_fields: rfs and cfs in all files
        all_simulation_data: simulation data form all files

    Raises:
    """
    all_fields = []  # STRF, CF, and r-values
    all_simulation_data = []  # Configuration used

    # Copy the caller's list (or start fresh) instead of mutating a shared
    # mutable default argument -- the original accumulated file names
    # across calls.
    result_files = list(result_files) if result_files else []

    # Load all files with a *.dat extension if no file names are provided
    if len(result_files) == 0:
        for entry in os.listdir(results_path):
            if entry.endswith(".dat"):
                result_files.append(entry)

    for result_file in result_files:
        with open(results_path+result_file, 'rb') as handle:
            results = pickle.load(handle)

        n_models = len(results['models'])
        rfs = []
        rf_names = []
        cfs = []
        cf_names = []
        r_train = []
        r_test = []
        obj_fun_val = []
        for i in range(n_models):
            # Strip a trailing _suffix from the model name; otherwise tag
            # the name with the number of receptive fields
            name = results['models'][i].name
            if name.rfind('_') >= 0:
                name = name[0:name.rfind('_')]
            else:
                name += str(len(results['models'][i].rfs))
            for rf in results['models'][i].rfs:
                if len(rf) > 0:
                    # rf_tmp = rf['field']/np.linalg.norm(rf['field'])
                    rf_tmp = rf['field']
                    rfs.append(rf_tmp)
                    rf_names.append(name)
            for cf in results['models'][i].cfs:
                if len(cf) > 0:
                    # CFs are stored reversed along every axis
                    cfs.append(cf['field'][::-1, ::-1, ::-1])
                    cf_names.append(name)
            r_train.append(results['models'][i].r_train)
            r_test.append(results['models'][i].r_test)
            obj_fun_val.append(results['models'][i].obj_fun_val)

        tmp_dic = {'rfs': rfs,
                   'rf_names': rf_names,
                   'cfs': cfs,
                   'cf_names': cf_names,
                   'r_train': r_train,
                   'r_test': r_test,
                   'obj_fun_val': obj_fun_val}
        all_fields.append(tmp_dic)
        all_simulation_data.append(results['simulation_data'])

    return all_fields, all_simulation_data
| mit |
mozilla/MozDef | tests/mq/plugins/test_stackdriver.py | 3 | 9388 | from mozdef_util.utilities.toUTC import toUTC
from mq.plugins.stackdriver import message
class TestStackDriver(object):
    """Tests for the StackDriver pub/sub message plugin."""

    def setup(self):
        self.plugin = message()
        self.metadata = {"index": "events"}

    def _stackdriver_event(self):
        """Return a fresh copy of a representative StackDriver audit event.

        This fixture was previously duplicated verbatim in test_defaults
        and test_stackdriver; building it per call avoids the duplication
        and keeps tests from sharing (and possibly mutating) one dict.
        """
        return {
            "receivedtimestamp": "2019-11-21T22:43:10.041549+00:00",
            "mozdefhostname": "mozdefqa2.private.mdc1.mozilla.com",
            "details": {
                "insertId": "-81ga0vdqblo",
                "logName": "projects/mcd-001-252615/logs/cloudaudit.googleapis.com%2Fdata_access",
                "protoPayload": {
                    "@type": "type.googleapis.com/google.cloud.audit.AuditLog",
                    "authenticationInfo": {"principalEmail": "mpurzynski@gcp.infra.mozilla.com"},
                    "authorizationInfo": [
                        {
                            "granted": True,
                            "permission": "compute.instances.list",
                            "resourceAttributes": {
                                "name": "projects/mcd-001-252615",
                                "service": "resourcemanager",
                                "type": "resourcemanager.projects",
                            },
                        }
                    ],
                    "methodName": "beta.compute.instances.aggregatedList",
                    "numResponseItems": "61",
                    "request": {"@type": "type.googleapis.com/compute.instances.aggregatedList"},
                    "requestMetadata": {
                        "callerIp": "2620:101:80fb:224:2864:cebc:a1e:640c",
                        "callerSuppliedUserAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:72.0) Gecko/20100101 Firefox/72.0,gzip(gfe),gzip(gfe)",
                        "destinationAttributes": {},
                        "requestAttributes": {"auth": {}, "time": "2019-11-21T22:42:26.336Z"},
                    },
                    "resourceLocation": {"currentLocations": ["global"]},
                    "resourceName": "projects/mcd-001-252615/global/instances",
                    "serviceName": "compute.googleapis.com",
                },
                "receiveTimestamp": "2019-11-21T22:42:26.904624537Z",
                "resource": {
                    "labels": {
                        "location": "global",
                        "method": "compute.instances.aggregatedList",
                        "project_id": "mcd-001-252615",
                        "service": "compute.googleapis.com",
                        "version": "beta",
                    },
                    "type": "api",
                },
                "severity": "INFO",
                "timestamp": "2019-11-21T22:42:25.759Z",
            },
            "tags": ["projects/mcd-001-252615/subscriptions/mozdefsubscription", "pubsub"],
        }

    # Should never match and be modified by the plugin
    def test_nodetails_log(self):
        metadata = {"index": "events"}
        event = {"tags": "pubsub"}

        result, metadata = self.plugin.onMessage(event, metadata)
        # in = out - plugin didn't touch it
        assert result == event

    def verify_metadata(self, metadata):
        assert metadata["index"] == "events"

    def verify_defaults(self, result):
        assert result["category"] == "data_access"
        assert toUTC(result["receivedtimestamp"]).isoformat() == result["receivedtimestamp"]

    def test_defaults(self):
        event = self._stackdriver_event()
        result, metadata = self.plugin.onMessage(event, self.metadata)
        self.verify_defaults(result)
        self.verify_metadata(metadata)

    def test_nomatch_syslog(self):
        event = {
            "category": "syslog",
            "processid": "0",
            "receivedtimestamp": "2017-09-26T00:22:24.210945+00:00",
            "severity": "7",
            "utctimestamp": "2017-09-26T00:22:23+00:00",
            "timestamp": "2017-09-26T00:22:23+00:00",
            "hostname": "something1.test.com",
            "mozdefhostname": "something1.test.com",
            "summary": "Connection from 10.22.74.208 port 9071 on 10.22.74.45 pubsub stackdriver port 22\n",
            "eventsource": "systemslogs",
            "tags": "something",
            "details": {
                "processid": "21233",
                "sourceipv4address": "10.22.74.208",
                "hostname": "hostname1.subdomain.domain.com",
                "program": "sshd",
                "sourceipaddress": "10.22.74.208",
            },
        }

        result, metadata = self.plugin.onMessage(event, self.metadata)
        assert result["category"] == "syslog"
        assert result["eventsource"] == "systemslogs"
        assert result == event

    def test_nomatch_auditd(self):
        event = {
            "category": "execve",
            "processid": "0",
            "receivedtimestamp": "2017-09-26T00:36:27.463745+00:00",
            "severity": "INFO",
            "utctimestamp": "2017-09-26T00:36:27+00:00",
            "tags": ["audisp-json", "2.1.1", "audit"],
            "summary": "Execve: sh -c sudo squid proxy /usr/lib64/nagios/plugins/custom/check_auditd.sh",
            "processname": "audisp-json",
            "details": {
                "fsuid": "398",
                "tty": "(none)",
                "uid": "398",
                "process": "/bin/bash",
                "auditkey": "exec",
                "pid": "10553",
                "processname": "sh",
                "session": "16467",
                "fsgid": "398",
                "sgid": "398",
                "auditserial": "3834716",
                "inode": "1835094",
                "ouid": "0",
                "ogid": "0",
                "suid": "398",
                "originaluid": "0",
                "gid": "398",
                "originaluser": "pubsub",
                "ppid": "10552",
                "cwd": "/",
                "parentprocess": "stackdriver",
                "euid": "398",
                "path": "/bin/sh",
                "rdev": "00:00",
                "dev": "08:03",
                "egid": "398",
                "command": "sh -c sudo /usr/lib64/nagios/plugins/custom/check_auditd.sh",
                "mode": "0100755",
                "user": "squid",
            },
        }

        result, metadata = self.plugin.onMessage(event, self.metadata)
        assert result["category"] == "execve"
        assert "eventsource" not in result
        assert result == event

    def test_stackdriver(self):
        event = self._stackdriver_event()
        result, metadata = self.plugin.onMessage(event, self.metadata)
        assert result["category"] == "data_access"
        assert result["details"]["protoPayload"]["@type"] == "type.googleapis.com/google.cloud.audit.AuditLog"
| mpl-2.0 |
Jeff-Tian/mybnb | Python27/Lib/test/test_pkgimport.py | 13 | 2964 | import os, sys, string, random, tempfile, unittest
from test.test_support import run_unittest
class TestImport(unittest.TestCase):
def __init__(self, *args, **kw):
self.package_name = 'PACKAGE_'
while self.package_name in sys.modules:
self.package_name += random.choose(string.letters)
self.module_name = self.package_name + '.foo'
unittest.TestCase.__init__(self, *args, **kw)
def remove_modules(self):
for module_name in (self.package_name, self.module_name):
if module_name in sys.modules:
del sys.modules[module_name]
def setUp(self):
self.test_dir = tempfile.mkdtemp()
sys.path.append(self.test_dir)
self.package_dir = os.path.join(self.test_dir,
self.package_name)
os.mkdir(self.package_dir)
open(os.path.join(
self.package_dir, '__init__'+os.extsep+'py'), 'w').close()
self.module_path = os.path.join(self.package_dir, 'foo'+os.extsep+'py')
def tearDown(self):
for file in os.listdir(self.package_dir):
os.remove(os.path.join(self.package_dir, file))
os.rmdir(self.package_dir)
os.rmdir(self.test_dir)
self.assertNotEqual(sys.path.count(self.test_dir), 0)
sys.path.remove(self.test_dir)
self.remove_modules()
def rewrite_file(self, contents):
for extension in "co":
compiled_path = self.module_path + extension
if os.path.exists(compiled_path):
os.remove(compiled_path)
f = open(self.module_path, 'w')
f.write(contents)
f.close()
def test_package_import__semantics(self):
# Generate a couple of broken modules to try importing.
# ...try loading the module when there's a SyntaxError
self.rewrite_file('for')
try: __import__(self.module_name)
except SyntaxError: pass
else: raise RuntimeError, 'Failed to induce SyntaxError'
self.assertNotIn(self.module_name, sys.modules)
self.assertFalse(hasattr(sys.modules[self.package_name], 'foo'))
# ...make up a variable name that isn't bound in __builtins__
var = 'a'
while var in dir(__builtins__):
var += random.choose(string.letters)
# ...make a module that just contains that
self.rewrite_file(var)
try: __import__(self.module_name)
except NameError: pass
else: raise RuntimeError, 'Failed to induce NameError.'
# ...now change the module so that the NameError doesn't
# happen
self.rewrite_file('%s = 1' % var)
module = __import__(self.module_name).foo
self.assertEqual(getattr(module, var), 1)
def test_main():
    """Entry point used by regrtest to run this module's test cases."""
    run_unittest(TestImport)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
Lancher/tornado | tornado/test/locks_test.py | 5 | 16582 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
from datetime import timedelta
from tornado import gen, locks
from tornado.gen import TimeoutError
from tornado.testing import gen_test, AsyncTestCase
from tornado.test.util import unittest, skipBefore35, exec_test
class ConditionTest(AsyncTestCase):
    """Tests for locks.Condition: notification ordering, timeouts, and
    cleanup of timed-out waiters."""

    def setUp(self):
        super(ConditionTest, self).setUp()
        # Records the order in which waiters resolve and notifies fire.
        self.history = []

    def record_done(self, future, key):
        """Record the resolution of a Future returned by Condition.wait."""
        def callback(_):
            if not future.result():
                # wait() resolved to False, meaning it timed out.
                self.history.append('timeout')
            else:
                self.history.append(key)
        future.add_done_callback(callback)

    def loop_briefly(self):
        """Run all queued callbacks on the IOLoop.

        In these tests, this method is used after calling notify() to
        preserve the pre-5.0 behavior in which callbacks ran
        synchronously.
        """
        self.io_loop.add_callback(self.stop)
        self.wait()

    def test_repr(self):
        c = locks.Condition()
        self.assertIn('Condition', repr(c))
        self.assertNotIn('waiters', repr(c))
        c.wait()
        # An outstanding wait() shows up in the repr.
        self.assertIn('waiters', repr(c))

    @gen_test
    def test_notify(self):
        c = locks.Condition()
        self.io_loop.call_later(0.01, c.notify)
        yield c.wait()

    def test_notify_1(self):
        c = locks.Condition()
        self.record_done(c.wait(), 'wait1')
        self.record_done(c.wait(), 'wait2')
        c.notify(1)
        self.loop_briefly()
        self.history.append('notify1')
        c.notify(1)
        self.loop_briefly()
        self.history.append('notify2')
        self.assertEqual(['wait1', 'notify1', 'wait2', 'notify2'],
                         self.history)

    def test_notify_n(self):
        c = locks.Condition()
        for i in range(6):
            self.record_done(c.wait(), i)

        c.notify(3)
        self.loop_briefly()

        # Callbacks execute in the order they were registered.
        self.assertEqual(list(range(3)), self.history)
        c.notify(1)
        self.loop_briefly()
        self.assertEqual(list(range(4)), self.history)
        c.notify(2)
        self.loop_briefly()
        self.assertEqual(list(range(6)), self.history)

    def test_notify_all(self):
        c = locks.Condition()
        for i in range(4):
            self.record_done(c.wait(), i)

        c.notify_all()
        self.loop_briefly()
        self.history.append('notify_all')

        # Callbacks execute in the order they were registered.
        self.assertEqual(
            list(range(4)) + ['notify_all'],
            self.history)

    @gen_test
    def test_wait_timeout(self):
        c = locks.Condition()
        wait = c.wait(timedelta(seconds=0.01))
        self.io_loop.call_later(0.02, c.notify)  # Too late.
        yield gen.sleep(0.03)
        self.assertFalse((yield wait))

    @gen_test
    def test_wait_timeout_preempted(self):
        c = locks.Condition()

        # This fires before the wait times out.
        self.io_loop.call_later(0.01, c.notify)
        wait = c.wait(timedelta(seconds=0.02))
        yield gen.sleep(0.03)
        yield wait  # No TimeoutError.

    @gen_test
    def test_notify_n_with_timeout(self):
        # Register callbacks 0, 1, 2, and 3. Callback 1 has a timeout.
        # Wait for that timeout to expire, then do notify(2) and make
        # sure everyone runs. Verifies that a timed-out callback does
        # not count against the 'n' argument to notify().
        c = locks.Condition()
        self.record_done(c.wait(), 0)
        self.record_done(c.wait(timedelta(seconds=0.01)), 1)
        self.record_done(c.wait(), 2)
        self.record_done(c.wait(), 3)

        # Wait for callback 1 to time out.
        yield gen.sleep(0.02)
        self.assertEqual(['timeout'], self.history)

        c.notify(2)
        yield gen.sleep(0.01)
        # FIX: this assertion was accidentally duplicated on two
        # consecutive lines; one copy removed.
        self.assertEqual(['timeout', 0, 2], self.history)
        c.notify()
        yield
        self.assertEqual(['timeout', 0, 2, 3], self.history)

    @gen_test
    def test_notify_all_with_timeout(self):
        c = locks.Condition()
        self.record_done(c.wait(), 0)
        self.record_done(c.wait(timedelta(seconds=0.01)), 1)
        self.record_done(c.wait(), 2)

        # Wait for callback 1 to time out.
        yield gen.sleep(0.02)
        self.assertEqual(['timeout'], self.history)

        c.notify_all()
        yield
        self.assertEqual(['timeout', 0, 2], self.history)

    @gen_test
    def test_nested_notify(self):
        # Ensure no notifications lost, even if notify() is reentered by a
        # waiter calling notify().
        c = locks.Condition()

        # Three waiters.
        futures = [c.wait() for _ in range(3)]

        # First and second futures resolved. Second future reenters notify(),
        # resolving third future.
        futures[1].add_done_callback(lambda _: c.notify())
        c.notify(2)
        yield
        self.assertTrue(all(f.done() for f in futures))

    @gen_test
    def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        c = locks.Condition()
        for _ in range(101):
            c.wait(timedelta(seconds=0.01))

        future = c.wait()
        self.assertEqual(102, len(c._waiters))

        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(c._waiters))

        # Final waiter is still active.
        self.assertFalse(future.done())
        c.notify()
        self.assertTrue(future.done())
class EventTest(AsyncTestCase):
    """Tests for locks.Event: set/clear semantics and wait timeouts."""

    def test_repr(self):
        event = locks.Event()
        # IDIOM: assertIn/assertNotIn instead of assertTrue('x' in s);
        # the failure message then shows both operands.
        self.assertIn('clear', str(event))
        self.assertNotIn('set', str(event))
        event.set()
        self.assertNotIn('clear', str(event))
        self.assertIn('set', str(event))

    def test_event(self):
        e = locks.Event()
        future_0 = e.wait()
        e.set()
        future_1 = e.wait()
        e.clear()
        future_2 = e.wait()
        # Waiters before and during the set state are resolved; a waiter
        # registered after clear() is not.
        self.assertTrue(future_0.done())
        self.assertTrue(future_1.done())
        self.assertFalse(future_2.done())

    @gen_test
    def test_event_timeout(self):
        e = locks.Event()
        with self.assertRaises(TimeoutError):
            yield e.wait(timedelta(seconds=0.01))

        # After a timed-out waiter, normal operation works.
        self.io_loop.add_timeout(timedelta(seconds=0.01), e.set)
        yield e.wait(timedelta(seconds=1))

    def test_event_set_multiple(self):
        e = locks.Event()
        e.set()
        e.set()  # Setting an already-set event is a no-op.
        self.assertTrue(e.is_set())

    def test_event_wait_clear(self):
        e = locks.Event()
        f0 = e.wait()
        e.clear()
        f1 = e.wait()
        e.set()
        self.assertTrue(f0.done())
        self.assertTrue(f1.done())
class SemaphoreTest(AsyncTestCase):
    """Tests for locks.Semaphore: acquisition order, timeouts, and
    cleanup of timed-out waiters."""

    def test_negative_value(self):
        # A semaphore may not be created with a negative initial value.
        self.assertRaises(ValueError, locks.Semaphore, value=-1)

    def test_repr(self):
        sem = locks.Semaphore()
        self.assertIn('Semaphore', repr(sem))
        self.assertIn('unlocked,value:1', repr(sem))
        sem.acquire()
        self.assertIn('locked', repr(sem))
        self.assertNotIn('waiters', repr(sem))
        sem.acquire()
        # The second acquire cannot complete, so it shows up as a waiter.
        self.assertIn('waiters', repr(sem))

    def test_acquire(self):
        sem = locks.Semaphore()
        f0 = sem.acquire()
        self.assertTrue(f0.done())

        # Wait for release().
        f1 = sem.acquire()
        self.assertFalse(f1.done())
        f2 = sem.acquire()
        sem.release()
        # Waiters are resolved in FIFO order, one per release().
        self.assertTrue(f1.done())
        self.assertFalse(f2.done())
        sem.release()
        self.assertTrue(f2.done())

        sem.release()
        # Now acquire() is instant.
        self.assertTrue(sem.acquire().done())
        self.assertEqual(0, len(sem._waiters))

    @gen_test
    def test_acquire_timeout(self):
        sem = locks.Semaphore(2)
        yield sem.acquire()
        yield sem.acquire()

        acquire = sem.acquire(timedelta(seconds=0.01))
        self.io_loop.call_later(0.02, sem.release)  # Too late.
        yield gen.sleep(0.3)
        with self.assertRaises(gen.TimeoutError):
            yield acquire

        # The late release above restored one slot; consume it again.
        sem.acquire()
        f = sem.acquire()
        self.assertFalse(f.done())
        sem.release()
        self.assertTrue(f.done())

    @gen_test
    def test_acquire_timeout_preempted(self):
        sem = locks.Semaphore(1)
        yield sem.acquire()

        # This fires before the wait times out.
        self.io_loop.call_later(0.01, sem.release)
        acquire = sem.acquire(timedelta(seconds=0.02))
        yield gen.sleep(0.03)
        yield acquire  # No TimeoutError.

    def test_release_unacquired(self):
        # Unbounded releases are allowed, and increment the semaphore's value.
        sem = locks.Semaphore()
        sem.release()
        sem.release()

        # Now the counter is 3. We can acquire three times before blocking.
        self.assertTrue(sem.acquire().done())
        self.assertTrue(sem.acquire().done())
        self.assertTrue(sem.acquire().done())
        self.assertFalse(sem.acquire().done())

    @gen_test
    def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        sem = locks.Semaphore(value=0)
        futures = [sem.acquire(timedelta(seconds=0.01)) for _ in range(101)]

        future = sem.acquire()
        self.assertEqual(102, len(sem._waiters))

        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(sem._waiters))

        # Final waiter is still active.
        self.assertFalse(future.done())
        sem.release()
        self.assertTrue(future.done())

        # Prevent "Future exception was never retrieved" messages.
        for future in futures:
            self.assertRaises(TimeoutError, future.result)
class SemaphoreContextManagerTest(AsyncTestCase):
    """Semaphore used as a context manager, via both
    "with (yield sem.acquire())" and Python 3.5+ "async with"."""

    @gen_test
    def test_context_manager(self):
        sem = locks.Semaphore()
        with (yield sem.acquire()) as yielded:
            # The context manager itself yields no useful value.
            self.assertTrue(yielded is None)

        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())

    @skipBefore35
    @gen_test
    def test_context_manager_async_await(self):
        # Repeat the above test using 'async with'.
        sem = locks.Semaphore()

        namespace = exec_test(globals(), locals(), """
        async def f():
            async with sem as yielded:
                self.assertTrue(yielded is None)
        """)
        yield namespace['f']()

        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())

    @gen_test
    def test_context_manager_exception(self):
        sem = locks.Semaphore()
        with self.assertRaises(ZeroDivisionError):
            with (yield sem.acquire()):
                1 / 0

        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())

    @gen_test
    def test_context_manager_timeout(self):
        sem = locks.Semaphore()
        with (yield sem.acquire(timedelta(seconds=0.01))):
            pass

        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())

    @gen_test
    def test_context_manager_timeout_error(self):
        sem = locks.Semaphore(value=0)
        with self.assertRaises(gen.TimeoutError):
            with (yield sem.acquire(timedelta(seconds=0.01))):
                pass

        # Counter is still 0.
        self.assertFalse(sem.acquire().done())

    @gen_test
    def test_context_manager_contended(self):
        # Two coroutines contend for the semaphore; each must fully
        # acquire and release before the next one enters.
        sem = locks.Semaphore()
        history = []

        @gen.coroutine
        def f(index):
            with (yield sem.acquire()):
                history.append('acquired %d' % index)
                yield gen.sleep(0.01)
                history.append('release %d' % index)

        yield [f(i) for i in range(2)]

        expected_history = []
        for i in range(2):
            expected_history.extend(['acquired %d' % i, 'release %d' % i])

        self.assertEqual(expected_history, history)

    @gen_test
    def test_yield_sem(self):
        # Ensure we catch a "with (yield sem)", which should be
        # "with (yield sem.acquire())".
        with self.assertRaises(gen.BadYieldError):
            with (yield locks.Semaphore()):
                pass

    def test_context_manager_misuse(self):
        # Ensure we catch a "with sem", which should be
        # "with (yield sem.acquire())".
        with self.assertRaises(RuntimeError):
            with locks.Semaphore():
                pass
class BoundedSemaphoreTest(AsyncTestCase):
    """BoundedSemaphore must reject release() beyond its initial value."""

    def test_release_unacquired(self):
        semaphore = locks.BoundedSemaphore()
        # Releasing while already at the initial value overflows the bound.
        with self.assertRaises(ValueError):
            semaphore.release()
        # Counter drops to 0.
        semaphore.acquire()
        # This acquire() must block until a release.
        blocked = semaphore.acquire()
        self.assertFalse(blocked.done())
        semaphore.release()
        self.assertTrue(blocked.done())
        # Counter is back at 1, the bound; one more release overflows.
        semaphore.release()
        with self.assertRaises(ValueError):
            semaphore.release()
class LockTests(AsyncTestCase):
    """Tests for locks.Lock: FIFO fairness, timeouts and misuse errors."""

    def test_repr(self):
        lock = locks.Lock()
        # No errors.
        repr(lock)
        lock.acquire()
        repr(lock)

    def test_acquire_release(self):
        lock = locks.Lock()
        # First acquire succeeds immediately; the second must wait.
        self.assertTrue(lock.acquire().done())
        future = lock.acquire()
        self.assertFalse(future.done())
        lock.release()
        self.assertTrue(future.done())

    @gen_test
    def test_acquire_fifo(self):
        lock = locks.Lock()
        self.assertTrue(lock.acquire().done())
        N = 5
        history = []

        @gen.coroutine
        def f(idx):
            with (yield lock.acquire()):
                history.append(idx)

        futures = [f(i) for i in range(N)]
        self.assertFalse(any(future.done() for future in futures))
        lock.release()
        yield futures
        # Waiters are woken in the order they queued.
        self.assertEqual(list(range(N)), history)

    @skipBefore35
    @gen_test
    def test_acquire_fifo_async_with(self):
        # Repeat the above test using `async with lock:`
        # instead of `with (yield lock.acquire()):`.
        lock = locks.Lock()
        self.assertTrue(lock.acquire().done())
        N = 5
        history = []

        namespace = exec_test(globals(), locals(), """
        async def f(idx):
            async with lock:
                history.append(idx)
        """)
        futures = [namespace['f'](i) for i in range(N)]
        lock.release()
        yield futures
        self.assertEqual(list(range(N)), history)

    @gen_test
    def test_acquire_timeout(self):
        lock = locks.Lock()
        lock.acquire()
        with self.assertRaises(gen.TimeoutError):
            yield lock.acquire(timeout=timedelta(seconds=0.01))

        # Still locked.
        self.assertFalse(lock.acquire().done())

    def test_multi_release(self):
        # Releasing an unlocked lock is an error, before and after use.
        lock = locks.Lock()
        self.assertRaises(RuntimeError, lock.release)
        lock.acquire()
        lock.release()
        self.assertRaises(RuntimeError, lock.release)

    @gen_test
    def test_yield_lock(self):
        # Ensure we catch a "with (yield lock)", which should be
        # "with (yield lock.acquire())".
        with self.assertRaises(gen.BadYieldError):
            with (yield locks.Lock()):
                pass

    def test_context_manager_misuse(self):
        # Ensure we catch a "with lock", which should be
        # "with (yield lock.acquire())".
        with self.assertRaises(RuntimeError):
            with locks.Lock():
                pass
# Run the tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
hujiajie/pa-chromium | chrome/test/functional/media/audio_tools.py | 56 | 6590 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Audio tools for recording and analyzing audio.
The audio tools provided here are mainly to:
- record playing audio.
- remove silence from beginning and end of audio file.
- compare audio files using PESQ tool.
The tools are supported on Windows and Linux.
"""
import commands
import ctypes
import logging
import os
import re
import subprocess
import sys
import threading
import time
import pyauto_media
import pyauto
# Directory holding the private prebuilt audio tools (pesq, sox, ...).
_TOOLS_PATH = os.path.abspath(os.path.join(pyauto.PyUITest.DataDir(),
                                           'pyauto_private', 'media', 'tools'))

# True when running on any Windows platform.
WINDOWS = 'win32' in sys.platform

if WINDOWS:
  # On Windows the tools ship with the test data; the recorder is the
  # OS-provided SoundRecorder.exe.
  _PESQ_PATH = os.path.join(_TOOLS_PATH, 'pesq.exe')
  _SOX_PATH = os.path.join(_TOOLS_PATH, 'sox.exe')
  _AUDIO_RECORDER = r'SoundRecorder.exe'
  _FORCE_MIC_VOLUME_MAX_UTIL = os.path.join(_TOOLS_PATH,
                                            r'force_mic_volume_max.exe')
else:
  # On Linux, sox/arecord/pacmd are resolved from the system PATH.
  _PESQ_PATH = os.path.join(_TOOLS_PATH, 'pesq')
  _SOX_PATH = commands.getoutput('which sox')
  _AUDIO_RECORDER = commands.getoutput('which arecord')
  _PACMD_PATH = commands.getoutput('which pacmd')
class AudioRecorderThread(threading.Thread):
  """A thread that records audio out of the default audio output."""

  def __init__(self, duration, output_file, record_mono=False):
    """
    Args:
      duration: recording length in seconds.
      output_file: path the recording is written to.
      record_mono: record a single channel instead of stereo (Linux only).
    """
    threading.Thread.__init__(self)
    # Set to an error message on failure; callers check it after join().
    self.error = ''
    self._duration = duration
    self._output_file = output_file
    self._record_mono = record_mono

  def run(self):
    """Starts audio recording."""
    if WINDOWS:
      if self._record_mono:
        logging.error("Mono recording not supported on Windows yet!")

      # SoundRecorder.exe expects the duration as HH:MM:SS.
      duration = time.strftime('%H:%M:%S', time.gmtime(self._duration))
      cmd = [_AUDIO_RECORDER, '/FILE', self._output_file, '/DURATION',
             duration]

      # This is needed to run SoundRecorder.exe on Win-64 using Python-32 bit.
      ctypes.windll.kernel32.Wow64DisableWow64FsRedirection(
          ctypes.byref(ctypes.c_long()))
    else:
      num_channels = 1 if self._record_mono else 2
      cmd = [_AUDIO_RECORDER, '-d', self._duration, '-f', 'dat', '-c',
             str(num_channels), self._output_file]

    # Stringify everything (e.g. the numeric duration) for subprocess.
    cmd = [str(s) for s in cmd]
    logging.debug('Running command: %s', ' '.join(cmd))
    returncode = subprocess.call(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    if returncode != 0:
      self.error = 'Failed to record audio.'
    else:
      logging.debug('Finished recording audio into %s.', self._output_file)
def RunPESQ(audio_file_ref, audio_file_test, sample_rate=16000):
  """Runs PESQ to compare audio test file to a reference audio file.

  Args:
    audio_file_ref: The reference audio file used by PESQ.
    audio_file_test: The audio test file to compare.
    sample_rate: Sample rate used by PESQ algorithm, possible values are only
        8000 or 16000.

  Returns:
    A tuple of float values representing PESQ scores of the audio_file_ref
    and audio_file_test consecutively, or None if the scores could not be
    parsed from the PESQ output.
  """
  # Work around a bug in PESQ when the ref file path is > 128 chars. PESQ will
  # compute an incorrect score then (!), and the relative path to the ref file
  # should be a lot shorter than the absolute one.
  audio_file_ref = os.path.relpath(audio_file_ref)
  cmd = [_PESQ_PATH, '+%d' % sample_rate, audio_file_ref, audio_file_test]
  logging.debug('Running command: %s', ' '.join(cmd))
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  output, error = p.communicate()
  if p.returncode != 0:
    logging.error('Error running pesq: %s\n%s', output, error)

  # Last line of PESQ output shows the results. Example:
  # P.862 Prediction (Raw MOS, MOS-LQO):  = 4.180    4.319
  # FIX: use a raw string so \d is a regex escape rather than an invalid
  # Python string escape (DeprecationWarning on 3.6+, error in the future).
  result = re.search(r'Prediction.*= (\d{1}\.\d{3})\t(\d{1}\.\d{3})',
                     output)
  if not result or len(result.groups()) != 2:
    return None
  return (float(result.group(1)), float(result.group(2)))
def RemoveSilence(input_audio_file, output_audio_file):
  """Removes silence from beginning and end of the input_audio_file.

  Args:
    input_audio_file: The audio file to remove silence from.
    output_audio_file: The audio file to save the output audio.
  """
  # SOX documentation for silence command: http://sox.sourceforge.net/sox.html
  # Trimming both ends works by running the silence effect twice: once on
  # the file as-is, then once on its reverse, and finally reversing back.
  # Silence parameters are (in sequence):
  # ABOVE_PERIODS: The period for which silence occurs. Value 1 is used for
  # silence at beginning of audio.
  # DURATION: the amount of time in seconds that non-silence must be detected
  # before sox stops trimming audio.
  # THRESHOLD: value used to indicate what sample value is treates as silence.
  ABOVE_PERIODS = '1'
  DURATION = '2'
  THRESHOLD = '5%'

  silence_args = ['silence', ABOVE_PERIODS, DURATION, THRESHOLD]
  cmd = ([_SOX_PATH, input_audio_file, output_audio_file]
         + silence_args + ['reverse'] + silence_args + ['reverse'])
  logging.debug('Running command: %s', ' '.join(cmd))
  process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  output, error = process.communicate()
  if process.returncode != 0:
    logging.error('Error removing silence from audio: %s\n%s', output, error)
def ForceMicrophoneVolumeTo100Percent():
  """Sets the system recording (microphone/monitor) volume to maximum.

  On Windows this shells out to a WebRTC helper binary; on Linux it uses
  PulseAudio's pacmd. Failures are logged, not raised.
  """
  if WINDOWS:
    # The volume max util is implemented in WebRTC in
    # webrtc/tools/force_mic_volume_max/force_mic_volume_max.cc.
    if not os.path.exists(_FORCE_MIC_VOLUME_MAX_UTIL):
      raise Exception('Missing required binary %s.' %
                      _FORCE_MIC_VOLUME_MAX_UTIL)

    cmd = [_FORCE_MIC_VOLUME_MAX_UTIL]
  else:
    # The recording device id is machine-specific. We assume here it is called
    # Monitor of render (which corresponds to the id render.monitor). You can
    # list the available recording devices with pacmd list-sources.
    RECORDING_DEVICE_ID = 'render.monitor'
    # PulseAudio expresses 100% volume as 65536.
    HUNDRED_PERCENT_VOLUME = '65536'
    cmd = [_PACMD_PATH, 'set-source-volume', RECORDING_DEVICE_ID,
           HUNDRED_PERCENT_VOLUME]

  logging.debug('Running command: %s', ' '.join(cmd))
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  output, error = p.communicate()
  if p.returncode != 0:
    logging.error('Error forcing mic volume to 100%%: %s\n%s', output, error)
| bsd-3-clause |
bobthechemist/bustracker | weather.py | 1 | 2122 | import requests
from xml.etree import ElementTree
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
import time
def getWeatherMessages():
    """Grabs weather information from NOAA's NDFD service and returns a
    list of short messages describing today's forecast.

    Returns:
        list of str: a title line, a "Hi:.. Lo:.." temperature line, and
        one line per condition in the first populated forecast entry.
    """
    weather_url = 'http://graphical.weather.gov/xml/SOAP_server/ndfdXMLclient.php?whichClient=NDFDgen&lat=38.99&lon=-77.01&product=time-series&begin=2004-01-01T00%3A00%3A00&end=2019-08-01T00%3A00%3A00&Unit=e&maxt=maxt&mint=mint&temp=temp&wx=wx&tmpabv14d=tmpabv14d&tmpblw14d=tmpblw14d&prcpabv14d=prcpabv14d&prcpblw14d=prcpblw14d&precipa_r=precipa_r&sky_r=sky_r&temp_r=temp_r&Submit=Submit'
    npage = requests.get(weather_url).content
    ntree = ElementTree.fromstring(npage)
    weatherMessages = ["Today's Weather"]
    # First, get the max/min temps for the day
    tmax = 'N/A'
    tmin = 'N/A'
    for i in ntree.find('data').find('parameters').findall('temperature'):
        if i.attrib['type'] == 'maximum':
            tmax = i[1].text
        if i.attrib['type'] == 'minimum':
            tmin = i[1].text
    weatherMessages.append("Hi:" + tmax + " Lo:" + tmin)
    # Find the first populated weather forecast entry.
    # FIX: initialize forecast so a feed with no populated
    # <weather-conditions> can no longer raise NameError below.
    forecast = []
    for i in ntree.find('data').find('parameters').find('weather').iter('weather-conditions'):
        # FIX: Element.getchildren() is deprecated (removed in Python
        # 3.9); list(element) is the supported spelling.
        itemp = list(i)
        if len(itemp) > 0:
            forecast = itemp
            break
    # Now make the forecast results user friendly.
    for i in forecast:
        adj = ''
        if i.attrib['intensity'] != 'none':
            adj = i.attrib['intensity'] + " "
        weatherMessages.append(i.attrib['coverage'] + ' of ' + adj + i.attrib['weather-type'])
    # Return the list of weather messages
    return weatherMessages
def printWeather(fordisplay):
    """Scrolls a list of messages across the 16x2 character LCD.

    Messages longer than one 16-char row are split across both rows;
    anything beyond 32 chars is shown on a follow-up screen. Each screen
    is displayed for 3 seconds.

    Args:
        fordisplay: list of message strings to show.
    """
    for message in fordisplay:
        lcd.clear()
        if len(message) > 16:
            if len(message) > 32:
                # FIX: slices were [0:15]/[16:31]/[32:-1], silently
                # dropping the 16th, 32nd and final characters; slice end
                # indices are exclusive, so full rows are [0:16]/[16:32].
                lcd.message(message[0:16] + "\n" + message[16:32])
                time.sleep(3)
                lcd.clear()
                lcd.message(message[32:])
            else:
                # FIX: was [0:15] + "\n" + [16:-1], which dropped the
                # 16th and last characters.
                lcd.message(message[0:16] + "\n" + message[16:])
        else:
            lcd.message(message)
        time.sleep(3)
# Start LCD Panel (I2C bus 1, as on Raspberry Pi rev. 2 boards).
lcd = Adafruit_CharLCDPlate(busnum = 1)
lcd.clear()
# Brief startup banner so a blank fetch is distinguishable from a dead LCD.
lcd.message('LCD activated')
time.sleep(3)
# Fetch today's forecast, scroll it, then blank the display.
printWeather(getWeatherMessages())
lcd.clear()
| gpl-2.0 |
will-Do/tp-libvirt_v2v | libvirt/tests/src/virsh_cmd/pool/virsh_pool_create.py | 1 | 5240 | import os
import logging
from autotest.client.shared import error
from autotest.client import utils
from virttest import virsh
from virttest import xml_utils
from virttest import libvirt_storage
from virttest import libvirt_xml
from virttest.utils_test import libvirt as utlv
from provider import libvirt_version
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file could be given by tester
    or generated by dumpxml a pre-defined pool.

    :param test: the running test object (used for data dirs / env).
    :param params: test parameters dictionary.
    :param env: test environment object.
    :raises error.TestNAError: when the scenario is not applicable.
    :raises error.TestFail: when the command outcome does not match
        the expected (status_error) result.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_create_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")
    if "/PATH/TO/POOL.XML" in pool_xml_f:
        raise error.TestNAError("Please replace %s with valid pool xml file" %
                                pool_xml_f)
    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise error.TestFail("Pool %s already exist" % pool_name)

    emulated_image = "emulated_image"
    kwargs = {'image_size': '1G', 'source_path': source_path,
              'source_name': source_name, 'source_format': source_format}
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    if pre_def_pool:
        try:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                # Remove <uuid>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                utils.run(cmd)
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                utils.run(cmd)
                cmd = "sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s" % (new_pool_name, pool_xml_f)
                utils.run(cmd)
            else:
                # The transient pool will gone after destroyed
                virsh.pool_destroy(pool_name)
            new_source_format = params.get("new_pool_src_format")
            if new_source_format:
                cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                    source_format, new_source_format, pool_xml_f)
                utils.run(cmd)
        # FIX: "except Exception, e" is Python-2-only syntax; "as" works
        # on 2.6+ as well.
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **kwargs)
            raise error.TestError("Error occurred when prepare pool xml:\n %s"
                                  % e)

    # Create an invalid pool xml file
    if pool_xml_f == "invalid-pool-xml":
        tmp_xml_f = xml_utils.TempXMLFile()
        tmp_xml_f.write('"<pool><<<BAD>>><\'XML</name\>'
                        '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml_f.flush()
        pool_xml_f = tmp_xml_f.name

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.exists(pool_xml_f):
        f = open(pool_xml_f, 'r')
        try:
            logging.debug("Create pool from file:\n %s", f.read())
        finally:
            f.close()
    try:
        cmd_result = virsh.pool_create(pool_xml_f, option, ignore_status=True,
                                       debug=True, readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            utlv.check_actived_pool(pool_name)
            pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
            logging.debug("Pool detail: %s", pool_detail)
            if pool_detail['uuid'] == old_uuid:
                # FIX: the TestFail was instantiated but never raised, so
                # a reused UUID silently passed the test.
                raise error.TestFail("New created pool still use the old "
                                     "UUID %s" % old_uuid)
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
| gpl-2.0 |
matt-deboer/marathon-lb | tests/test_utils.py | 3 | 14884 | import unittest
from mock import Mock, patch
from common import cleanup_json
import utils
from utils import ServicePortAssigner
class TestUtils(unittest.TestCase):
    """Tests for utils.get_task_ip_and_ports across the Marathon app
    formats (IP-per-task discovery ports, Marathon 1.3/1.5 user
    networking, and host port mappings).

    NOTE: assertEquals is a deprecated unittest alias; replaced
    throughout with assertEqual.
    """

    def test_get_task_ip_and_ports_ip_per_task(self):
        app = {
            "ipAddress": {
                "discovery": {
                    "ports": [{"number": 123}, {"number": 234}]
                }
            },
        }
        task = {
            "id": "testtaskid",
            "ipAddresses": [{"ipAddress": "1.2.3.4"}]
        }

        result = utils.get_task_ip_and_ports(app, task)
        expected = ("1.2.3.4", [123, 234])
        self.assertEqual(result, expected)

    def test_get_task_ip_and_ports_ip_per_task_no_ip(self):
        app = {
            "ipAddress": {
                "discovery": {
                    "ports": [{"number": 123}, {"number": 234}]
                }
            },
        }
        # Task has not been assigned an IP yet.
        task = {
            "id": "testtaskid"
        }

        result = utils.get_task_ip_and_ports(app, task)
        expected = (None, None)
        self.assertEqual(result, expected)

    def test_get_task_ip_and_ports_ip_per_task_marathon13(self):
        app = {
            'ipAddress': {},
            'container': {
                'type': 'DOCKER',
                'docker': {
                    'network': 'USER',
                    'portMappings': [
                        {
                            'containerPort': 80,
                            'servicePort': 10000,
                        },
                        {
                            'containerPort': 81,
                            'servicePort': 10001,
                        },
                    ],
                },
            },
        }
        task = {
            "id": "testtaskid",
            "ipAddresses": [{"ipAddress": "1.2.3.4"}]
        }

        result = utils.get_task_ip_and_ports(app, task)
        expected = ("1.2.3.4", [80, 81])
        self.assertEqual(result, expected)

    def test_get_task_ip_and_ports_ip_per_task_no_ip_marathon13(self):
        app = {
            'ipAddress': {},
            'container': {
                'type': 'DOCKER',
                'docker': {
                    'network': 'USER',
                    'portMappings': [
                        {
                            'containerPort': 80,
                            'servicePort': 10000,
                        },
                        {
                            'containerPort': 81,
                            'servicePort': 10001,
                        },
                    ],
                },
            },
        }
        task = {
            "id": "testtaskid",
        }

        result = utils.get_task_ip_and_ports(app, task)
        expected = (None, None)
        self.assertEqual(result, expected)

    def test_get_task_ip_and_ports_ip_per_task_marathon15(self):
        app = {
            'container': {
                'type': 'DOCKER',
                'docker': {
                    'image': 'nginx'
                },
                'portMappings': [
                    {
                        'containerPort': 80,
                        'servicePort': 10000,
                    },
                    {
                        'containerPort': 81,
                        'servicePort': 10001,
                    },
                ]
            },
            'networks': [
                {
                    'mode': 'container',
                    'name': 'dcos'
                }
            ]
        }
        task = {
            "id": "testtaskid",
            "ipAddresses": [{"ipAddress": "1.2.3.4"}]
        }

        result = utils.get_task_ip_and_ports(app, task)
        expected = ("1.2.3.4", [80, 81])
        self.assertEqual(result, expected)

        # Without an assigned IP the lookup must fail cleanly.
        task_no_ip = {
            "id": "testtaskid",
        }

        result = utils.get_task_ip_and_ports(app, task_no_ip)
        expected = (None, None)
        self.assertEqual(result, expected)

    def test_get_task_ip_and_ports_portmapping_null(self):
        app = {
            'ipAddress': {},
            'container': {
                'type': 'DOCKER',
                'docker': {
                    'network': 'USER',
                    'portMappings': [{
                    }]
                },
            },
        }
        task = {
            "id": "testtaskid",
        }

        result = utils.get_task_ip_and_ports(app, task)
        expected = (None, None)
        self.assertEqual(result, expected)

    def test_get_task_ip_and_ports_port_map(self):
        app = {}
        task = {
            "id": "testtaskid",
            "ports": [234, 345, 567],
            "host": "agent1"
        }

        # Host networking: IP comes from resolving the agent hostname.
        with patch("utils.resolve_ip", return_value="1.2.3.4"):
            result = utils.get_task_ip_and_ports(app, task)
            expected = ("1.2.3.4", [234, 345, 567])
            self.assertEqual(result, expected)

    def test_get_task_ip_and_ports_port_map_no_ip(self):
        app = {}
        task = {
            "id": "testtaskid",
            "ports": [234, 345, 567],
            "host": "agent1"
        }

        # Unresolvable agent hostname means no usable address.
        with patch("utils.resolve_ip", return_value=None):
            result = utils.get_task_ip_and_ports(app, task)
            expected = (None, None)
            self.assertEqual(result, expected)
class TestServicePortAssigner(unittest.TestCase):
def setUp(self):
self.assigner = ServicePortAssigner()
self.assigner.set_ports(10000, 10020)
def test_no_assignment_ports_not_set(self):
"""
Test that no assignments are made if the port values are not set.
"""
assigner = ServicePortAssigner()
app = _get_app(idx=1, num_ports=3, num_tasks=1)
# No ports set
self.assertEquals(assigner.get_service_ports(app), [])
def test_not_ip_per_task(self):
"""
Test a non-IP-per-task app returns the service ports defined in the
app data.
"""
app = _get_app(ip_per_task=False, inc_service_ports=True)
self.assertEquals(self.assigner.get_service_ports(app),
[100, 101, 102])
def test_ip_per_task_with_ports(self):
"""
Test an IP-per-task app returns the service ports defined in the
app data.
"""
app = _get_app(ip_per_task=True, inc_service_ports=True)
self.assertEquals(self.assigner.get_service_ports(app),
[100, 101, 102])
def test_ip_per_task_no_clash(self):
"""
Check that the same ports are assigned are assigned for task-per-IP
apps and are based on the number of host ports but not the actual
ports themselves.
"""
# When assigning a single port for apps with index 1 and 2 there are
# no clashes.
app1 = _get_app(idx=1, num_ports=1, num_tasks=1)
app2 = _get_app(idx=2, num_ports=1, num_tasks=1)
# Store the ports assigned for app1 and app2
ports1 = self.assigner.get_service_ports(app1)
ports2 = self.assigner.get_service_ports(app2)
# Check we get returned the same ports.
self.assertEquals(ports2, self.assigner.get_service_ports(app2))
self.assertEquals(ports1, self.assigner.get_service_ports(app1))
# Now reset the assigner, and assign in a different order. Check the
# ports are still the same.
self.assigner.reset()
self.assertEquals(ports2, self.assigner.get_service_ports(app2))
self.assertEquals(ports1, self.assigner.get_service_ports(app1))
def test_ip_per_task_clash(self):
"""
Check that the same ports will not be assigned if there are clashes
and we assign in a different order.
"""
# When assigning 5 ports for apps with index 1 and 3 there are
# clashes.
app1 = _get_app(idx=1, num_ports=5, num_tasks=1)
app2 = _get_app(idx=3, num_ports=5, num_tasks=1)
# Store the ports assigned for app1 and app2
ports1 = self.assigner.get_service_ports(app1)
ports2 = self.assigner.get_service_ports(app2)
# Check we get returned the same ports.
self.assertEquals(ports2, self.assigner.get_service_ports(app2))
self.assertEquals(ports1, self.assigner.get_service_ports(app1))
# Now reset the assigner, and assign in a different order. Check the
# ports are not the same.
self.assigner.reset()
self.assertNotEquals(ports2, self.assigner.get_service_ports(app2))
self.assertNotEquals(ports1, self.assigner.get_service_ports(app1))
def test_ip_per_task_max_clash(self):
"""
Check that ports are assigned by linear scan when we max out the
clashes.
"""
app = _get_app(idx=1, num_ports=10, num_tasks=1)
# Mock out the hashlib functions so that all hashes return 0.
sha1 = Mock()
sha1.hexdigest.return_value = "0" * 64
with patch("hashlib.sha1", return_value=sha1):
ports = self.assigner.get_service_ports(app)
self.assertEquals(ports, list(range(10000, 10010)))
def test_ip_per_task_exhausted(self):
"""
Check that ports are returned as None when the ports list is
exhausted.
"""
# Create an app with 2 more discovery ports than we are able to
# allocate. Check the last two ports are unassigned, and check all
# ports are allocated from the correct range.
app = _get_app(idx=1, num_ports=24, num_tasks=1)
ports = self.assigner.get_service_ports(app)
self.assertEquals(ports[-3:], [None] * 3)
self.assertEquals(sorted(ports[:-3]), list(range(10000, 10021)))
def test_ip_per_task_marathon13(self):
app = {
'ipAddress': {},
'container': {
'type': 'DOCKER',
'docker': {
'network': 'USER',
'portMappings': [
{
'containerPort': 80,
'servicePort': 10000,
},
{
'containerPort': 81,
'servicePort': 10001,
},
],
},
},
'tasks': [{
"id": "testtaskid",
"ipAddresses": [{"ipAddress": "1.2.3.4"}]
}],
}
self.assertEquals(self.assigner.get_service_ports(app),
[10000, 10001])
def test_ip_per_task_marathon15(self):
    """Marathon 1.5-style app: portMappings on the container, networks list."""
    app = {
        'container': {
            'type': 'DOCKER',
            'docker': {
                'image': 'nginx'
            },
            'portMappings': [
                {
                    'containerPort': 80,
                    'servicePort': 10000,
                },
                {
                    'containerPort': 81,
                    'servicePort': 10001,
                },
            ],
        },
        'networks': [
            {
                'mode': 'container',
                'name': 'dcos'
            }
        ],
        'tasks': [{
            "id": "testtaskid",
            "ipAddresses": [{"ipAddress": "1.2.3.4"}]
        }],
    }
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(self.assigner.get_service_ports(app),
                     [10000, 10001])
def test_ip_per_task_portMappings_empty(self):
    """An app with empty discovery ports and portMappings yields no ports."""
    app = {
        'ipAddress': {
            'networkName': 'testnet',
            'discovery': {
                'ports': []
            }
        },
        'container': {
            'type': 'DOCKER',
            'docker': {
                'network': 'USER',
                'portMappings': [],
            }
        },
        'tasks': [
            {
                'id': 'testtaskid',
                'ipAddresses': [{'ipAddress': '1.2.3.4'}],
                'ports': [],
                'host': '4.3.2.1'
            }
        ]
    }
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(self.assigner.get_service_ports(app), [])
def test_ip_per_task_portMappings_null(self):
    """With portMappings None, ports fall back to portDefinitions."""
    app = {
        'ipAddress': {},
        'container': {
            'type': 'DOCKER',
            'docker': {
                'network': 'USER',
                'portMappings': None,
            },
        },
        'tasks': [{
            "id": "testtaskid",
            "ipAddresses": [{"ipAddress": "1.2.3.4"}]
        }],
        "portDefinitions": [
            {
                'port': 10000,
            },
            {
                'port': 10001,
            },
        ],
    }
    # Calling cleanup_json because all entrypoints to get_service_ports
    # also call cleanup_json, so None isn't expected at runtime
    self.assertEqual(self.assigner.get_service_ports(cleanup_json(app)),
                     [10000, 10001])
def test_ip_per_task_portMappings_null_marathon15(self):
    """Marathon 1.5 layout with portMappings None and no portDefinitions."""
    app = {
        'container': {
            'type': 'DOCKER',
            'docker': {
                'image': 'nginx'
            },
            'portMappings': None
        },
        'networks': [
            {
                'mode': 'container',
                'name': 'dcos'
            }
        ],
        'tasks': [{
            "id": "testtaskid",
            "ipAddresses": [{"ipAddress": "1.2.3.4"}]
        }],
    }
    # Calling cleanup_json because all entrypoints to get_service_ports
    # also call cleanup_json, so None isn't expected at runtime
    self.assertEqual(self.assigner.get_service_ports(cleanup_json(app)),
                     [])
def _get_app(idx=1, num_ports=3, num_tasks=1, ip_per_task=True,
inc_service_ports=False):
app = {
"id": "app-%d" % idx,
"tasks": [_get_task(idx*10 + idx2) for idx2 in range(num_tasks)],
"portDefinitions": [],
"ipAddress": None,
}
if inc_service_ports:
app["portDefinitions"] = \
[{'port': p} for p in list(range(100, 100 + num_ports))]
if ip_per_task:
app["ipAddress"] = {
"discovery": {
"ports": [
{"number": port} for port in range(500, 500 + num_ports)
]
}
}
return app
def _get_task(idx):
return {
"id": "task-%d" % idx,
"ipAddresses": [{"ipAddress": "1.2.3.4"}]
}
| apache-2.0 |
40223145c2g18/c2g18 | w2/static/Brython2.0.0-20140209-164925/Lib/heapq.py | 208 | 17997 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
an usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedule other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
    """Push item onto heap, maintaining the heap invariant."""
    pos = len(heap)          # index the new item will occupy after append
    heap.append(item)
    _siftdown(heap, 0, pos)
def heappop(heap):
    """Pop the smallest item off the heap, maintaining the heap invariant."""
    tail = heap.pop()        # raises IndexError if the heap is empty
    if not heap:
        return tail
    smallest = heap[0]
    # Move the former last leaf to the root and let it sink into place.
    heap[0] = tail
    _siftup(heap, 0)
    return smallest
def heapreplace(heap, item):
    """Pop and return the current smallest value, and add the new item.

    This is more efficient than heappop() followed by heappush(), and can be
    more appropriate when using a fixed-size heap.  Note that the value
    returned may be larger than item!  That constrains reasonable uses of
    this routine unless written as part of a conditional replacement:

        if item > heap[0]:
            item = heapreplace(heap, item)
    """
    smallest = heap[0]       # raises IndexError if the heap is empty
    heap[0] = item
    _siftup(heap, 0)
    return smallest
def heappushpop(heap, item):
    """Fast version of a heappush followed by a heappop."""
    # If the heap is empty or item is no larger than the root, the push
    # would be immediately undone by the pop; hand item straight back.
    if not heap or not (heap[0] < item):
        return item
    item, heap[0] = heap[0], item
    _siftup(heap, 0)
    return item
def heapify(x):
    """Transform list into a heap, in-place, in O(len(x)) time."""
    # Sift down every internal node, last-first.  Nodes at index n//2 and
    # beyond are leaves and therefore already trivial heaps, so the largest
    # index worth visiting is n//2 - 1.
    for i in range(len(x) // 2 - 1, -1, -1):
        _siftup(x, i)
def _heappushpop_max(heap, item):
    """Maxheap version of a heappush followed by a heappop."""
    # Only exchange when the root is strictly larger than item.
    if not heap or not (item < heap[0]):
        return item
    item, heap[0] = heap[0], item
    _siftup_max(heap, 0)
    return item
def _heapify_max(x):
    """Transform list into a maxheap, in-place, in O(len(x)) time."""
    # Same bottom-up pass as heapify(), using the maxheap sift.
    for i in range(len(x) // 2 - 1, -1, -1):
        _siftup_max(x, i)
def nlargest(n, iterable):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, reverse=True)[:n]
    """
    if n < 0:
        return []
    it = iter(iterable)
    # Seed a min-heap with the first n items; its root is the cutoff that
    # each remaining item must beat to be kept.
    top = list(islice(it, n))
    if top:
        heapify(top)
        for candidate in it:
            heappushpop(top, candidate)
        top.sort(reverse=True)
    return top
def nsmallest(n, iterable):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable)[:n]
    """
    if n < 0:
        return []
    it = iter(iterable)
    # Seed a max-heap with the first n items; its root is the largest value
    # currently kept, i.e. the one each new candidate must undercut.
    bottom = list(islice(it, n))
    if bottom:
        _heapify_max(bottom)
        for candidate in it:
            _heappushpop_max(bottom, candidate)
        bottom.sort()
    return bottom
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
    """Restore the heap invariant at pos, whose child subtrees are heaps.

    Promotes the smaller child up the tree until reaching a leaf, then
    sifts the displaced item back toward the root (see the comment block
    above for why this beats the textbook early-exit variant).
    """
    size = len(heap)
    root = pos
    moving = heap[pos]
    left = 2 * pos + 1
    while left < size:
        right = left + 1
        # Pick the smaller of the two children (left wins ties).
        smaller = right if (right < size and not heap[left] < heap[right]) else left
        heap[pos] = heap[smaller]
        pos = smaller
        left = 2 * pos + 1
    # pos is now a leaf; drop the displaced item there and bubble it up.
    heap[pos] = moving
    _siftdown(heap, root, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
    """Maxheap variant of _siftup: promote the larger child until a leaf."""
    size = len(heap)
    root = pos
    moving = heap[pos]
    left = 2 * pos + 1
    while left < size:
        right = left + 1
        # Pick the larger of the two children (left wins ties).
        larger = right if (right < size and not heap[right] < heap[left]) else left
        heap[pos] = heap[larger]
        pos = larger
        left = 2 * pos + 1
    # pos is now a leaf; drop the displaced item there and bubble it up.
    heap[pos] = moving
    _siftdown_max(heap, root, pos)
# If available, use C implementation
try:
    # The star-import replaces the pure-Python definitions above with the
    # accelerated C versions of the same names.
    from _heapq import *
except ImportError:
    # No C accelerator available; the pure-Python code remains in effect.
    pass
def merge(*iterables):
    '''Merge multiple sorted inputs into a single sorted output.

    Similar to sorted(itertools.chain(*iterables)) but returns a generator,
    does not pull the data into memory all at once, and assumes that each of
    the input streams is already sorted (smallest to largest).

    >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
    [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]

    '''
    # Bind frequently used globals/builtins to locals for speed in the
    # inner loop.
    _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
    _len = len

    # Heap entries are mutable [value, iterator_index, next_callable] lists;
    # itnum breaks ties between equal values so comparisons never fall
    # through to comparing the bound methods.
    h = []
    h_append = h.append
    for itnum, it in enumerate(map(iter, iterables)):
        try:
            next = it.__next__
            h_append([next(), itnum, next])
        except _StopIteration:
            pass
    heapify(h)

    while _len(h) > 1:
        try:
            while True:
                # Mutate the root entry in place and re-sift it: cheaper
                # than a pop followed by a push.
                v, itnum, next = s = h[0]
                yield v
                s[0] = next()               # raises StopIteration when exhausted
                _heapreplace(h, s)          # restore heap condition
        except _StopIteration:
            _heappop(h)                     # remove empty iterator
    if h:
        # fast case when only a single iterator remains
        v, itnum, next = h[0]
        yield v
        yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key)[:n]
    """
    # n == 1: a single min() pass is cheapest whenever there is any input.
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        rest = chain(head, it)
        return [min(rest)] if key is None else [min(rest, key=key)]

    # When the input length is known and n covers it all, sorted() wins.
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key)[:n]

    # Decorate with a serial index so ties resolve by input order, select
    # with the plain helper, then strip the decoration again.
    if key is None:
        decorated = zip(iterable, count())
        return [pair[0] for pair in _nsmallest(n, decorated)]
    in1, in2 = tee(iterable)
    decorated = zip(map(key, in1), count(), in2)
    return [entry[2] for entry in _nsmallest(n, decorated)]
_nlargest = nlargest
def nlargest(n, iterable, key=None):
    """Find the n largest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key, reverse=True)[:n]
    """
    # n == 1: a single max() pass is cheapest whenever there is any input.
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        rest = chain(head, it)
        return [max(rest)] if key is None else [max(rest, key=key)]

    # When the input length is known and n covers it all, sorted() wins.
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key, reverse=True)[:n]

    # A decreasing serial index keeps the selection stable for equal keys.
    if key is None:
        decorated = zip(iterable, count(0, -1))
        return [pair[0] for pair in _nlargest(n, decorated)]
    in1, in2 = tee(iterable)
    decorated = zip(map(key, in1), count(0, -1), in2)
    return [entry[2] for entry in _nlargest(n, decorated)]
if __name__ == "__main__":
    # Simple sanity test: push a fixed data set, pop it back out, and show
    # that the pops come out in sorted order.
    heap = []
    data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
    for item in data:
        heappush(heap, item)
    sort = []
    while heap:
        sort.append(heappop(heap))
    print(sort)

    # Also run the doctest embedded in merge()'s docstring.
    import doctest
    doctest.testmod()
| gpl-2.0 |
WtfJoke/rtc2git | configuration.py | 2 | 12775 | import configparser
import os
import shlex
import shutil
import shell
import shouter
# Module-level state shared between read() and the set*() helpers below.
config = None      # cached ConfigObject built by read()
configfile = None  # ini file path read() falls back to when none is passed
user = None        # RTC user; may be pre-set via setUser() before read()
password = None    # RTC password; may be pre-set via setPassword()
stored = None      # when truthy, read() skips loading User/Password from the file
def read(configname=None):
    """Parse the given ini file into a ConfigObject and cache it globally.

    :param configname: path of the config file; defaults to the module-level
        `configfile` set via setconfigfile()
    :return: the freshly built ConfigObject (also stored in the global
        `config`)
    :raises IOError: when the file cannot be read
    """
    if not configname:
        global configfile
        configname = configfile
    parsedconfig = configparser.ConfigParser()
    if len(parsedconfig.read(configname)) < 1:
        raise IOError('unable to read %s' % configname)
    generalsection = parsedconfig['General']
    migrationsectionname = 'Migration'
    migrationsection = parsedconfig[migrationsectionname]
    miscsectionname = 'Miscellaneous'
    # Credentials injected via setUser()/setPassword()/setStored() take
    # precedence over the values in the file.
    global user
    if not user and not stored:
        user = generalsection['User']
    global password
    if not password and not stored:
        password = generalsection['Password']
    repositoryurl = generalsection['Repo']
    scmcommand = generalsection.get('ScmCommand', "lscm")
    shell.logcommands = parsedconfig.get(miscsectionname, 'LogShellCommands', fallback="False") == "True"
    shell.setencoding(generalsection.get('encoding'))
    rtcversion = generalsection.get('RTCVersion', "5")  # stray ';' removed
    workspace = shlex.quote(generalsection['WorkspaceName'])
    gitreponame = generalsection['GIT-Reponame']
    useexistingworkspace = generalsection.get('useExistingWorkspace', "False")
    useprovidedhistory = migrationsection.get('UseProvidedHistory', "False")
    useautomaticconflictresolution = migrationsection.get('UseAutomaticConflictResolution', "False")
    maxchangesetstoaccepttogether = migrationsection.get('MaxChangeSetsToAcceptTogether', "10")
    workdirectory = generalsection.get('Directory', os.getcwd())
    streamname = shlex.quote(migrationsection['StreamToMigrate'].strip())
    previousstreamname = migrationsection.get('PreviousStream', '').strip()
    baselines = getinitialcomponentbaselines(migrationsection.get('InitialBaseLines'))
    ignorefileextensionsproperty = parsedconfig.get(miscsectionname, 'IgnoreFileExtensions', fallback='')
    ignorefileextensions = parsesplittedproperty(ignorefileextensionsproperty)
    ignoredirectoriessproperty = parsedconfig.get(miscsectionname, 'IgnoreDirectories', fallback='')
    ignoredirectories = parsesplittedproperty(ignoredirectoriessproperty)
    includecomponentroots = parsedconfig.get(miscsectionname, 'IncludeComponentRoots', fallback="False")
    commitmessageprefix = migrationsection.get('CommitMessageWorkItemPrefix', "")
    gitattributesproperty = parsedconfig.get(migrationsectionname, 'Gitattributes', fallback='')
    gitattributes = parsesplittedproperty(gitattributesproperty)
    # Assemble the immutable ConfigObject through the fluent Builder.
    configbuilder = Builder().setuser(user).setpassword(password).setstored(stored).setrepourl(repositoryurl)
    configbuilder.setscmcommand(scmcommand).setrtcversion(rtcversion)
    configbuilder.setworkspace(workspace).setgitreponame(gitreponame).setrootfolder(os.getcwd())
    configbuilder.setuseexistingworkspace(useexistingworkspace).setuseprovidedhistory(useprovidedhistory)
    configbuilder.setuseautomaticconflictresolution(useautomaticconflictresolution)
    configbuilder.setmaxchangesetstoaccepttogether(maxchangesetstoaccepttogether)
    configbuilder.setworkdirectory(workdirectory).setstreamname(streamname).setinitialcomponentbaselines(baselines)
    configbuilder.setpreviousstreamname(previousstreamname)
    configbuilder.setignorefileextensions(ignorefileextensions)
    configbuilder.setignoredirectories(ignoredirectories)
    configbuilder.setincludecomponentroots(includecomponentroots).setcommitmessageprefix(commitmessageprefix)
    configbuilder.setgitattributes(gitattributes)
    global config
    config = configbuilder.build()
    return config
def get():
    """Return the cached ConfigObject, reading the config file on first use."""
    # read() stores its result in the module-level `config` and returns it.
    return config if config else read()
def setconfigfile(newconfigfile):
    """Set the ini file path that read() uses when no name is passed in."""
    global configfile
    configfile = newconfigfile


def setUser(newuser):
    """Pre-set the RTC user so read() does not take it from the file."""
    global user
    user = newuser


def setPassword(newpassword):
    """Pre-set the RTC password so read() does not take it from the file."""
    global password
    password = newpassword


def setStored(newstored):
    """When truthy, read() skips loading User/Password from the file."""
    global stored
    stored = newstored
def getinitialcomponentbaselines(definedbaselines):
    """Parse a 'Component=Baseline, Component2=Baseline2' string.

    :param definedbaselines: raw property value, possibly None/empty
    :return: list of ComponentBaseLineEntry, empty when nothing is defined
    """
    if not definedbaselines:
        return []
    entries = []
    for token in definedbaselines.split(","):
        pieces = token.split("=")
        component = pieces[0].strip()
        baseline = pieces[1].strip()
        # Names and identifiers start out identical; later steps may
        # overwrite them independently.
        entries.append(ComponentBaseLineEntry(component, baseline, component, baseline))
    return entries
def parsesplittedproperty(property, separator=';'):
    """Split *property* on *separator*, stripping whitespace from each piece.

    :param property: raw string, possibly None or empty
    :param separator: delimiter, ';' by default
    :return: a list of single properties, possibly empty
    """
    if not property:
        return []
    return [piece.strip() for piece in property.split(separator)]
class Builder:
    """Fluent builder that assembles a ConfigObject from individual settings.

    Every set*() method stores its value and returns self, so calls can be
    chained.  String flags are normalised to bool via isenabled(), and
    numeric settings are converted to int by their setters.
    """

    def __init__(self):
        self.user = ""
        self.password = ""
        self.stored = False
        self.repourl = ""
        self.scmcommand = "lscm"
        self.rtcversion = ""
        self.workspace = ""
        self.useexistingworkspace = ""
        self.useprovidedhistory = ""
        self.useautomaticconflictresolution = ""
        self.maxchangesetstoaccepttogether = ""
        # Defaults to the directory containing this module.
        self.workdirectory = os.path.dirname(os.path.realpath(__file__))
        self.rootFolder = self.workdirectory
        self.logFolder = self.rootFolder + os.sep + "Logs"
        self.hasCreatedLogFolder = os.path.exists(self.logFolder)
        self.initialcomponentbaselines = ""
        self.streamname = ""
        self.gitreponame = ""
        self.clonedgitreponame = ""
        self.previousstreamname = ""
        self.ignorefileextensions = ""
        self.ignoredirectories = ""
        self.includecomponentroots = ""
        self.commitmessageprefix = ""
        self.gitattributes = ""

    def setuser(self, user):
        self.user = user
        return self

    def setpassword(self, password):
        self.password = password
        return self

    def setstored(self, stored):
        self.stored = stored
        return self

    def setrepourl(self, repourl):
        self.repourl = repourl
        return self

    def setscmcommand(self, scmcommand):
        self.scmcommand = scmcommand
        return self

    def setrtcversion(self, scmversion):
        # Stored as int so callers can compare versions numerically.
        self.rtcversion = int(scmversion)
        return self

    def setworkspace(self, workspace):
        self.workspace = workspace
        return self

    def setworkdirectory(self, workdirectory):
        self.workdirectory = workdirectory
        return self

    def setrootfolder(self, rootfolder):
        self.rootFolder = rootfolder
        return self

    def setlogfolder(self, logfolder):
        self.logFolder = logfolder
        return self

    def setinitialcomponentbaselines(self, initialcomponentbaselines):
        self.initialcomponentbaselines = initialcomponentbaselines
        return self

    def setstreamname(self, streamname):
        self.streamname = streamname
        return self

    def setgitreponame(self, reponame):
        self.gitreponame = reponame
        self.clonedgitreponame = reponame[:-4]  # cut .git
        return self

    def setuseexistingworkspace(self, useexistingworkspace):
        self.useexistingworkspace = self.isenabled(useexistingworkspace)
        return self

    def setuseprovidedhistory(self, useprovidedhistory):
        self.useprovidedhistory = self.isenabled(useprovidedhistory)
        return self

    def setuseautomaticconflictresolution(self, useautomaticconflictresolution):
        self.useautomaticconflictresolution = self.isenabled(useautomaticconflictresolution)
        return self

    def setmaxchangesetstoaccepttogether(self, maxchangesetstoaccepttogether):
        self.maxchangesetstoaccepttogether = int(maxchangesetstoaccepttogether)
        return self

    def setpreviousstreamname(self, previousstreamname):
        self.previousstreamname = previousstreamname
        return self

    def setignorefileextensions(self, ignorefileextensions):
        self.ignorefileextensions = ignorefileextensions
        return self

    # NOTE(review): parameter name 'ignoreirectories' is a typo, but it is
    # part of the public keyword interface, so it is kept as-is.
    def setignoredirectories(self, ignoreirectories):
        self.ignoredirectories = ignoreirectories
        return self

    def setincludecomponentroots(self, includecomponentroots):
        self.includecomponentroots = self.isenabled(includecomponentroots)
        return self

    def setcommitmessageprefix(self, commitmessageprefix):
        self.commitmessageprefix = commitmessageprefix
        return self

    def setgitattributes(self, gitattributes):
        self.gitattributes = gitattributes
        return self

    @staticmethod
    def isenabled(stringwithbooleanexpression):
        # Strict string comparison: anything other than "True" is False.
        return stringwithbooleanexpression == "True"

    def build(self):
        """Create the ConfigObject from everything collected so far."""
        return ConfigObject(self.user, self.password, self.stored, self.repourl, self.scmcommand, self.rtcversion,
                            self.workspace,
                            self.useexistingworkspace, self.workdirectory, self.initialcomponentbaselines,
                            self.streamname, self.gitreponame, self.useprovidedhistory,
                            self.useautomaticconflictresolution, self.maxchangesetstoaccepttogether, self.clonedgitreponame, self.rootFolder,
                            self.previousstreamname, self.ignorefileextensions, self.ignoredirectories, self.includecomponentroots,
                            self.commitmessageprefix, self.gitattributes)
class ConfigObject:
    """Immutable-by-convention container for all rtc2git settings.

    Instances are created by Builder.build(); attributes are read all over
    the migration code, so their names form the public interface.
    """

    def __init__(self, user, password, stored, repourl, scmcommand, rtcversion, workspace, useexistingworkspace,
                 workdirectory,
                 initialcomponentbaselines, streamname, gitreponame, useprovidedhistory,
                 useautomaticconflictresolution, maxchangesetstoaccepttogether, clonedgitreponame, rootfolder, previousstreamname,
                 ignorefileextensions, ignoredirectories, includecomponentroots, commitmessageprefix, gitattributes):
        self.user = user
        self.password = password
        self.stored = stored
        self.repo = repourl
        self.scmcommand = scmcommand
        self.rtcversion = rtcversion
        self.workspace = workspace
        self.useexistingworkspace = useexistingworkspace
        self.useprovidedhistory = useprovidedhistory
        self.useautomaticconflictresolution = useautomaticconflictresolution
        self.maxchangesetstoaccepttogether = maxchangesetstoaccepttogether
        self.workDirectory = workdirectory
        self.initialcomponentbaselines = initialcomponentbaselines
        self.streamname = streamname
        self.gitRepoName = gitreponame
        self.clonedGitRepoName = clonedgitreponame
        self.rootFolder = rootfolder
        self.logFolder = rootfolder + os.sep + "Logs"
        self.hasCreatedLogFolder = os.path.exists(self.logFolder)
        # Stream UUIDs are filled in lazily by collectstreamuuids().
        self.streamuuid = ""
        self.previousstreamname = previousstreamname
        self.previousstreamuuid = ""
        self.ignorefileextensions = ignorefileextensions
        self.ignoredirectories = ignoredirectories
        self.includecomponentroots = includecomponentroots
        self.commitmessageprefix = commitmessageprefix
        self.gitattributes = gitattributes

    def getlogpath(self, filename):
        """Return the path for *filename* inside the log folder, creating it on first use."""
        if not self.hasCreatedLogFolder:
            os.makedirs(self.logFolder)
            self.hasCreatedLogFolder = True
        return self.logFolder + os.sep + filename

    def deletelogfolder(self):
        """Remove the log folder (and everything in it) if it was created."""
        if self.hasCreatedLogFolder:
            shutil.rmtree(self.logFolder)
            self.hasCreatedLogFolder = False

    def gethistorypath(self, filename):
        """Return the path for *filename* inside the 'History' folder."""
        historypath = self.rootFolder + os.sep + "History"
        return historypath + os.sep + filename

    def collectstreamuuid(self, streamname):
        """Look up the UUID of *streamname* via the scm command line.

        Returns None for a falsy stream name.
        """
        if not streamname:
            return
        shouter.shout("Get UUID of configured stream " + streamname)
        showuuidcommand = "%s --show-alias n --show-uuid y show attributes -r %s -w %s" % (
            self.scmcommand, self.repo, streamname)
        output = shell.getoutput(showuuidcommand)
        # First token of the first output line is the UUID wrapped in
        # brackets; strip one character from each end to unwrap it.
        splittedfirstline = output[0].split(" ")
        streamuuid = splittedfirstline[0].strip()[1:-1]
        return streamuuid

    def collectstreamuuids(self):
        """Resolve and cache the UUIDs of the current and previous streams."""
        self.streamuuid = self.collectstreamuuid(self.streamname)
        self.previousstreamuuid = self.collectstreamuuid(self.previousstreamname)
class ComponentBaseLineEntry:
    """Value object pairing a component with its baseline, plus display names."""

    def __init__(self, component, baseline, componentname, baselinename):
        self.component = component          # component identifier
        self.baseline = baseline            # baseline identifier
        self.componentname = componentname  # human-readable component name
        self.baselinename = baselinename    # human-readable baseline name
| mit |
manishpatell/erpcustomizationssaiimpex123qwe | addons/project_timesheet/__init__.py | 441 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_timesheet
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kaiyuanheshang/zulip | provision.py | 65 | 4968 | import os
import sys
import logging
import platform
try:
import sh
except ImportError:
import pbs as sh
# Platforms (vendor -> codenames) this provisioning script knows how to set up.
SUPPORTED_PLATFORMS = {
    "Ubuntu": [
        "trusty",
    ],
}

# System packages installed via apt-get, keyed by Ubuntu codename.
APT_DEPENDENCIES = {
    "trusty": [
        "closure-compiler",
        "libffi-dev",
        "memcached",
        "rabbitmq-server",
        "libldap2-dev",
        "redis-server",
        "postgresql-server-dev-all",
        "libmemcached-dev",
        "postgresql-9.3",
        "python-dev",
        "hunspell-en-us",
        "nodejs",
        "python-virtualenv",
        "supervisor",
        "git",
        "npm",
        "node-jquery",
        "yui-compressor",
        "puppet", # Used by lint-all
    ]
}

# TODO: backport node-{cssstyle,htmlparser2,nwmatcher} to trusty,
# so we can eliminate npm (above) and this section.
NPM_DEPENDENCIES = {
    "trusty": [
        "cssstyle",
        "htmlparser2",
        "nwmatcher",
    ]
}

# Install locations for the Python virtualenv and the Zulip checkout.
VENV_PATH="/srv/zulip-venv"
ZULIP_PATH="/srv/zulip"

# tsearch-extras is an extension to postgres's built-in full-text search.
# TODO: use a real APT repository
TSEARCH_URL_BASE = "https://dl.dropboxusercontent.com/u/283158365/zuliposs/"
TSEARCH_PACKAGE_NAME = {
    "trusty": "postgresql-9.3-tsearch-extras"
}
TSEARCH_VERSION = "0.1.2"
# TODO: this path is platform-specific!
TSEARCH_STOPWORDS_PATH = "/usr/share/postgresql/9.3/tsearch_data/"
REPO_STOPWORDS_PATH = os.path.join(
    ZULIP_PATH,
    "puppet",
    "zulip",
    "files",
    "postgresql",
    "zulip_english.stop",
)

# Keyword args that make `sh` subprocess calls stream their output to our
# stdout/stderr.
LOUD = dict(_out=sys.stdout, _err=sys.stderr)
def main():
    """Provision a Zulip development environment on supported Ubuntu.

    Installs APT and npm dependencies, the tsearch-extras postgres
    extension, phantomjs, builds a fresh virtualenv, and initializes the
    development and test databases.

    Returns a non-zero status on unsupported hardware or platforms; the
    result is handed to sys.exit() by the __main__ guard.
    """
    log = logging.getLogger("zulip-provisioner")
    # TODO: support other architectures
    if platform.architecture()[0] == '64bit':
        arch = 'amd64'
    else:
        log.critical("Only amd64 is supported.")
        # Bail out early: continuing would raise NameError on `arch` below.
        return 1

    vendor, version, codename = platform.dist()

    if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
        log.critical("Unsupported platform: {} {}".format(vendor, codename))
        # Previously execution fell through and attempted to provision anyway.
        return 1

    with sh.sudo:
        sh.apt_get.update(**LOUD)
        sh.apt_get.install(*APT_DEPENDENCIES["trusty"], assume_yes=True, **LOUD)

    temp_deb_path = sh.mktemp("package_XXXXXX.deb", tmpdir=True)

    # Fetch and install the tsearch-extras .deb for this architecture.
    sh.wget(
        "{}/{}_{}_{}.deb".format(
            TSEARCH_URL_BASE,
            TSEARCH_PACKAGE_NAME["trusty"],
            TSEARCH_VERSION,
            arch,
        ),
        output_document=temp_deb_path,
        **LOUD
    )
    with sh.sudo:
        sh.dpkg("--install", temp_deb_path, **LOUD)

    # Install phantomjs from the upstream tarball and symlink it onto PATH.
    with sh.sudo:
        PHANTOMJS_PATH = "/srv/phantomjs"
        PHANTOMJS_TARBALL = os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64.tar.bz2")
        sh.mkdir("-p", PHANTOMJS_PATH, **LOUD)
        sh.wget("https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-1.9.8-linux-x86_64.tar.bz2",
                output_document=PHANTOMJS_TARBALL, **LOUD)
        sh.tar("xj", directory=PHANTOMJS_PATH, file=PHANTOMJS_TARBALL, **LOUD)
        sh.ln("-sf", os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64", "bin", "phantomjs"),
              "/usr/local/bin/phantomjs", **LOUD)

    # Rebuild the virtualenv from scratch, owned by the invoking user.
    with sh.sudo:
        sh.rm("-rf", VENV_PATH, **LOUD)
        sh.mkdir("-p", VENV_PATH, **LOUD)
        sh.chown("{}:{}".format(os.getuid(), os.getgid()), VENV_PATH, **LOUD)
    sh.virtualenv(VENV_PATH, **LOUD)

    # Add the ./tools and ./scripts/setup directories inside the repository root to
    # the system path; we'll reference them later.
    orig_path = os.environ["PATH"]
    os.environ["PATH"] = os.pathsep.join((
        os.path.join(ZULIP_PATH, "tools"),
        os.path.join(ZULIP_PATH, "scripts", "setup"),
        orig_path
    ))

    # Put Python virtualenv activation in our .bash_profile.
    with open(os.path.expanduser('~/.bash_profile'), 'w+') as bash_profile:
        bash_profile.writelines([
            "source .bashrc\n",
            "source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),),
        ])

    # Switch current Python context to the virtualenv.
    # (execfile: this script targets Python 2, as does platform.dist above.)
    activate_this = os.path.join(VENV_PATH, "bin", "activate_this.py")
    execfile(activate_this, dict(__file__=activate_this))

    sh.pip.install(requirement=os.path.join(ZULIP_PATH, "requirements.txt"), **LOUD)

    with sh.sudo:
        sh.cp(REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH, **LOUD)

    # Add additional node packages for test-js-with-node.
    with sh.sudo:
        sh.npm.install(*NPM_DEPENDENCIES["trusty"], g=True, prefix="/usr", **LOUD)

    # Management commands expect to be run from the root of the project.
    os.chdir(ZULIP_PATH)

    os.system("tools/download-zxcvbn")
    os.system("tools/emoji_dump/build_emoji")
    os.system("generate_secrets.py -d")
    sh.configure_rabbitmq(**LOUD)
    sh.postgres_init_db(**LOUD)
    sh.do_destroy_rebuild_database(**LOUD)
    sh.postgres_init_test_db(**LOUD)
    sh.do_destroy_rebuild_test_database(**LOUD)
    return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
msabramo/hmac_cli | setup.py | 1 | 1142 | import os
from setuptools import setup

# Prefix the README with a newline so the PyPI long description renders
# cleanly after the one-line package summary.
this_dir = os.path.dirname(__file__)
long_description = "\n" + open(os.path.join(this_dir, 'README.rst')).read()
setup(
name='hmac_cli',
version='0.0.0',
description='Simple CLI for encrypting data with a private key, using HMAC',
long_description=long_description,
keywords='hmac',
author='Marc Abramowitz',
author_email='msabramo@gmail.com',
url='https://github.com/msabramo/hmac_cli',
py_modules=['hmac_cli'],
zip_safe=False,
install_requires=['click'],
entry_points="""\
[console_scripts]
hmac = hmac_cli:cli
""",
license='MIT',
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Testing',
'Natural Language :: English',
'Intended Audience :: Developers',
],
)
| mit |
rdeheele/odoo | addons/l10n_fr_rib/__init__.py | 433 | 1046 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Numérigraphe SARL.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
happyleavesaoc/home-assistant | homeassistant/components/light/sensehat.py | 15 | 3087 | """
Support for Sense Hat LEDs.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.sensehat/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, ATTR_RGB_COLOR, SUPPORT_RGB_COLOR,
Light, PLATFORM_SCHEMA)
from homeassistant.const import CONF_NAME
REQUIREMENTS = ['sense-hat==2.2.0']
_LOGGER = logging.getLogger(__name__)
SUPPORT_SENSEHAT = (SUPPORT_BRIGHTNESS | SUPPORT_RGB_COLOR)
DEFAULT_NAME = 'sensehat'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Sense Hat Light platform."""
    # Imported here rather than at module level so the platform can load
    # lazily once the sense-hat requirement has been installed.
    from sense_hat import SenseHat
    sensehat = SenseHat()
    # CONF_NAME defaults to 'sensehat' via PLATFORM_SCHEMA.
    name = config.get(CONF_NAME)
    add_devices([SenseHatLight(sensehat, name)])
class SenseHatLight(Light):
    """Expose the Sense Hat LED matrix as a single dimmable RGB light."""

    def __init__(self, sensehat, name):
        """Start switched off, at full brightness, colored white."""
        self._sensehat = sensehat
        self._name = name
        self._is_on = False
        self._brightness = 255
        self._rgb_color = [255, 255, 255]

    @property
    def name(self):
        """Name shown in the Home Assistant frontend."""
        return self._name

    @property
    def is_on(self):
        """Whether the matrix is currently lit."""
        return self._is_on

    @property
    def brightness(self):
        """Last brightness that was applied (0-255)."""
        return self._brightness

    @property
    def rgb_color(self):
        """Last color that was applied, as an [r, g, b] list (0-255 each)."""
        return self._rgb_color

    @property
    def supported_features(self):
        """Brightness and RGB color are supported."""
        return SUPPORT_SENSEHAT

    @property
    def should_poll(self):
        """State is pushed from here; the hardware is never polled."""
        return False

    @property
    def assumed_state(self) -> bool:
        """The real hardware state cannot be read back."""
        return True

    def turn_on(self, **kwargs):
        """Light the matrix, applying any requested brightness and color."""
        if ATTR_BRIGHTNESS in kwargs:
            self._brightness = kwargs[ATTR_BRIGHTNESS]
        if ATTR_RGB_COLOR in kwargs:
            self._rgb_color = kwargs[ATTR_RGB_COLOR]
        # Brightness is emulated by scaling each color channel.
        scale = self._brightness / 255
        scaled = [int(channel * scale) for channel in self._rgb_color]
        self._sensehat.clear(scaled[0], scaled[1], scaled[2])
        self._is_on = True
        self.schedule_update_ha_state()

    def turn_off(self, **kwargs):
        """Blank the LED matrix."""
        self._sensehat.clear()
        self._is_on = False
        self.schedule_update_ha_state()
| apache-2.0 |
jabesq/home-assistant | tests/components/dialogflow/test_init.py | 12 | 16306 | """The tests for the Dialogflow component."""
import json
from unittest.mock import Mock
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import dialogflow, intent_script
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
SESSION_ID = "a9b84cec-46b6-484e-8f31-f65dba03ae6d"
INTENT_ID = "c6a74079-a8f0-46cd-b372-5a934d23591c"
INTENT_NAME = "tests"
REQUEST_ID = "19ef7e78-fe15-4e94-99dd-0c0b1e8753c3"
REQUEST_TIMESTAMP = "2017-01-21T17:54:18.952Z"
CONTEXT_NAME = "78a5db95-b7d6-4d50-9c9b-2fc73a5e34c3_id_dialog_context"
@pytest.fixture
async def calls(hass, fixture):
    """Return a list of Dialogflow calls triggered."""
    # Registers a dummy 'test.dialogflow' service and records every
    # invocation so tests can assert how intents became service calls.
    calls = []

    @callback
    def mock_service(call):
        """Mock action call."""
        calls.append(call)

    hass.services.async_register('test', 'dialogflow', mock_service)

    return calls
@pytest.fixture
async def fixture(hass, aiohttp_client):
"""Initialize a Home Assistant server for testing this module."""
await async_setup_component(hass, dialogflow.DOMAIN, {
"dialogflow": {},
})
await async_setup_component(hass, intent_script.DOMAIN, {
"intent_script": {
"WhereAreWeIntent": {
"speech": {
"type": "plain",
"text": """
{%- if is_state("device_tracker.paulus", "home")
and is_state("device_tracker.anne_therese",
"home") -%}
You are both home, you silly
{%- else -%}
Anne Therese is at {{
states("device_tracker.anne_therese")
}} and Paulus is at {{
states("device_tracker.paulus")
}}
{% endif %}
""",
}
},
"GetZodiacHoroscopeIntent": {
"speech": {
"type": "plain",
"text": "You told us your sign is {{ ZodiacSign }}.",
}
},
"CallServiceIntent": {
"speech": {
"type": "plain",
"text": "Service called",
},
"action": {
"service": "test.dialogflow",
"data_template": {
"hello": "{{ ZodiacSign }}"
},
"entity_id": "switch.test",
}
}
}
})
hass.config.api = Mock(base_url='http://example.com')
result = await hass.config_entries.flow.async_init(
'dialogflow',
context={
'source': 'user'
}
)
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(
result['flow_id'], {})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
webhook_id = result['result'].data['webhook_id']
return await aiohttp_client(hass.http.app), webhook_id
async def test_intent_action_incomplete(fixture):
"""Test when action is not completed."""
mock_client, webhook_id = fixture
data = {
"id": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"result": {
"source": "agent",
"resolvedQuery": "my zodiac sign is virgo",
"speech": "",
"action": "GetZodiacHoroscopeIntent",
"actionIncomplete": True,
"parameters": {
"ZodiacSign": "virgo"
},
"metadata": {
"intentId": INTENT_ID,
"webhookUsed": "true",
"webhookForSlotFillingUsed": "false",
"intentName": INTENT_NAME
},
"fulfillment": {
"speech": "",
"messages": [
{
"type": 0,
"speech": ""
}
]
},
"score": 1
},
"status": {
"code": 200,
"errorType": "success"
},
"sessionId": SESSION_ID,
"originalRequest": None
}
response = await mock_client.post(
'/api/webhook/{}'.format(webhook_id),
data=json.dumps(data)
)
assert 200 == response.status
assert "" == await response.text()
async def test_intent_slot_filling(fixture):
"""Test when Dialogflow asks for slot-filling return none."""
mock_client, webhook_id = fixture
data = {
"id": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"result": {
"source": "agent",
"resolvedQuery": "my zodiac sign is",
"speech": "",
"action": "GetZodiacHoroscopeIntent",
"actionIncomplete": True,
"parameters": {
"ZodiacSign": ""
},
"contexts": [
{
"name": CONTEXT_NAME,
"parameters": {
"ZodiacSign.original": "",
"ZodiacSign": ""
},
"lifespan": 2
},
{
"name": "tests_ha_dialog_context",
"parameters": {
"ZodiacSign.original": "",
"ZodiacSign": ""
},
"lifespan": 2
},
{
"name": "tests_ha_dialog_params_zodiacsign",
"parameters": {
"ZodiacSign.original": "",
"ZodiacSign": ""
},
"lifespan": 1
}
],
"metadata": {
"intentId": INTENT_ID,
"webhookUsed": "true",
"webhookForSlotFillingUsed": "true",
"intentName": INTENT_NAME
},
"fulfillment": {
"speech": "What is the ZodiacSign?",
"messages": [
{
"type": 0,
"speech": "What is the ZodiacSign?"
}
]
},
"score": 0.77
},
"status": {
"code": 200,
"errorType": "success"
},
"sessionId": SESSION_ID,
"originalRequest": None
}
response = await mock_client.post(
'/api/webhook/{}'.format(webhook_id),
data=json.dumps(data)
)
assert 200 == response.status
assert "" == await response.text()
async def test_intent_request_with_parameters(fixture):
"""Test a request with parameters."""
mock_client, webhook_id = fixture
data = {
"id": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"result": {
"source": "agent",
"resolvedQuery": "my zodiac sign is virgo",
"speech": "",
"action": "GetZodiacHoroscopeIntent",
"actionIncomplete": False,
"parameters": {
"ZodiacSign": "virgo"
},
"contexts": [],
"metadata": {
"intentId": INTENT_ID,
"webhookUsed": "true",
"webhookForSlotFillingUsed": "false",
"intentName": INTENT_NAME
},
"fulfillment": {
"speech": "",
"messages": [
{
"type": 0,
"speech": ""
}
]
},
"score": 1
},
"status": {
"code": 200,
"errorType": "success"
},
"sessionId": SESSION_ID,
"originalRequest": None
}
response = await mock_client.post(
'/api/webhook/{}'.format(webhook_id),
data=json.dumps(data)
)
assert 200 == response.status
text = (await response.json()).get("speech")
assert "You told us your sign is virgo." == text
async def test_intent_request_with_parameters_but_empty(fixture):
"""Test a request with parameters but empty value."""
mock_client, webhook_id = fixture
data = {
"id": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"result": {
"source": "agent",
"resolvedQuery": "my zodiac sign is virgo",
"speech": "",
"action": "GetZodiacHoroscopeIntent",
"actionIncomplete": False,
"parameters": {
"ZodiacSign": ""
},
"contexts": [],
"metadata": {
"intentId": INTENT_ID,
"webhookUsed": "true",
"webhookForSlotFillingUsed": "false",
"intentName": INTENT_NAME
},
"fulfillment": {
"speech": "",
"messages": [
{
"type": 0,
"speech": ""
}
]
},
"score": 1
},
"status": {
"code": 200,
"errorType": "success"
},
"sessionId": SESSION_ID,
"originalRequest": None
}
response = await mock_client.post(
'/api/webhook/{}'.format(webhook_id),
data=json.dumps(data)
)
assert 200 == response.status
text = (await response.json()).get("speech")
assert "You told us your sign is ." == text
async def test_intent_request_without_slots(hass, fixture):
"""Test a request without slots."""
mock_client, webhook_id = fixture
data = {
"id": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"result": {
"source": "agent",
"resolvedQuery": "where are we",
"speech": "",
"action": "WhereAreWeIntent",
"actionIncomplete": False,
"parameters": {},
"contexts": [],
"metadata": {
"intentId": INTENT_ID,
"webhookUsed": "true",
"webhookForSlotFillingUsed": "false",
"intentName": INTENT_NAME
},
"fulfillment": {
"speech": "",
"messages": [
{
"type": 0,
"speech": ""
}
]
},
"score": 1
},
"status": {
"code": 200,
"errorType": "success"
},
"sessionId": SESSION_ID,
"originalRequest": None
}
response = await mock_client.post(
'/api/webhook/{}'.format(webhook_id),
data=json.dumps(data)
)
assert 200 == response.status
text = (await response.json()).get("speech")
assert "Anne Therese is at unknown and Paulus is at unknown" == \
text
hass.states.async_set("device_tracker.paulus", "home")
hass.states.async_set("device_tracker.anne_therese", "home")
response = await mock_client.post(
'/api/webhook/{}'.format(webhook_id),
data=json.dumps(data)
)
assert 200 == response.status
text = (await response.json()).get("speech")
assert "You are both home, you silly" == text
async def test_intent_request_calling_service(fixture, calls):
    """Test a request for calling a service.

    If this request is done async the test could finish before the action
    has been executed. Hard to test because it will be a race condition.
    """
    mock_client, webhook_id = fixture
    # Dialogflow v1 webhook payload whose action maps to CallServiceIntent,
    # which in turn fires the dummy 'test.dialogflow' service.
    data = {
        "id": REQUEST_ID,
        "timestamp": REQUEST_TIMESTAMP,
        "result": {
            "source": "agent",
            "resolvedQuery": "my zodiac sign is virgo",
            "speech": "",
            "action": "CallServiceIntent",
            "actionIncomplete": False,
            "parameters": {
                "ZodiacSign": "virgo"
            },
            "contexts": [],
            "metadata": {
                "intentId": INTENT_ID,
                "webhookUsed": "true",
                "webhookForSlotFillingUsed": "false",
                "intentName": INTENT_NAME
            },
            "fulfillment": {
                "speech": "",
                "messages": [
                    {
                        "type": 0,
                        "speech": ""
                    }
                ]
            },
            "score": 1
        },
        "status": {
            "code": 200,
            "errorType": "success"
        },
        "sessionId": SESSION_ID,
        "originalRequest": None
    }
    # Snapshot the call count so the assertion tolerates earlier calls.
    call_count = len(calls)
    response = await mock_client.post(
        '/api/webhook/{}'.format(webhook_id),
        data=json.dumps(data)
    )
    assert 200 == response.status
    assert call_count + 1 == len(calls)
    call = calls[-1]
    assert "test" == call.domain
    assert "dialogflow" == call.service
    assert ["switch.test"] == call.data.get("entity_id")
    assert "virgo" == call.data.get("hello")
async def test_intent_with_no_action(fixture):
"""Test an intent with no defined action."""
mock_client, webhook_id = fixture
data = {
"id": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"result": {
"source": "agent",
"resolvedQuery": "my zodiac sign is virgo",
"speech": "",
"action": "",
"actionIncomplete": False,
"parameters": {
"ZodiacSign": ""
},
"contexts": [],
"metadata": {
"intentId": INTENT_ID,
"webhookUsed": "true",
"webhookForSlotFillingUsed": "false",
"intentName": INTENT_NAME
},
"fulfillment": {
"speech": "",
"messages": [
{
"type": 0,
"speech": ""
}
]
},
"score": 1
},
"status": {
"code": 200,
"errorType": "success"
},
"sessionId": SESSION_ID,
"originalRequest": None
}
response = await mock_client.post(
'/api/webhook/{}'.format(webhook_id),
data=json.dumps(data)
)
assert 200 == response.status
text = (await response.json()).get("speech")
assert \
"You have not defined an action in your Dialogflow intent." == text
async def test_intent_with_unknown_action(fixture):
"""Test an intent with an action not defined in the conf."""
mock_client, webhook_id = fixture
data = {
"id": REQUEST_ID,
"timestamp": REQUEST_TIMESTAMP,
"result": {
"source": "agent",
"resolvedQuery": "my zodiac sign is virgo",
"speech": "",
"action": "unknown",
"actionIncomplete": False,
"parameters": {
"ZodiacSign": ""
},
"contexts": [],
"metadata": {
"intentId": INTENT_ID,
"webhookUsed": "true",
"webhookForSlotFillingUsed": "false",
"intentName": INTENT_NAME
},
"fulfillment": {
"speech": "",
"messages": [
{
"type": 0,
"speech": ""
}
]
},
"score": 1
},
"status": {
"code": 200,
"errorType": "success"
},
"sessionId": SESSION_ID,
"originalRequest": None
}
response = await mock_client.post(
'/api/webhook/{}'.format(webhook_id),
data=json.dumps(data)
)
assert 200 == response.status
text = (await response.json()).get("speech")
assert \
"This intent is not yet configured within Home Assistant." == text
| apache-2.0 |
forever342/naarad | lib/luminol/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py | 4 | 8691 | # coding=utf-8
"""
© 2014 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from collections import defaultdict
from copy import copy
import math
from luminol import exceptions
from luminol.algorithms.anomaly_detector_algorithms import AnomalyDetectorAlgorithm
from luminol.constants import *
from luminol.modules.time_series import TimeSeries
class BitmapDetector(AnomalyDetectorAlgorithm):
    """
    Bitmap Algorithm.
    This method breaks time series into chunks and uses the frequency of similar chunks
    to determine anomaly scores.
    The ideas are from this paper:
    Assumption-Free Anomaly Detection in Time Series(http://alumni.cs.ucr.edu/~ratana/SSDBM05.pdf).
    """
    def __init__(self, time_series, baseline_time_series=None, precision=None, lag_window_size=None,
                 future_window_size=None, chunk_size=None):
        """
        Initializer
        :param TimeSeries time_series: a TimeSeries object.
        :param TimeSeries baseline_time_series: baseline TimeSeries.
        :param int precision: how many sections to categorize values.
        :param int lag_window_size: lagging window size.
        :param int future_window_size: future window size.
        :param int chunk_size: chunk size.
        """
        super(BitmapDetector, self).__init__(self.__class__.__name__, time_series, baseline_time_series)
        # Absent or non-positive arguments fall back to module defaults.
        self.precision = precision if precision and precision > 0 else DEFAULT_BITMAP_PRECISION
        self.chunk_size = chunk_size if chunk_size and chunk_size > 0 else DEFAULT_BITMAP_CHUNK_SIZE
        if lag_window_size:
            self.lag_window_size = lag_window_size
        else:
            # Window sizes default to a fixed percentage of the series length.
            self.lag_window_size = int(self.time_series_length * DEFAULT_BITMAP_LAGGING_WINDOW_SIZE_PCT)
        if future_window_size:
            self.future_window_size = future_window_size
        else:
            self.future_window_size = int(self.time_series_length * DEFAULT_BITMAP_LEADING_WINDOW_SIZE_PCT)
        self._sanity_check()

    def _sanity_check(self):
        """
        Check if there are enough data points.
        Raises NotEnoughDataPoints when the series cannot fit both windows,
        and caps oversized windows at the module maximum.
        """
        windows = self.lag_window_size + self.future_window_size
        if (not self.lag_window_size or not self.future_window_size
                or self.time_series_length < windows or windows < DEFAULT_BITMAP_MINIMAL_POINTS_IN_WINDOWS):
            raise exceptions.NotEnoughDataPoints
        # If window size is too big, too many data points will be assigned a score of 0 in the first lag window
        # and the last future window.
        if self.lag_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
            self.lag_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS
        if self.future_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
            self.future_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS

    def _generate_SAX_single(self, sections, value):
        """
        Generate SAX representation(Symbolic Aggregate approXimation) for a single data point.
        Read more about it here: Assumption-Free Anomaly Detection in Time Series(http://alumni.cs.ucr.edu/~ratana/SSDBM05.pdf).
        :param dict sections: value sections.
        :param float value: value to be categorized.
        :return str: a SAX representation.
        """
        sax = 0
        # NOTE(review): the early `break` relies on sections.keys() iterating
        # section numbers in ascending order. _generate_SAX builds them in
        # order, but plain-dict ordering is only guaranteed on Python 3.7+ --
        # confirm the target runtime.
        for section_number in sections.keys():
            section_lower_bound = sections[section_number]
            if value >= section_lower_bound:
                sax = section_number
            else:
                break
        return str(sax)

    def _generate_SAX(self):
        """
        Generate SAX representation for all values of the time series.
        """
        sections = {}
        self.value_min = self.time_series.min()
        self.value_max = self.time_series.max()
        # Break the whole value range into different sections.
        section_height = (self.value_max - self.value_min) / self.precision
        for section_number in range(self.precision):
            sections[section_number] = self.value_min + section_number * section_height
        # Generate SAX representation.
        self.sax = ''.join(self._generate_SAX_single(sections, value) for value in self.time_series.values)

    def _construct_SAX_chunk_dict(self, sax):
        """
        Form a chunk frequency dictionary from a SAX representation.
        Counts every full-length sliding substring of length chunk_size.
        :param str sax: a SAX representation.
        :return dict: frequency dictionary for chunks in the SAX representation.
        """
        frequency = defaultdict(int)
        chunk_size = self.chunk_size
        length = len(sax)
        for i in range(length):
            if i + chunk_size <= length:
                chunk = sax[i: i + chunk_size]
                frequency[chunk] += 1
        return frequency

    def _construct_all_SAX_chunk_dict(self):
        """
        Construct the chunk dicts for lagging window and future window at each index.
        e.g: Suppose we have a SAX sequence as '1234567890', both window sizes are 3, and the chunk size is 2.
        The first index that has a lagging window is 3. For index equals 3, the lagging window has sequence '123',
        the chunk to leave lagging window(lw_leave_chunk) is '12', and the chunk to enter lagging window(lw_enter_chunk) is '34'.
        Therefore, given chunk dicts at i, to compute chunk dicts at i+1, simply decrement the count for lw_leave_chunk,
        and increment the count for lw_enter_chunk from chunk dicts at i. Same method applies to future window as well.
        """
        lag_dicts = {}
        fut_dicts = {}
        length = self.time_series_length
        lws = self.lag_window_size
        fws = self.future_window_size
        chunk_size = self.chunk_size
        for i in range(length):
            # If i is too small or too big, there will be no chunk dicts.
            # (Only lag_dicts gets an explicit None marker; fut_dicts entries
            # for skipped indices are simply absent -- callers never read them.)
            if i < lws or i > length - fws:
                lag_dicts[i] = None
            else:
                # Just enter valid range.
                # This branch runs exactly once, at i == lws, because the
                # skipped indices form a contiguous prefix/suffix.
                if lag_dicts[i - 1] is None:
                    lag_dict = self._construct_SAX_chunk_dict(self.sax[i - lws: i])
                    lag_dicts[i] = lag_dict
                    # i - lws == 0 here, so sax[0:chunk_size] is the chunk
                    # that leaves the lagging window on the next step.
                    lw_leave_chunk = self.sax[0:chunk_size]
                    lw_enter_chunk = self.sax[i - chunk_size + 1: i + 1]
                    fut_dict = self._construct_SAX_chunk_dict(self.sax[i: i + fws])
                    fut_dicts[i] = fut_dict
                    fw_leave_chunk = self.sax[i: i + chunk_size]
                    fw_enter_chunk = self.sax[i + fws + 1 - chunk_size: i + fws + 1]
                else:
                    # Update dicts according to leave_chunks and enter_chunks.
                    lag_dict = copy(lag_dicts[i - 1])
                    lag_dict[lw_leave_chunk] -= 1
                    lag_dict[lw_enter_chunk] += 1
                    lag_dicts[i] = lag_dict
                    fut_dict = copy(fut_dicts[i - 1])
                    fut_dict[fw_leave_chunk] -= 1
                    fut_dict[fw_enter_chunk] += 1
                    fut_dicts[i] = fut_dict
                    # Update leave_chunks and enter_chunks for the next step.
                    lw_leave_chunk = self.sax[i - lws: i - lws + chunk_size]
                    lw_enter_chunk = self.sax[i - chunk_size + 1: i + 1]
                    fw_leave_chunk = self.sax[i: i + chunk_size]
                    fw_enter_chunk = self.sax[i + fws + 1 - chunk_size: i + fws + 1]
        self.lag_dicts = lag_dicts
        self.fut_dicts = fut_dicts

    def _compute_anom_score_between_two_windows(self, i):
        """
        Compute distance difference between two windows' chunk frequencies,
        which is then marked as the anomaly score of the data point on the window boundary in the middle.
        :param int i: index of the data point between two windows.
        :return float: the anomaly score.
        """
        lag_window_chunk_dict = self.lag_dicts[i]
        future_window_chunk_dict = self.fut_dicts[i]
        score = 0
        # Squared euclidean distance between the two chunk-frequency vectors,
        # treating a chunk missing from one side as frequency 0.
        for chunk in lag_window_chunk_dict:
            if chunk in future_window_chunk_dict:
                score += math.pow(future_window_chunk_dict[chunk] - lag_window_chunk_dict[chunk], 2)
            else:
                score += math.pow(lag_window_chunk_dict[chunk], 2)
        for chunk in future_window_chunk_dict:
            if chunk not in lag_window_chunk_dict:
                score += math.pow(future_window_chunk_dict[chunk], 2)
        return score

    def _set_scores(self):
        """
        Compute anomaly scores for the time series by sliding both lagging window and future window.
        """
        anom_scores = {}
        self._generate_SAX()
        self._construct_all_SAX_chunk_dict()
        length = self.time_series_length
        lws = self.lag_window_size
        fws = self.future_window_size
        for i, timestamp in enumerate(self.time_series.timestamps):
            # Points without a full window on both sides get a zero score.
            if i < lws or i > length - fws:
                anom_scores[timestamp] = 0
            else:
                anom_scores[timestamp] = self._compute_anom_score_between_two_windows(i)
        self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
| apache-2.0 |
dougmassay/Sigil | src/Resource_Files/python3lib/xmlprocessor.py | 5 | 17142 | #!/usr/bin/env python3
import sys
import os
from sigil_bs4 import BeautifulSoup
from sigil_bs4.builder._lxml import LXMLTreeBuilderForXML
import re
from urllib.parse import unquote
from urllib.parse import urlsplit
from lxml import etree
from io import BytesIO
from opf_newparser import Opf_Parser
from hrefutils import startingDir, buildBookPath, buildRelativePath
from hrefutils import urldecodepart, urlencodepart
from collections import OrderedDict
TEXT_FOLDER_NAME = "Text"
ebook_xml_empty_tags = ["meta", "item", "itemref", "reference", "content"]
def get_void_tags(mtype):
    """Return the tag names serialized as void (self-closing) elements
    for the given ebook media type."""
    void_map = {
        "application/oebps-package+xml":
            ["item", "itemref", "mediatype", "mediaType", "reference"],
        "application/x-dtbncx+xml":
            ["meta", "reference", "content"],
        "application/smil+xml":
            ["text", "audio"],
        "application/oebps-page-map+xml":
            ["page"],
    }
    if mtype in void_map:
        return void_map[mtype]
    # Unknown media types fall back to the generic ebook empty-tag list.
    return ebook_xml_empty_tags
def _remove_xml_header(data):
newdata = data
return re.sub(r'<\s*\?xml\s*[^\?>]*\?*>\s*','',newdata, count=1,flags=re.I)
def _well_formed(data):
    """Return True if *data* parses as well-formed XML, False otherwise."""
    payload = data.encode('utf-8') if isinstance(data, str) else data
    try:
        # Strict parse (no recovery); any parse failure means not well formed.
        parser = etree.XMLParser(encoding='utf-8', recover=False, resolve_entities=True)
        etree.parse(BytesIO(payload), parser)
    except Exception:
        return False
    return True
def _reformat(data):
    """Force-parse *data* with lxml in recovery mode and reserialize it.

    Last-resort repair for badly broken markup; note that comments,
    processing instructions and CDATA sections are dropped in the process.
    """
    newdata = data
    if isinstance(newdata, str):
        newdata = newdata.encode('utf-8')
    parser = etree.XMLParser(encoding='utf-8', recover=True, ns_clean=True,
                        remove_comments=True, remove_pis=True, strip_cdata=True, resolve_entities=True)
    tree = etree.parse(BytesIO(newdata), parser)
    # Serialize without an xml declaration; callers re-add one if needed.
    newdata = etree.tostring(tree.getroot(),encoding='UTF-8', xml_declaration=False)
    return newdata
# does not support cdata sections yet
def _make_it_sane(data):
    """Heuristically repair grossly malformed markup so lxml can parse it.

    Strips comments, removes degenerate empty tags, then patches spans where
    a '<' opens before the previous tag closed ("double tag start") and
    where '>' appears twice without an intervening '<' ("double tag end").
    The fix-ups are order dependent; do not reorder them.
    """
    # first remove all comments as they may contain unescaped xml reserved characters
    # that will confuse the remaining _make_it_sane regular expressions
    comments = re.compile(r'''<!--.*?-->''', re.DOTALL)
    data = comments.sub("",data)
    # remove invalid tags that freak out lxml
    emptytag = re.compile(r'''(<\s*[/]*\s*>)''')
    data=emptytag.sub("", data);
    # handle double tag start
    badtagstart = re.compile(r'''(<[^>]*<)''')
    extrastart = re.compile(r'''<\s*<''');
    missingend = re.compile(r'''<\s*[a-zA-Z:]+[^<]*\s<''')
    startinattrib = re.compile(r'''<\s*[a-z:A-Z]+[^<]*["'][^<"']*<''')
    mo = badtagstart.search(data)
    while mo is not None:
        fixdata = data[mo.start(1):mo.end(1)]
        mextra = extrastart.match(fixdata)
        mmiss = missingend.match(fixdata)
        mattr = startinattrib.match(fixdata)
        if mextra is not None:
            # '< <' run: drop the leading '<'.
            fixdata = fixdata[1:]
        elif mattr is not None:
            # '<' inside a quoted attribute value: push it past the tag.
            fixdata = fixdata[0:-1] + "<"
        elif mmiss is not None:
            # Tag missing its '>' before the next tag opens: close it.
            fixdata = fixdata[0:-1].rstrip() + "> <"
        else:
            fixdata = "<" + fixdata[1:]
        data = data[0:mo.start(1)] + fixdata + data[mo.end(1):]
        mo = badtagstart.search(data)
    # handle double tag end
    badtagend = re.compile(r'''(>[^<]*>)''')
    extraend = re.compile(r'''>\s*>''');
    missingstart = re.compile(r'''>\s[^>]*[a-zA-Z:]+[^>]*>''')
    endinattrib = re.compile(r'''>[^>]*["'][^>'"]*>''')
    mo = badtagend.search(data)
    while mo is not None:
        fixdata = data[mo.start(1):mo.end(1)]
        mextra = extraend.match(fixdata)
        mmiss = missingstart.match(fixdata)
        mattr = endinattrib.match(fixdata)
        if mextra is not None:
            # '> >' run: drop the trailing '>'.
            fixdata = fixdata[0:-1]
        elif mattr is not None:
            # '>' inside a quoted attribute value: move it before the tag.
            fixdata = ">" + fixdata[1:]
        elif mmiss is not None:
            # Tag missing its '<': open a new one.
            fixdata = "> <" + fixdata[1:].lstrip()
        else:
            fixdata = fixdata[0:-1] + ">"
        data = data[0:mo.start(1)] + fixdata + data[mo.end(1):]
        mo = badtagend.search(data)
    return data
# ncx_text_pattern = re.compile(r'''(<text>)\s*(\S[^<]*\S)\s*(</text>)''',re.IGNORECASE)
# re.sub(ncx_text_pattern,r'\1\2\3',newdata)
# data is expectedd to be in unicode
def WellFormedXMLErrorCheck(data, mtype=""):
    """Parse *data* strictly with lxml and report the first error found.

    Returns [line, column, message] as strings; line == "-1" means the
    document is well formed.  *mtype* is currently unused here.
    """
    newdata = _remove_xml_header(data)
    if isinstance(newdata, str):
        newdata = newdata.encode('utf-8')
    line = "-1"
    column = "-1"
    message = "well-formed"
    try:
        parser = etree.XMLParser(encoding='utf-8', recover=False, resolve_entities=True)
        tree = etree.parse(BytesIO(newdata), parser)
    except Exception:
        # Generic fallback position when the parser gives no detail.
        line = "0"
        column = "0"
        message = "exception"
        # NOTE(review): if XMLParser() itself raised, `parser` is unbound
        # here and this access would raise NameError -- confirm intended.
        if len(parser.error_log) > 0:
            error = parser.error_log[0]
            message = error.message
            if isinstance(message, bytes):
                message = message.decode('utf-8')
            line = "%d" % error.line
            column = "%d" % error.column
        pass
    result = [line, column, message]
    return result
def IsWellFormedXML(data, mtype=""):
    """Return True when lxml reports no well-formedness errors for *data*."""
    # WellFormedXMLErrorCheck returns [line, column, message]; a line of
    # "-1" is its "no error found" sentinel.
    return WellFormedXMLErrorCheck(data, mtype)[0] == "-1"
# data is expected to be in unicode
# note: bs4 with lxml for xml strips whitespace so always prettyprint xml
def repairXML(data, mtype="", indent_chars="  "):
    """Repair and pretty-print an ebook XML document.

    Escalates through progressively heavier repairs: already well-formed
    input is returned untouched (except OPF files, which are always rebuilt
    below), otherwise regex clean-up and finally lxml recovery parsing are
    applied before re-serializing through sigil_bs4.
    """
    newdata = _remove_xml_header(data)
    okay = _well_formed(newdata)
    if okay:
        # OPF files always continue on to be rebuilt by Opf_Parser.
        if not mtype == "application/oebps-package+xml":
            return data
    if not okay:
        newdata = _make_it_sane(newdata)
        if not _well_formed(newdata):
            newdata = _reformat(newdata)
    # lxml requires utf-8 on Mac, won't work with unicode
    if isinstance(newdata, str):
        newdata = newdata.encode('utf-8')
    voidtags = get_void_tags(mtype)
    xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=voidtags)
    soup = BeautifulSoup(newdata, features=None, from_encoding="utf-8", builder=xmlbuilder)
    newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=indent_chars)
    if mtype == "application/oebps-package+xml":
        if isinstance(newdata, bytes):
            newdata = newdata.decode('utf-8')
        newdata = Opf_Parser(newdata).rebuild_opfxml()
    return newdata
# this is used after a xhtml file split to update hrefs in the ncx
def anchorNCXUpdates(data, ncx_bookpath, originating_bookpath, keylist, valuelist):
    """Update ncx <content src="..."> fragment targets after an xhtml split.

    keylist/valuelist are a serialized mapping from fragment id to the
    bookpath of the file that now owns that id.  Entries that pointed at
    *originating_bookpath* are re-targeted accordingly.
    """
    data = _remove_xml_header(data)
    # lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
    data = data.encode('utf-8')
    # rebuild serialized lookup dictionary (zip replaces the index loop)
    id_dict = OrderedDict(zip(keylist, valuelist))
    startdir = startingDir(ncx_bookpath)
    xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
    soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
    for tag in soup.find_all("content"):
        if "src" in tag.attrs:
            src = tag["src"]
            # skip scheme-qualified (external) urls
            if src.find(":") == -1:
                parts = src.split('#')
                apath = urldecodepart(parts[0])
                # convert this path to its target bookpath
                target_bookpath = buildBookPath(apath, startdir)
                # str.split never returns None, so the old "parts is not None"
                # guard was dead code and has been dropped
                if (len(parts) > 1) and (target_bookpath == originating_bookpath) and (parts[1] != ""):
                    fragment_id = urldecodepart(parts[1])
                    if fragment_id in id_dict:
                        target_bookpath = id_dict[fragment_id]
                    attribute_value = urlencodepart(buildRelativePath(ncx_bookpath, target_bookpath))
                    attribute_value = attribute_value + "#" + urlencodepart(fragment_id)
                    tag["src"] = attribute_value
    newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
    return newdata
def anchorNCXUpdatesAfterMerge(data, ncx_bookpath, sink_bookpath, merged_bookpaths):
    """Repoint ncx <content src="..."> entries at files merged into *sink_bookpath*.

    Any src whose target bookpath is in *merged_bookpaths* is rewritten to
    point at the merge sink, preserving its fragment identifier.
    """
    data = _remove_xml_header(data)
    startdir = startingDir(ncx_bookpath)
    # lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
    data = data.encode('utf-8')
    xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
    soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
    for tag in soup.find_all("content"):
        if "src" in tag.attrs:
            src = tag["src"]
            # skip scheme-qualified (external) urls
            if src.find(":") == -1:
                # str.split never returns None, so the old "parts is not None"
                # guard was dead code; the unused decoded 'fragment' local is gone
                parts = src.split('#')
                apath = urldecodepart(parts[0])
                target_bookpath = buildBookPath(apath, startdir)
                if target_bookpath in merged_bookpaths:
                    attribute_value = urlencodepart(buildRelativePath(ncx_bookpath, sink_bookpath))
                    # carry any fragment across to the merged target
                    if len(parts) > 1 and parts[1] != "":
                        attribute_value += "#" + urlencodepart(parts[1])
                    tag["src"] = attribute_value
    newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
    return newdata
def performNCXSourceUpdates(data, newbkpath, oldbkpath, keylist, valuelist):
    """Rewrite ncx <content src="..."> urls after the ncx moved from oldbkpath to newbkpath."""
    data = _remove_xml_header(data)
    # lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
    data = data.encode('utf-8')
    # rebuild serialized lookup dictionary
    updates = OrderedDict(zip(keylist, valuelist))
    xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
    soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
    olddir = startingDir(oldbkpath)
    for tag in soup.find_all("content"):
        if "src" not in tag.attrs:
            continue
        src = tag["src"]
        # leave scheme-qualified (external) urls alone
        if src.find(":") != -1:
            continue
        parts = src.split('#')
        apath = urldecodepart(parts[0])
        fragment = urldecodepart(parts[1]) if len(parts) > 1 else ""
        oldtarget = buildBookPath(apath, olddir)
        newtarget = updates.get(oldtarget, oldtarget)
        attribute_value = urlencodepart(buildRelativePath(newbkpath, newtarget))
        if fragment != "":
            attribute_value = attribute_value + "#" + urlencodepart(fragment)
        tag["src"] = attribute_value
    return soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
def performOPFSourceUpdates(data, newbkpath, oldbkpath, keylist, valuelist):
    """Rewrite opf href urls after the opf moved from oldbkpath to newbkpath."""
    data = _remove_xml_header(data)
    # lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
    data = data.encode('utf-8')
    # rebuild serialized lookup dictionary
    updates = OrderedDict(zip(keylist, valuelist))
    xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=ebook_xml_empty_tags)
    soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
    olddir = startingDir(oldbkpath)
    for tag in soup.find_all(["link", "item", "reference", "site"]):
        if "href" not in tag.attrs:
            continue
        href = tag["href"]
        # leave scheme-qualified (external) urls alone
        if href.find(":") != -1:
            continue
        parts = href.split('#')
        apath = urldecodepart(parts[0])
        fragment = urldecodepart(parts[1]) if len(parts) > 1 else ""
        oldtarget = buildBookPath(apath, olddir)
        newtarget = updates.get(oldtarget, oldtarget)
        attribute_value = urlencodepart(buildRelativePath(newbkpath, newtarget))
        if fragment != "":
            attribute_value = attribute_value + "#" + urlencodepart(fragment)
        tag["href"] = attribute_value
    return soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
# Note xml_updates has paths relative to the OEBPS folder as base
# As if they were meant only for OEBPS/content.opf and OEBPS/toc.ncx
# So adjust them to be relative to the Misc directory where .smil files live in Sigil
def performSMILUpdates(data, newbkpath, oldbkpath, keylist, valuelist):
    """Rewrite src/epub:textref urls in a smil file after it moved from oldbkpath to newbkpath.

    keylist/valuelist are a serialized mapping from old bookpath to new
    bookpath (already adjusted to be relative to the smil file's folder).
    """
    data = _remove_xml_header(data)
    # lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
    data = data.encode('utf-8')
    # rebuild serialized lookup dictionary of xml_updates, properly adjusted
    updates = OrderedDict(zip(keylist, valuelist))
    xml_empty_tags = ["text", "audio"]
    xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=xml_empty_tags)
    soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
    for tag in soup.find_all(["body", "seq", "text", "audio", "smil", "par"]):
        for att in ["src", "epub:textref"]:
            if att in tag.attrs:
                ref = tag[att]
                # leave scheme-qualified (external) urls alone
                if ref.find(":") == -1:
                    parts = ref.split('#')
                    apath = urldecodepart(parts[0])
                    fragment = ""
                    if len(parts) > 1:
                        # bug fix: previously called the undefined name
                        # urldecode(), raising NameError whenever a url
                        # carried a fragment; siblings use urldecodepart()
                        fragment = urldecodepart(parts[1])
                    oldtarget = buildBookPath(apath, startingDir(oldbkpath))
                    newtarget = updates.get(oldtarget, oldtarget)
                    attribute_value = urlencodepart(buildRelativePath(newbkpath, newtarget))
                    if fragment != "":
                        attribute_value = attribute_value + "#" + urlencodepart(fragment)
                    tag[att] = attribute_value
    newdata = soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
    return newdata
# Note xml_updates has urls/iris relative to the OEBPS folder as base
# As if they were meant only for OEBPS/content.opf and OEBPS/toc.ncx
# So adjust them to be relative to the Misc directory where page-map.xml lives
def performPageMapUpdates(data, newbkpath, oldbkpath, keylist, valuelist):
    """Rewrite page-map <page href="..."> urls after the file moved from oldbkpath to newbkpath."""
    data = _remove_xml_header(data)
    # lxml on a Mac does not seem to handle full unicode properly, so encode as utf-8
    data = data.encode('utf-8')
    # rebuild serialized lookup dictionary of xml_updates properly adjusted
    updates = OrderedDict(zip(keylist, valuelist))
    xmlbuilder = LXMLTreeBuilderForXML(parser=None, empty_element_tags=["page"])
    soup = BeautifulSoup(data, features=None, from_encoding="utf-8", builder=xmlbuilder)
    olddir = startingDir(oldbkpath)
    for tag in soup.find_all(["page"]):
        if "href" not in tag.attrs:
            continue
        ref = tag["href"]
        # leave scheme-qualified (external) urls alone
        if ref.find(":") != -1:
            continue
        parts = ref.split('#')
        apath = urldecodepart(parts[0])
        fragment = urldecodepart(parts[1]) if len(parts) > 1 else ""
        oldtarget = buildBookPath(apath, olddir)
        newtarget = updates.get(oldtarget, oldtarget)
        attribute_value = urlencodepart(buildRelativePath(newbkpath, newtarget))
        if fragment != "":
            attribute_value = attribute_value + "#" + urlencodepart(fragment)
        tag["href"] = attribute_value
    return soup.decodexml(indent_level=0, formatter='minimal', indent_chars=" ")
def main():
    """Command-line smoke test: repair the OPF file named on the command line
    (or a deliberately broken built-in sample when no file is given)."""
    argv = sys.argv
    opfxml = '''
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="BookId" version="2.0">
<metadata xmlns:mydc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
<mydc:identifier id="BookId" opf:scheme="UUID">urn:uuid:a418a8f1-dcbc-4c5d-a18f-533765e34ee8</mydc:identifier>
</metadata>
<manifest>
<!-- this has a lot of bad characters & < > \" \'-->
<item href="toc.ncx" id="ncx" media-type="application/x-dtbncx+xml" />
<item href="Text/Section0001.xhtml" id="Section0001.xhtml" media-type="application/xhtml+xml" />
</manifest>
<
<spine toc="ncx">
<itemref idref="Section0001.xhtml">
</spine>
<text>
this is a bunch of nonsense
</text>
<text>
this is a bunch of nonsense 1
</text>
<text>
this is a bunch of nonsense 2
</text>
<guide />
</package>
'''
    print(argv)
    # with no file argument the script's own name is argv[-1]; use the sample
    if not argv[-1].endswith("xmlprocessor.py"):
        with open(argv[-1], 'rb') as f:
            opfxml = f.read()
    if isinstance(opfxml, bytes):
        opfxml = opfxml.decode('utf-8')
    print(repairXML(opfxml, "application/oebps-package+xml"))
    return 0


if __name__ == '__main__':
    sys.exit(main())
| gpl-3.0 |
jullrich/dshieldhoneypot | kippo/kippo-0.8/kippo/commands/wget.py | 1 | 8802 | # Copyright (c) 2009 Upi Tamminen <desaster@gmail.com>
# See the COPYRIGHT file for more information
from kippo.core.honeypot import HoneyPotCommand
from kippo.core.fs import *
from twisted.web import client
from twisted.internet import reactor
import stat, time, urlparse, random, re, exceptions
import os.path
commands = {}
def tdiff(seconds):
    """Format a duration in seconds as a compact string, e.g. '1d 2h 3m 4s'.

    Zero-valued leading units are omitted; the seconds part always appears.
    """
    remaining = seconds
    days = int(remaining / (24 * 60 * 60))
    remaining -= days * 24 * 60 * 60
    hours = int(remaining / (60 * 60))
    remaining -= hours * 60 * 60
    minutes = int(remaining / 60)
    remaining -= minutes * 60
    # build the parts right-to-left, prepending each non-zero unit
    parts = ['%ds' % int(remaining)]
    if minutes >= 1:
        parts.insert(0, '%dm' % minutes)
    if hours >= 1:
        parts.insert(0, '%dh' % hours)
    if days >= 1:
        parts.insert(0, '%dd' % days)
    return ' '.join(parts)
def sizeof_fmt(num):
    """Format a byte count using 1024-based units, e.g. 2048 -> '2K'.

    Bug fix: the original fell off the end of the loop and implicitly
    returned None for values of 1024 TB and above; such values now
    format as petabytes.
    """
    for unit in ['bytes', 'K', 'M', 'G', 'T']:
        if num < 1024.0:
            return "%d%s" % (num, unit)
        num /= 1024.0
    # num has been divided down five times by this point
    return "%d%s" % (num, 'P')
# Luciano Ramalho @ http://code.activestate.com/recipes/498181/
def splitthousands(s, sep=','):
    """Insert *sep* between every group of three digits, right-aligned.

    e.g. '1234567' -> '1,234,567'.  Iterative rework of the recursive
    original (Luciano Ramalho, http://code.activestate.com/recipes/498181/).
    """
    chunks = []
    # peel off three characters at a time from the right
    while len(s) > 3:
        chunks.append(s[-3:])
        s = s[:-3]
    chunks.append(s)
    return sep.join(reversed(chunks))
class command_wget(HoneyPotCommand):
    """Fake 'wget' shell command for the kippo honeypot.

    Mimics wget's terminal output for the attacker while quietly saving
    the requested file into the honeypot's configured download
    (quarantine) directory for later analysis.
    NOTE: Python 2 / Twisted code.
    """

    def start(self):
        # the first non-option argument is taken as the URL; options are ignored
        url = None
        for arg in self.args:
            if arg.startswith('-'):
                continue
            url = arg.strip()
            break
        if not url:
            self.writeln('wget: missing URL')
            self.writeln('Usage: wget [OPTION]... [URL]...')
            self.nextLine()
            self.writeln('Try `wget --help\' for more options.')
            self.exit()
            return
        # like real wget, default to http:// when no scheme was given
        if '://' not in url:
            url = 'http://%s' % url
        urldata = urlparse.urlparse(url)
        # fake output filename shown to the attacker
        outfile = urldata.path.split('/')[-1]
        if not len(outfile.strip()) or not urldata.path.count('/'):
            outfile = 'index.html'
        self.url = url
        # download_limit_size (bytes) caps what gets written to disk; 0 = unlimited
        self.limit_size = 0
        cfg = self.honeypot.env.cfg
        if cfg.has_option('honeypot', 'download_limit_size'):
            self.limit_size = int(cfg.get('honeypot', 'download_limit_size'))
        # real on-disk quarantine name: <download_path>/<timestamp>_<sanitized url>
        self.safeoutfile = '%s/%s_%s' % \
            (cfg.get('honeypot', 'download_path'),
            time.strftime('%Y%m%d%H%M%S'),
            re.sub('[^A-Za-z0-9]', '_', url))
        self.deferred = self.download(url, outfile, self.safeoutfile)
        if self.deferred:
            self.deferred.addCallback(self.success)
            self.deferred.addErrback(self.error, url)

    def download(self, url, fakeoutfile, outputfile, *args, **kwargs):
        """Kick off the real HTTP download; returns the factory's deferred or None."""
        try:
            # client._parse is a private Twisted helper - it splits the url
            # into (scheme, host, port, path)
            scheme, host, port, path = client._parse(url)
            if scheme == 'https':
                self.writeln('Sorry, SSL not supported in this release')
                self.exit()
                return None
            elif scheme != 'http':
                raise exceptions.NotImplementedError
        except:
            self.writeln('%s: Unsupported scheme.' % (url,))
            self.exit()
            return None
        self.writeln('--%s-- %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), url))
        self.writeln('Connecting to %s:%d... connected.' % (host, port))
        self.write('HTTP request sent, awaiting response... ')
        factory = HTTPProgressDownloader(
            self, fakeoutfile, url, outputfile, *args, **kwargs)
        # optional fixed source address for outbound connections
        out_addr = None
        if self.honeypot.env.cfg.has_option('honeypot', 'out_addr'):
            out_addr = (self.honeypot.env.cfg.get('honeypot', 'out_addr'), 0)
        self.connection = reactor.connectTCP(
            host, port, factory, bindAddress=out_addr)
        return factory.deferred

    def ctrl_c(self):
        # emulate wget being interrupted by ^C: echo it and drop the connection
        self.writeln('^C')
        self.connection.transport.loseConnection()

    def success(self, data):
        self.exit()

    def error(self, error, url):
        # error may be a twisted Failure or a plain exception/string
        if hasattr(error, 'getErrorMessage'): # exceptions
            error = error.getErrorMessage()
        self.writeln(error)
        # Real wget also adds this:
        #self.writeln('%s ERROR 404: Not Found.' % \
        # time.strftime('%Y-%m-%d %T'))
        self.exit()
commands['/usr/bin/wget'] = command_wget
# from http://code.activestate.com/recipes/525493/
class HTTPProgressDownloader(client.HTTPDownloader):
    """HTTPDownloader that renders wget-style progress into the honeypot tty.

    Based on http://code.activestate.com/recipes/525493/.
    NOTE: Python 2 / Twisted code (print statements, dict.has_key).
    """

    def __init__(self, wget, fakeoutfile, url, outfile, headers=None):
        client.HTTPDownloader.__init__(self, url, outfile, headers=headers,
            agent='Wget/1.11.4')
        self.status = None
        self.wget = wget                # owning command_wget instance
        self.fakeoutfile = fakeoutfile  # filename shown to the attacker
        self.lastupdate = 0             # last progress-line refresh time
        self.started = time.time()
        self.proglen = 0                # width of the last progress line written
        self.nomore = False             # True once the size limit was hit

    def noPage(self, reason): # called for non-200 responses
        # 304 Not Modified is not an error: deliver an empty page instead
        if self.status == '304':
            client.HTTPDownloader.page(self, '')
        else:
            client.HTTPDownloader.noPage(self, reason)

    def gotHeaders(self, headers):
        if self.status == '200':
            self.wget.writeln('200 OK')
            if headers.has_key('content-length'):
                self.totallength = int(headers['content-length'][0])
            else:
                self.totallength = 0
            if headers.has_key('content-type'):
                self.contenttype = headers['content-type'][0]
            else:
                self.contenttype = 'text/whatever'
            self.currentlength = 0.0
            if self.totallength > 0:
                self.wget.writeln('Length: %d (%s) [%s]' % \
                    (self.totallength,
                    sizeof_fmt(self.totallength),
                    self.contenttype))
            else:
                self.wget.writeln('Length: unspecified [%s]' % \
                    (self.contenttype))
            # oversized downloads are diverted to /dev/null while the fake
            # terminal output keeps pretending the file is being saved
            if self.wget.limit_size > 0 and \
                    self.totallength > self.wget.limit_size:
                print 'Not saving URL (%s) due to file size limit' % \
                    (self.wget.url,)
                self.fileName = os.path.devnull
                self.nomore = True
            else:
                msg = 'Saving URL (%s) to %s' % (self.wget.url, self.fileName)
                self.wget.honeypot.logDispatch(msg)
                print msg
            self.wget.writeln('Saving to: `%s' % self.fakeoutfile)
            self.wget.honeypot.terminal.nextLine()
        return client.HTTPDownloader.gotHeaders(self, headers)

    def pagePart(self, data):
        if self.status == '200':
            self.currentlength += len(data)
            # if downloading files of unspecified size, this could happen:
            if not self.nomore and self.wget.limit_size > 0 and \
                    self.currentlength > self.wget.limit_size:
                print 'File limit reached, not saving any more data!'
                self.nomore = True
                self.file.close()
                self.fileName = os.path.devnull
                self.file = self.openFile(data)
            # throttle the progress line to at most one redraw per 0.5s
            if (time.time() - self.lastupdate) < 0.5:
                return client.HTTPDownloader.pagePart(self, data)
            if self.totallength:
                percent = (self.currentlength/self.totallength)*100
                spercent = "%i%%" % percent
            else:
                spercent = '%dK' % (self.currentlength/1000)
                percent = 0
            self.speed = self.currentlength / (time.time() - self.started)
            eta = (self.totallength - self.currentlength) / self.speed
            # wget-style progress bar: percent, 39-char bar, bytes, speed, eta
            s = '\r%s [%s] %s %dK/s eta %s' % \
                (spercent.rjust(3),
                ('%s>' % (int(39.0 / 100.0 * percent) * '=')).ljust(39),
                splitthousands(str(int(self.currentlength))).ljust(12),
                self.speed / 1000,
                tdiff(eta))
            # pad to the previous line's width so leftovers are overwritten
            self.wget.write(s.ljust(self.proglen))
            self.proglen = len(s)
            self.lastupdate = time.time()
        return client.HTTPDownloader.pagePart(self, data)

    def pageEnd(self):
        # only print the final summary when the expected length was reached
        if self.totallength != 0 and self.currentlength != self.totallength:
            return client.HTTPDownloader.pageEnd(self)
        self.wget.write('\r100%%[%s] %s %dK/s' % \
            ('%s>' % (38 * '='),
            splitthousands(str(int(self.totallength))).ljust(12),
            self.speed / 1000))
        self.wget.honeypot.terminal.nextLine()
        self.wget.honeypot.terminal.nextLine()
        self.wget.writeln(
            '%s (%d KB/s) - `%s\' saved [%d/%d]' % \
            (time.strftime('%Y-%m-%d %H:%M:%S'),
            self.speed / 1000,
            self.fakeoutfile, self.currentlength, self.totallength))
        # register the fake file in the honeypot filesystem (mode 33188 =
        # regular file 0644) and attach the real quarantine copy to it
        outfile = '%s/%s' % (self.wget.honeypot.cwd, self.fakeoutfile)
        self.wget.fs.mkfile(outfile, 0, 0, self.totallength, 33188)
        self.wget.fs.update_realfile(
            self.wget.fs.getfile(outfile),
            self.wget.safeoutfile)
        return client.HTTPDownloader.pageEnd(self)
# vim: set sw=4 et:
| gpl-2.0 |
pdellaert/ansible | lib/ansible/modules/network/cloudengine/ce_ntp.py | 6 | 20834 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_ntp
version_added: "2.4"
short_description: Manages core NTP configuration on HUAWEI CloudEngine switches.
description:
- Manages core NTP configuration on HUAWEI CloudEngine switches.
author:
- Zhijin Zhou (@QijunPan)
options:
server:
description:
- Network address of NTP server.
peer:
description:
- Network address of NTP peer.
key_id:
description:
- Authentication key identifier to use with given NTP server or peer.
is_preferred:
description:
- Makes given NTP server or peer the preferred NTP server or peer for the device.
choices: ['enable', 'disable']
vpn_name:
description:
- Makes the device communicate with the given
NTP server or peer over a specific vpn.
default: '_public_'
source_int:
description:
- Local source interface from which NTP messages are sent.
Must be fully qualified interface name, i.e. C(40GE1/0/22), C(vlanif10).
Interface types, such as C(10GE), C(40GE), C(100GE), C(Eth-Trunk), C(LoopBack),
C(MEth), C(NULL), C(Tunnel), C(Vlanif).
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: NTP test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Set NTP Server with parameters"
ce_ntp:
server: 192.8.2.6
vpn_name: js
source_int: vlanif4001
is_preferred: enable
key_id: 32
provider: "{{ cli }}"
- name: "Set NTP Peer with parameters"
ce_ntp:
peer: 192.8.2.6
vpn_name: js
source_int: vlanif4001
is_preferred: enable
key_id: 32
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"server": "2.2.2.2", "key_id": "48",
"is_preferred": "enable", "vpn_name":"js",
"source_int": "vlanif4002", "state":"present"}
existing:
description: k/v pairs of existing ntp server/peer
returned: always
type: dict
sample: {"server": "2.2.2.2", "key_id": "32",
"is_preferred": "disable", "vpn_name":"js",
"source_int": "vlanif4002"}
end_state:
description: k/v pairs of ntp info after module execution
returned: always
type: dict
sample: {"server": "2.2.2.2", "key_id": "48",
"is_preferred": "enable", "vpn_name":"js",
"source_int": "vlanif4002"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["ntp server 2.2.2.2 authentication-keyid 48 source-interface vlanif4002 vpn-instance js preferred"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config, set_nc_config
CE_NC_GET_NTP_CONFIG = """
<filter type="subtree">
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpUCastCfgs>
<ntpUCastCfg>
<addrFamily></addrFamily>
<vpnName></vpnName>
<ifName></ifName>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
<type></type>
<isPreferred></isPreferred>
<keyId></keyId>
</ntpUCastCfg>
</ntpUCastCfgs>
</ntp>
</filter>
"""
CE_NC_MERGE_NTP_CONFIG = """
<config>
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpUCastCfgs>
<ntpUCastCfg operation="merge">
<addrFamily>%s</addrFamily>
<ipv4Addr>%s</ipv4Addr>
<ipv6Addr>%s</ipv6Addr>
<type>%s</type>
<vpnName>%s</vpnName>
<keyId>%s</keyId>
<isPreferred>%s</isPreferred>
<ifName>%s</ifName>
<neid>0-0</neid>
</ntpUCastCfg>
</ntpUCastCfgs>
</ntp>
</config>
"""
CE_NC_DELETE_NTP_CONFIG = """
<config>
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpUCastCfgs>
<ntpUCastCfg operation="delete">
<addrFamily>%s</addrFamily>
<ipv4Addr>%s</ipv4Addr>
<ipv6Addr>%s</ipv6Addr>
<type>%s</type>
<vpnName>%s</vpnName>
<neid>0-0</neid>
</ntpUCastCfg>
</ntpUCastCfgs>
</ntp>
</config>
"""
def get_interface_type(interface):
    """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...

    Returns the lowercase type token, or None for an unrecognized name.
    """
    if interface is None:
        return None
    # ordered (prefix, type) pairs mirroring the original elif chain;
    # the first matching prefix wins
    prefix_map = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )
    upper_name = interface.upper()
    for prefix, iftype in prefix_map:
        if upper_name.startswith(prefix):
            return iftype
    return None
class Ntp(object):
    """Ntp class.

    Gathers module parameters, validates them, and adds or removes the
    corresponding NTP unicast server/peer configuration on a CloudEngine
    switch over NETCONF, reporting proposed/existing/end-state facts.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        # server and peer are two ways of naming the same remote endpoint
        self.mutually_exclusive = [('server', 'peer')]
        self.init_module()

        # ntp configuration info
        self.server = self.module.params['server'] or None
        self.peer = self.module.params['peer'] or None
        self.key_id = self.module.params['key_id']
        self.is_preferred = self.module.params['is_preferred']
        self.vpn_name = self.module.params['vpn_name']
        self.interface = self.module.params['source_int'] or ""
        self.state = self.module.params['state']
        self.ntp_conf = dict()
        # set True when the device already holds exactly the requested entry
        self.conf_exsit = False
        self.ip_ver = 'IPv4'

        if self.server:
            self.peer_type = 'Server'
            self.address = self.server
        elif self.peer:
            self.peer_type = 'Peer'
            self.address = self.peer
        else:
            self.peer_type = None
            self.address = None

        self.check_params()

        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = list()
        self.end_state = list()

        self.init_data()

    def init_data(self):
        """Init data: normalize interface case and fill parameter defaults."""
        if self.interface is not None:
            self.interface = self.interface.lower()

        if not self.key_id:
            self.key_id = ""

        if not self.is_preferred:
            self.is_preferred = 'disable'

    def init_module(self):
        """Init module"""
        required_one_of = [("server", "peer")]
        self.module = AnsibleModule(
            argument_spec=self.spec,
            supports_check_mode=True,
            required_one_of=required_one_of,
            mutually_exclusive=self.mutually_exclusive
        )

    def check_ipaddr_validate(self):
        """Check ipaddress validate; fails the module on an illegal address."""
        rule1 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.'
        rule2 = r'(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])'
        ipv4_regex = '%s%s%s%s%s%s' % ('^', rule1, rule1, rule1, rule2, '$')
        ipv6_regex = '^(?:[a-fA-F0-9]{1,4}:){7}[a-fA-F0-9]{1,4}$'

        flag = False
        if bool(re.match(ipv4_regex, self.address)):
            flag = True
            self.ip_ver = "IPv4"
            if not self.ntp_ucast_ipv4_validate():
                flag = False
        elif bool(re.match(ipv6_regex, self.address)):
            flag = True
            self.ip_ver = "IPv6"
        else:
            # NOTE(review): anything that is neither dotted-quad IPv4 nor
            # full-form IPv6 falls through here and is accepted as "IPv6"
            # (e.g. compressed '::1' forms, but also arbitrary strings) -
            # confirm this permissiveness is intended
            flag = True
            self.ip_ver = "IPv6"

        if not flag:
            if self.peer_type == "Server":
                self.module.fail_json(msg='Error: Illegal server ip-address.')
            else:
                self.module.fail_json(msg='Error: Illegal peer ip-address.')

    def ntp_ucast_ipv4_validate(self):
        """Check ntp ucast ipv4 address; True when usable as a unicast peer."""
        addr_list = re.findall(r'(.*)\.(.*)\.(.*)\.(.*)', self.address)
        if not addr_list:
            self.module.fail_json(msg='Error: Match ip-address fail.')

        # pack the four octets into one 32-bit value for range tests
        value = ((int(addr_list[0][0])) * 0x1000000) + (int(addr_list[0][1]) * 0x10000) + \
            (int(addr_list[0][2]) * 0x100) + (int(addr_list[0][3]))
        # reject loopback (127/8), class E (240/4), multicast (224/4) and 0.0.0.0
        if (value & (0xff000000) == 0x7f000000) or (value & (0xF0000000) == 0xF0000000) \
                or (value & (0xF0000000) == 0xE0000000) or (value == 0):
            return False
        return True

    def check_params(self):
        """Check all input params"""
        # check interface type
        if self.interface:
            intf_type = get_interface_type(self.interface)
            if not intf_type:
                self.module.fail_json(
                    msg='Error: Interface name of %s '
                    'is error.' % self.interface)

        if self.vpn_name:
            if (len(self.vpn_name) < 1) or (len(self.vpn_name) > 31):
                self.module.fail_json(
                    msg='Error: VPN name length is between 1 and 31.')

        if self.address:
            self.check_ipaddr_validate()

    def check_response(self, xml_str, xml_name):
        """Check if response message is already succeed."""
        # a successful NETCONF edit-config reply contains <ok/>
        if "<ok/>" not in xml_str:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)

    def set_ntp(self, *args):
        """Configure ntp parameters.

        args layout (present): addrFamily, address, type, vpnName, keyId,
        isPreferred, ifName; (absent): addrFamily, address, type, vpnName.
        The unused address family slot is filled with '::' or '0.0.0.0'.
        """
        if self.state == 'present':
            if self.ip_ver == 'IPv4':
                xml_str = CE_NC_MERGE_NTP_CONFIG % (
                    args[0], args[1], '::', args[2], args[3], args[4], args[5], args[6])
            elif self.ip_ver == 'IPv6':
                xml_str = CE_NC_MERGE_NTP_CONFIG % (
                    args[0], '0.0.0.0', args[1], args[2], args[3], args[4], args[5], args[6])
            ret_xml = set_nc_config(self.module, xml_str)
            self.check_response(ret_xml, "NTP_CORE_CONFIG")
        else:
            if self.ip_ver == 'IPv4':
                xml_str = CE_NC_DELETE_NTP_CONFIG % (
                    args[0], args[1], '::', args[2], args[3])
            elif self.ip_ver == 'IPv6':
                xml_str = CE_NC_DELETE_NTP_CONFIG % (
                    args[0], '0.0.0.0', args[1], args[2], args[3])
            ret_xml = set_nc_config(self.module, xml_str)
            self.check_response(ret_xml, "UNDO_NTP_CORE_CONFIG")

    def config_ntp(self):
        """Config ntp: apply or remove the entry unless it already matches."""
        if self.state == "present":
            if self.address and not self.conf_exsit:
                if self.is_preferred == 'enable':
                    is_preferred = 'true'
                else:
                    is_preferred = 'false'
                self.set_ntp(self.ip_ver, self.address, self.peer_type,
                             self.vpn_name, self.key_id, is_preferred, self.interface)
                self.changed = True
        else:
            if self.address:
                self.set_ntp(self.ip_ver, self.address,
                             self.peer_type, self.vpn_name, '', '', '')
                self.changed = True

    def show_result(self):
        """Show result: exit the module with the collected facts."""
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)

    def get_ntp_exist_config(self):
        """Get ntp existed configure as a list of per-entry dicts."""
        ntp_config = list()
        conf_str = CE_NC_GET_NTP_CONFIG
        con_obj = get_nc_config(self.module, conf_str)
        if "<data/>" in con_obj:
            return ntp_config

        # strip namespaces so the simple relative xpath below works
        xml_str = con_obj.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")

        # get all ntp config info
        root = ElementTree.fromstring(xml_str)
        ntpsite = root.findall("ntp/ntpUCastCfgs/ntpUCastCfg")
        for nexthop in ntpsite:
            ntp_dict = dict()
            for ele in nexthop:
                if ele.tag in ["addrFamily", "vpnName", "ifName", "ipv4Addr",
                               "ipv6Addr", "type", "isPreferred", "keyId"]:
                    ntp_dict[ele.tag] = ele.text

            ip_addr = ntp_dict['ipv6Addr']
            if ntp_dict['addrFamily'] == "IPv4":
                ip_addr = ntp_dict['ipv4Addr']
            if ntp_dict['ifName'] is None:
                ntp_dict['ifName'] = ""
            if ntp_dict['isPreferred'] == 'true':
                is_preferred = 'enable'
            else:
                is_preferred = 'disable'

            if self.state == "present":
                # flag when the device already holds exactly the requested entry
                key_id = ntp_dict['keyId'] or ""
                cur_ntp_cfg = dict(vpn_name=ntp_dict['vpnName'], source_int=ntp_dict['ifName'].lower(), address=ip_addr,
                                   peer_type=ntp_dict['type'], prefer=is_preferred, key_id=key_id)
                exp_ntp_cfg = dict(vpn_name=self.vpn_name, source_int=self.interface.lower(), address=self.address,
                                   peer_type=self.peer_type, prefer=self.is_preferred, key_id=self.key_id)
                if cur_ntp_cfg == exp_ntp_cfg:
                    self.conf_exsit = True

            # normalize defaults to None for the reported facts
            vpn_name = ntp_dict['vpnName']
            if ntp_dict['vpnName'] == "_public_":
                vpn_name = None

            if_name = ntp_dict['ifName']
            if if_name == "":
                if_name = None

            if self.peer_type == 'Server':
                ntp_config.append(dict(vpn_name=vpn_name,
                                       source_int=if_name, server=ip_addr,
                                       is_preferred=is_preferred, key_id=ntp_dict['keyId']))
            else:
                ntp_config.append(dict(vpn_name=vpn_name,
                                       source_int=if_name, peer=ip_addr,
                                       is_preferred=is_preferred, key_id=ntp_dict['keyId']))

        return ntp_config

    def get_existing(self):
        """Get existing info"""
        if self.address:
            self.existing = self.get_ntp_exist_config()

    def get_proposed(self):
        """Get proposed info"""
        if self.address:
            vpn_name = self.vpn_name
            if vpn_name == "_public_":
                vpn_name = None

            if_name = self.interface
            if if_name == "":
                if_name = None

            key_id = self.key_id
            if key_id == "":
                key_id = None

            if self.peer_type == 'Server':
                self.proposed = dict(state=self.state, vpn_name=vpn_name,
                                     source_int=if_name, server=self.address,
                                     is_preferred=self.is_preferred, key_id=key_id)
            else:
                self.proposed = dict(state=self.state, vpn_name=vpn_name,
                                     source_int=if_name, peer=self.address,
                                     is_preferred=self.is_preferred, key_id=key_id)

    def get_end_state(self):
        """Get end state info"""
        if self.address:
            self.end_state = self.get_ntp_exist_config()

    def get_update_cmd(self):
        """Get updated commands: build the equivalent CLI line for reporting."""
        if self.conf_exsit:
            return

        cli_str = ""
        if self.state == "present":
            if self.address:
                if self.peer_type == 'Server':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % (
                            "ntp unicast-server", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "ntp unicast-server ipv6", self.address)
                elif self.peer_type == 'Peer':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % ("ntp unicast-peer", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "ntp unicast-peer ipv6", self.address)

                # optional trailing keywords in fixed CLI order
                if self.key_id:
                    cli_str = "%s %s %s" % (
                        cli_str, "authentication-keyid", self.key_id)
                if self.interface:
                    cli_str = "%s %s %s" % (
                        cli_str, "source-interface", self.interface)
                if (self.vpn_name) and (self.vpn_name != '_public_'):
                    cli_str = "%s %s %s" % (
                        cli_str, "vpn-instance", self.vpn_name)
                if self.is_preferred == "enable":
                    cli_str = "%s %s" % (cli_str, "preferred")
        else:
            if self.address:
                if self.peer_type == 'Server':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % (
                            "undo ntp unicast-server", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "undo ntp unicast-server ipv6", self.address)
                elif self.peer_type == 'Peer':
                    if self.ip_ver == "IPv4":
                        cli_str = "%s %s" % (
                            "undo ntp unicast-peer", self.address)
                    else:
                        cli_str = "%s %s" % (
                            "undo ntp unicast-peer ipv6", self.address)

                if (self.vpn_name) and (self.vpn_name != '_public_'):
                    cli_str = "%s %s %s" % (
                        cli_str, "vpn-instance", self.vpn_name)

        self.updates_cmd.append(cli_str)

    def work(self):
        """Execute task"""
        self.get_existing()
        self.get_proposed()
        self.config_ntp()
        self.get_update_cmd()
        self.get_end_state()
        self.show_result()
def main():
    """Module entry point: build the argument spec and run the Ntp task."""
    argument_spec = dict(
        server=dict(type='str'),
        peer=dict(type='str'),
        key_id=dict(type='str'),
        is_preferred=dict(type='str', choices=['enable', 'disable']),
        vpn_name=dict(type='str', default='_public_'),
        source_int=dict(type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    # merge in the shared CloudEngine connection arguments
    argument_spec.update(ce_argument_spec)
    Ntp(argument_spec).work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
fr500/mgba | src/platform/python/mgba/log.py | 2 | 1181 | # Copyright (c) 2013-2016 Jeffrey Pfau
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._pylib import ffi, lib
from . import createCallback
createCallback("mLoggerPy", "log", "_pyLog")
defaultLogger = None
def installDefault(logger):
    """Install *logger* as the process-wide default mGBA logger.

    Keeps a module-level reference so the Python object backing the
    native logger pointer is not garbage-collected while installed.
    """
    global defaultLogger
    defaultLogger = logger
    lib.mLogSetDefaultLogger(logger._native)
class Logger(object):
    """Python wrapper around mGBA's native mLogger.

    Subclass and override log() to route core log messages elsewhere;
    the default implementation prints them to stdout.
    """

    # log level constants mirrored from the native mLOG_* enum
    FATAL = lib.mLOG_FATAL
    DEBUG = lib.mLOG_DEBUG
    INFO = lib.mLOG_INFO
    WARN = lib.mLOG_WARN
    ERROR = lib.mLOG_ERROR
    STUB = lib.mLOG_STUB
    GAME_ERROR = lib.mLOG_GAME_ERROR

    def __init__(self):
        # handle lets the native callback find this Python object again;
        # the native logger is freed when this wrapper is collected
        self._handle = ffi.new_handle(self)
        self._native = ffi.gc(lib.mLoggerPythonCreate(self._handle), lib.free)

    @staticmethod
    def categoryName(category):
        """Return the human-readable name of a native log category."""
        return ffi.string(lib.mLogCategoryName(category)).decode('UTF-8')

    def log(self, category, level, message):
        """Default sink: print 'category: message' to stdout."""
        print("{}: {}".format(self.categoryName(category), message))
class NullLogger(Logger):
    """Logger that silently discards all messages."""

    def log(self, category, level, message):
        # intentionally a no-op
        pass
| mpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.